From 676bcaf123d35ca9bf56b073073d75bc4e84b125 Mon Sep 17 00:00:00 2001
From: zhuxiujuan28 <562873187@qq.com>
Date: Fri, 25 Aug 2023 17:19:41 +0800
Subject: [PATCH] copy v3.3 to v3.4

Signed-off-by: zhuxiujuan28 <562873187@qq.com>
---
 content/en/docs/v3.4/_index.md | 62 ++
 .../_index.md | 13 +
 .../external-authentication/_index.md | 8 +
 .../oidc-identity-provider.md | 62 ++
 .../set-up-external-authentication.md | 112 ++++
 .../use-an-ldap-service.md | 104 +++
 .../use-an-oauth2-identity-provider.md | 130 ++++
 .../multi-tenancy-in-kubesphere.md | 57 ++
 .../en/docs/v3.4/application-store/_index.md | 16 +
 .../app-developer-guide/_index.md | 7 +
 .../helm-developer-guide.md | 157 +++++
 .../app-developer-guide/helm-specification.md | 130 ++++
 .../app-lifecycle-management.md | 220 +++++++
 .../application-store/built-in-apps/_index.md | 7 +
 .../built-in-apps/deploy-chaos-mesh.md | 82 +++
 .../built-in-apps/etcd-app.md | 58 ++
 .../built-in-apps/harbor-app.md | 123 ++++
 .../built-in-apps/memcached-app.md | 49 ++
 .../built-in-apps/meshery-app.md | 53 ++
 .../built-in-apps/minio-app.md | 57 ++
 .../built-in-apps/mongodb-app.md | 54 ++
 .../built-in-apps/mysql-app.md | 66 ++
 .../built-in-apps/nginx-app.md | 60 ++
 .../built-in-apps/postgresql-app.md | 60 ++
 .../built-in-apps/rabbitmq-app.md | 61 ++
 .../built-in-apps/radondb-mysql-app.md | 50 ++
 .../built-in-apps/radondb-postgresql-app.md | 62 ++
 .../built-in-apps/redis-app.md | 48 ++
 .../built-in-apps/tomcat-app.md | 63 ++
 .../application-store/external-apps/_index.md | 7 +
 .../external-apps/deploy-clickhouse.md | 144 +++++
 .../external-apps/deploy-gitlab.md | 119 ++++
 .../external-apps/deploy-litmus.md | 140 +++++
 .../external-apps/deploy-metersphere.md | 64 ++
 .../external-apps/deploy-radondb-mysql.md | 160 +++++
 .../external-apps/deploy-tidb.md | 142 +++++
 .../v3.4/cluster-administration/_index.md | 20 +
 .../application-resources-monitoring.md | 30 +
 .../cluster-settings/_index.md | 7 +
 .../cluster-settings/cluster-gateway.md | 82 +++
 .../cluster-visibility-and-authorization.md | 53 ++
 .../log-collections/_index.md | 7 +
 .../log-collections/add-es-as-receiver.md | 35 ++
 .../add-fluentd-as-receiver.md | 154 +++++
 .../log-collections/add-kafka-as-receiver.md | 131 ++++
 .../log-collections/introduction.md | 87 +++
 .../cluster-status-monitoring.md | 134 ++++
 .../_index.md | 7 +
 .../alerting-message.md | 28 +
 .../alerting-policy.md | 69 ++
 .../alertmanager.md | 37 ++
 .../docs/v3.4/cluster-administration/nodes.md | 62 ++
 .../notification-management/_index.md | 7 +
 .../configure-dingtalk.md | 38 ++
 .../configure-email.md | 79 +++
 .../configure-slack.md | 93 +++
 .../configure-webhook.md | 63 ++
 .../configure-wecom.md | 33 +
 .../customize-cluster-name.md | 40 ++
 ...hut-down-and-restart-cluster-gracefully.md | 74 +++
 .../cluster-administration/snapshotclass.md | 31 +
 .../cluster-administration/storageclass.md | 184 ++++++
 .../en/docs/v3.4/devops-user-guide/_index.md | 14 +
 .../devops-overview/_index.md | 7 +
 .../devops-project-management.md | 49 ++
 .../devops-overview/overview.md | 46 ++
 .../v3.4/devops-user-guide/examples/_index.md | 7 +
 .../examples/a-maven-project.md | 161 +++++
 .../examples/create-multi-cluster-pipeline.md | 253 ++++++++
 .../examples/go-project-pipeline.md | 132 ++++
 .../examples/multi-cluster-project-example.md | 132 ++++
 .../examples/use-nexus-in-pipelines.md | 170 +++++
 .../how-to-integrate/_index.md | 7 +
 .../how-to-integrate/harbor.md | 144 +++++
 .../how-to-integrate/sonarqube.md | 273 ++++++++
 .../devops-user-guide/how-to-use/_index.md | 7 +
.../how-to-use/code-repositories/_index.md | 7 + .../import-code-repositories.md | 98 +++ .../continuous-deployments/_index.md | 7 + .../use-gitops-for-continous-deployment.md | 404 ++++++++++++ .../how-to-use/devops-settings/_index.md | 7 + .../devops-settings/add-cd-allowlist.md | 28 + .../devops-settings/credential-management.md | 93 +++ .../role-and-member-management.md | 76 +++ .../how-to-use/devops-settings/set-ci-node.md | 49 ++ .../how-to-use/pipelines/_index.md | 7 + .../pipelines/choose-jenkins-agent.md | 134 ++++ ...-pipeline-using-graphical-editing-panel.md | 389 ++++++++++++ .../create-a-pipeline-using-jenkinsfile.md | 281 +++++++++ .../pipelines/customize-jenkins-agent.md | 70 +++ .../pipelines/gitlab-multibranch-pipeline.md | 129 ++++ .../how-to-use/pipelines/jenkins-email.md | 42 ++ .../how-to-use/pipelines/jenkins-setting.md | 50 ++ .../pipelines/jenkins-shared-library.md | 122 ++++ .../how-to-use/pipelines/pipeline-settings.md | 170 +++++ .../how-to-use/pipelines/pipeline-webhook.md | 67 ++ .../pipelines/use-pipeline-templates.md | 104 +++ content/en/docs/v3.4/faq/_index.md | 12 + .../en/docs/v3.4/faq/access-control/_index.md | 7 + ...netes-namespace-to-kubesphere-workspace.md | 38 ++ .../v3.4/faq/access-control/cannot-login.md | 141 +++++ .../faq/access-control/forgot-password.md | 33 + .../faq/access-control/session-timeout.md | 21 + .../en/docs/v3.4/faq/applications/_index.md | 7 + .../faq/applications/remove-built-in-apps.md | 32 + content/en/docs/v3.4/faq/console/_index.md | 7 + .../faq/console/change-console-language.md | 25 + .../v3.4/faq/console/console-web-browser.md | 11 + .../edit-resources-in-system-workspace.md | 50 ++ content/en/docs/v3.4/faq/devops/_index.md | 7 + .../devops/create-devops-kubeconfig-on-aws.md | 106 ++++ .../faq/devops/install-jenkins-plugins.md | 67 ++ .../en/docs/v3.4/faq/installation/_index.md | 7 + .../faq/installation/configure-booster.md | 84 +++ ...nstall-addon-through-yaml-using-kubekey.md | 19 + .../installation/ssh-connection-failure.md | 40 ++ .../docs/v3.4/faq/installation/telemetry.md | 86 +++ .../faq/multi-cluster-management/_index.md | 7 + .../host-cluster-access-member-cluster.md | 71 +++ .../manage-multi-cluster.md | 61 ++ .../en/docs/v3.4/faq/observability/_index.md | 7 + .../en/docs/v3.4/faq/observability/byop.md | 205 ++++++ .../en/docs/v3.4/faq/observability/logging.md | 163 +++++ .../docs/v3.4/faq/observability/monitoring.md | 123 ++++ content/en/docs/v3.4/faq/upgrade/_index.md | 7 + .../v3.4/faq/upgrade/qingcloud-csi-upgrade.md | 60 ++ .../v3.4/installing-on-kubernetes/_index.md | 28 + .../hosted-kubernetes/_index.md | 7 + .../install-kubesphere-on-aks.md | 131 ++++ .../install-kubesphere-on-do.md | 113 ++++ .../install-kubesphere-on-eks.md | 184 ++++++ .../install-kubesphere-on-gke.md | 106 ++++ .../install-kubesphere-on-huaweicloud-cce.md | 112 ++++ .../install-kubesphere-on-oke.md | 147 +++++ .../introduction/_index.md | 7 + .../introduction/overview.md | 63 ++ .../introduction/prerequisites.md | 49 ++ .../on-prem-kubernetes/_index.md | 7 + .../install-ks-on-linux-airgapped.md | 412 ++++++++++++ .../uninstall-kubesphere-from-k8s.md | 15 + .../docs/v3.4/installing-on-linux/_index.md | 17 + .../cluster-operation/_index.md | 7 + .../cluster-operation/add-edge-nodes.md | 259 ++++++++ .../cluster-operation/add-new-nodes.md | 156 +++++ .../cluster-operation/remove-nodes.md | 33 + .../_index.md | 7 + .../ha-configuration.md | 216 +++++++ .../internal-ha-configuration.md | 198 ++++++ 
...-up-ha-cluster-using-keepalived-haproxy.md | 414 ++++++++++++ .../introduction/_index.md | 7 + .../introduction/air-gapped-installation.md | 590 ++++++++++++++++++ .../installing-on-linux/introduction/intro.md | 70 +++ .../introduction/kubekey.md | 89 +++ .../introduction/multioverview.md | 364 +++++++++++ .../introduction/port-firewall.md | 33 + .../installing-on-linux/introduction/vars.md | 126 ++++ .../installing-on-linux/on-premises/_index.md | 9 + .../on-premises/install-kubesphere-and-k3s.md | 181 ++++++ .../install-kubesphere-on-bare-metal.md | 397 ++++++++++++ .../install-kubesphere-on-vmware-vsphere.md | 540 ++++++++++++++++ .../installing-kubesphere-on-minikube.md | 148 +++++ .../_index.md | 7 + .../install-ceph-csi-rbd.md | 128 ++++ .../install-glusterfs.md | 297 +++++++++ .../install-nfs-client.md | 270 ++++++++ .../install-qingcloud-csi.md | 274 ++++++++ .../understand-persistent-storage.md | 55 ++ .../public-cloud/_index.md | 7 + .../install-kubesphere-on-azure-vms.md | 264 ++++++++ .../install-kubesphere-on-qingcloud-vms.md | 341 ++++++++++ .../uninstall-kubesphere-and-Kubernetes.md | 25 + content/en/docs/v3.4/introduction/_index.md | 14 + .../en/docs/v3.4/introduction/advantages.md | 92 +++ .../en/docs/v3.4/introduction/architecture.md | 44 ++ .../en/docs/v3.4/introduction/ecosystem.md | 15 + content/en/docs/v3.4/introduction/features.md | 172 +++++ content/en/docs/v3.4/introduction/scen | 0 .../en/docs/v3.4/introduction/scenarios.md | 105 ++++ .../v3.4/introduction/what's-new-in-3.3.md | 13 + .../v3.4/introduction/what-is-kubesphere.md | 39 ++ .../v3.4/multicluster-management/_index.md | 15 + .../enable-multicluster/_index.md | 7 + .../enable-multicluster/agent-connection.md | 270 ++++++++ .../enable-multicluster/direct-connection.md | 196 ++++++ .../retrieve-kubeconfig.md | 43 ++ .../enable-multicluster/update-kubeconfig.md | 18 + .../import-cloud-hosted-k8s/_index.md | 7 + .../import-aliyun-ack.md | 70 +++ .../import-cloud-hosted-k8s/import-aws-eks.md | 171 +++++ .../import-cloud-hosted-k8s/import-gke.md | 116 ++++ .../introduction/_index.md | 7 + .../introduction/kubefed-in-kubesphere.md | 49 ++ .../introduction/overview.md | 15 + .../multicluster-management/unbind-cluster.md | 61 ++ .../docs/v3.4/pluggable-components/_index.md | 13 + .../v3.4/pluggable-components/alerting.md | 100 +++ .../v3.4/pluggable-components/app-store.md | 120 ++++ .../pluggable-components/auditing-logs.md | 182 ++++++ .../docs/v3.4/pluggable-components/devops.md | 130 ++++ .../docs/v3.4/pluggable-components/events.md | 191 ++++++ .../v3.4/pluggable-components/kubeedge.md | 184 ++++++ .../docs/v3.4/pluggable-components/logging.md | 199 ++++++ .../pluggable-components/metrics-server.md | 113 ++++ .../pluggable-components/network-policy.md | 109 ++++ .../v3.4/pluggable-components/overview.md | 98 +++ .../v3.4/pluggable-components/pod-ip-pools.md | 104 +++ .../v3.4/pluggable-components/service-mesh.md | 157 +++++ .../pluggable-components/service-topology.md | 130 ++++ .../uninstall-pluggable-components.md | 205 ++++++ .../v3.4/project-administration/_index.md | 13 + .../container-limit-ranges.md | 47 ++ .../disk-log-collection.md | 75 +++ .../project-and-multicluster-project.md | 95 +++ .../project-administration/project-gateway.md | 66 ++ .../project-network-isolation.md | 206 ++++++ .../role-and-member-management.md | 75 +++ .../en/docs/v3.4/project-user-guide/_index.md | 12 + .../project-user-guide/alerting/_index.md | 7 + .../alerting/alerting-message.md | 27 + .../alerting/alerting-policy.md 
| 60 ++ .../application-workloads/_index.md | 7 + .../container-image-settings.md | 268 ++++++++ .../application-workloads/cronjobs.md | 105 ++++ .../application-workloads/daemonsets.md | 137 ++++ .../application-workloads/deployments.md | 139 +++++ .../horizontal-pod-autoscaling.md | 104 +++ .../application-workloads/jobs.md | 162 +++++ .../application-workloads/routes.md | 133 ++++ .../application-workloads/services.md | 190 ++++++ .../application-workloads/statefulsets.md | 148 +++++ .../project-user-guide/application/_index.md | 7 + .../application/app-template.md | 33 + .../application/compose-app.md | 96 +++ .../application/deploy-app-from-appstore.md | 62 ++ .../application/deploy-app-from-template.md | 92 +++ .../configuration/_index.md | 7 + .../configuration/configmaps.md | 71 +++ .../configuration/image-registry.md | 104 +++ .../configuration/secrets.md | 121 ++++ .../configuration/serviceaccounts.md | 50 ++ .../custom-application-monitoring/_index.md | 7 + .../examples/_index.md | 7 + .../examples/monitor-mysql.md | 72 +++ .../examples/monitor-sample-web.md | 72 +++ .../introduction.md | 53 ++ .../visualization/_index.md | 7 + .../visualization/overview.md | 71 +++ .../visualization/panel.md | 34 + .../visualization/querying.md | 13 + .../grayscale-release/_index.md | 7 + .../blue-green-deployment.md | 74 +++ .../grayscale-release/canary-release.md | 120 ++++ .../grayscale-release/overview.md | 39 ++ .../grayscale-release/traffic-mirroring.md | 81 +++ .../image-builder/_index.md | 7 + .../image-builder/binary-to-image.md | 141 +++++ .../image-builder/s2i-and-b2i-webhooks.md | 81 +++ .../image-builder/s2i-introduction.md | 39 ++ .../image-builder/s2i-templates.md | 324 ++++++++++ .../image-builder/source-to-image.md | 113 ++++ .../v3.4/project-user-guide/storage/_index.md | 7 + .../storage/volume-snapshots.md | 78 +++ .../project-user-guide/storage/volumes.md | 239 +++++++ content/en/docs/v3.4/quick-start/_index.md | 16 + .../v3.4/quick-start/all-in-one-on-linux.md | 259 ++++++++ .../create-workspace-and-project.md | 239 +++++++ .../quick-start/deploy-bookinfo-to-k8s.md | 94 +++ .../enable-pluggable-components.md | 146 +++++ .../quick-start/minimal-kubesphere-on-k8s.md | 62 ++ .../v3.4/quick-start/wordpress-deployment.md | 135 ++++ content/en/docs/v3.4/reference/_index.md | 14 + .../docs/v3.4/reference/api-changes/_index.md | 12 + .../v3.4/reference/api-changes/logging.md | 27 + .../v3.4/reference/api-changes/monitoring.md | 110 ++++ .../reference/api-changes/notification.md | 15 + content/en/docs/v3.4/reference/api-docs.md | 122 ++++ .../reference/environment-requirements.md | 37 ++ content/en/docs/v3.4/reference/glossary.md | 160 +++++ .../storage-system-installation/_index.md | 12 + .../glusterfs-server.md | 516 +++++++++++++++ .../storage-system-installation/nfs-server.md | 102 +++ content/en/docs/v3.4/release/_index.md | 14 + content/en/docs/v3.4/release/release-v200.md | 92 +++ content/en/docs/v3.4/release/release-v201.md | 19 + content/en/docs/v3.4/release/release-v202.md | 40 ++ content/en/docs/v3.4/release/release-v210.md | 155 +++++ content/en/docs/v3.4/release/release-v211.md | 122 ++++ content/en/docs/v3.4/release/release-v300.md | 207 ++++++ content/en/docs/v3.4/release/release-v310.md | 175 ++++++ content/en/docs/v3.4/release/release-v311.md | 168 +++++ content/en/docs/v3.4/release/release-v320.md | 177 ++++++ content/en/docs/v3.4/release/release-v321.md | 46 ++ content/en/docs/v3.4/release/release-v330.md | 90 +++ content/en/docs/v3.4/release/release-v331.md | 42 
++ content/en/docs/v3.4/release/release-v332.md | 92 +++ content/en/docs/v3.4/toolbox/_index.md | 13 + .../en/docs/v3.4/toolbox/auditing/_index.md | 7 + .../v3.4/toolbox/auditing/auditing-query.md | 87 +++ .../auditing/auditing-receive-customize.md | 180 ++++++ .../v3.4/toolbox/auditing/auditing-rule.md | 207 ++++++ content/en/docs/v3.4/toolbox/events-query.md | 39 ++ content/en/docs/v3.4/toolbox/log-query.md | 59 ++ .../toolbox/metering-and-billing/_index.md | 7 + .../metering-and-billing/enable-billing.md | 84 +++ .../view-resource-consumption.md | 71 +++ content/en/docs/v3.4/toolbox/web-kubectl.md | 44 ++ content/en/docs/v3.4/upgrade/_index.md | 14 + .../air-gapped-upgrade-with-ks-installer.md | 182 ++++++ .../air-gapped-upgrade-with-kubekey.md | 349 +++++++++++ content/en/docs/v3.4/upgrade/overview.md | 28 + .../v3.4/upgrade/upgrade-with-ks-installer.md | 41 ++ .../docs/v3.4/upgrade/upgrade-with-kubekey.md | 146 +++++ content/en/docs/v3.4/upgrade/what-changed.md | 12 + .../v3.4/workspace-administration/_index.md | 16 + .../app-repository/_index.md | 7 + .../app-repository/import-helm-repository.md | 52 ++ .../upload-app-to-public-repository.md | 44 ++ .../department-management.md | 80 +++ .../project-quotas.md | 56 ++ .../role-and-member-management.md | 61 ++ .../upload-helm-based-application.md | 38 ++ .../what-is-workspace.md | 83 +++ .../workspace-network-isolation.md | 37 ++ .../workspace-quotas.md | 41 ++ content/zh/docs/v3.4/_index.md | 62 ++ .../_index.md | 13 + .../external-authentication/_index.md | 8 + .../cas-identity-provider.md | 58 ++ .../oidc-identity-provider.md | 64 ++ .../set-up-external-authentication.md | 112 ++++ .../use-an-ldap-service.md | 104 +++ .../use-an-oauth2-identity-provider.md | 130 ++++ .../multi-tenancy-in-kubesphere.md | 57 ++ .../zh/docs/v3.4/application-store/_index.md | 16 + .../app-developer-guide/_index.md | 7 + .../helm-developer-guide.md | 158 +++++ .../app-developer-guide/helm-specification.md | 131 ++++ .../app-lifecycle-management.md | 230 +++++++ .../application-store/built-in-apps/_index.md | 7 + .../built-in-apps/chaos-mesh-app.md | 93 +++ .../built-in-apps/etcd-app.md | 60 ++ .../built-in-apps/harbor-app.md | 124 ++++ .../built-in-apps/jh-gitlab.md | 69 ++ .../built-in-apps/memcached-app.md | 50 ++ .../built-in-apps/minio-app.md | 58 ++ .../built-in-apps/mongodb-app.md | 55 ++ .../built-in-apps/mysql-app.md | 67 ++ .../built-in-apps/nginx-app.md | 61 ++ .../built-in-apps/postgresql-app.md | 59 ++ .../built-in-apps/rabbitmq-app.md | 63 ++ .../built-in-apps/radondb-mysql-app.md | 51 ++ .../built-in-apps/radondb-postgresql-app.md | 60 ++ .../built-in-apps/redis-app.md | 47 ++ .../built-in-apps/tomcat-app.md | 65 ++ .../application-store/external-apps/_index.md | 7 + .../external-apps/deploy-clickhouse.md | 150 +++++ .../external-apps/deploy-gitlab.md | 122 ++++ .../external-apps/deploy-metersphere.md | 65 ++ .../external-apps/deploy-radondb-mysql.md | 167 +++++ .../external-apps/deploy-tidb.md | 146 +++++ .../v3.4/cluster-administration/_index.md | 20 + .../application-resources-monitoring.md | 29 + .../cluster-settings/_index.md | 7 + .../cluster-settings/cluster-gateway.md | 84 +++ .../cluster-visibility-and-authorization.md | 53 ++ .../log-collections/_index.md | 7 + .../log-collections/add-es-as-receiver.md | 34 + .../add-fluentd-as-receiver.md | 154 +++++ .../log-collections/add-kafka-as-receiver.md | 131 ++++ .../log-collections/introduction.md | 87 +++ .../cluster-status-monitoring.md | 137 ++++ .../_index.md | 7 + 
.../alerting-message.md | 27 + .../alerting-policy.md | 70 +++ .../alertmanager.md | 37 ++ .../docs/v3.4/cluster-administration/nodes.md | 64 ++ .../notification-management/_index.md | 7 + .../configure-dingtalk.md | 127 ++++ .../configure-email.md | 75 +++ .../configure-slack.md | 90 +++ .../configure-webhook.md | 63 ++ .../configure-wecom.md | 104 +++ .../customize-cluster-name.md | 40 ++ ...hut-down-and-restart-cluster-gracefully.md | 89 +++ .../cluster-administration/snapshotclass.md | 34 + .../cluster-administration/storageclass.md | 195 ++++++ .../zh/docs/v3.4/devops-user-guide/_index.md | 14 + .../devops-overview/_index.md | 7 + .../devops-project-management.md | 49 ++ .../devops-overview/overview.md | 46 ++ .../v3.4/devops-user-guide/examples/_index.md | 7 + .../examples/a-maven-project.md | 162 +++++ .../examples/create-multi-cluster-pipeline.md | 246 ++++++++ .../examples/go-project-pipeline.md | 134 ++++ .../examples/multi-cluster-project-example.md | 132 ++++ .../examples/use-nexus-in-pipelines.md | 174 ++++++ .../how-to-integrate/_index.md | 7 + .../how-to-integrate/harbor.md | 143 +++++ .../how-to-integrate/sonarqube.md | 273 ++++++++ .../devops-user-guide/how-to-use/_index.md | 7 + .../how-to-use/code-repositories/_index.md | 7 + .../import-code-repositories.md | 98 +++ .../continuous-deployments/_index.md | 7 + .../use-gitops-for-continous-deployment.md | 404 ++++++++++++ .../how-to-use/devops-settings/_index.md | 7 + .../devops-settings/add-cd-allowlist.md | 28 + .../devops-settings/credential-management.md | 93 +++ .../role-and-member-management.md | 76 +++ .../how-to-use/devops-settings/set-ci-node.md | 49 ++ .../how-to-use/pipelines/_index.md | 7 + .../pipelines/choose-jenkins-agent.md | 134 ++++ ...-pipeline-using-graphical-editing-panel.md | 389 ++++++++++++ .../create-a-pipeline-using-jenkinsfile.md | 281 +++++++++ .../pipelines/customize-jenkins-agent.md | 70 +++ .../pipelines/gitlab-multibranch-pipeline.md | 129 ++++ .../how-to-use/pipelines/jenkins-email.md | 42 ++ .../how-to-use/pipelines/jenkins-setting.md | 50 ++ .../pipelines/jenkins-shared-library.md | 122 ++++ .../how-to-use/pipelines/pipeline-settings.md | 170 +++++ .../how-to-use/pipelines/pipeline-webhook.md | 66 ++ .../pipelines/use-pipeline-templates.md | 92 +++ content/zh/docs/v3.4/faq/_index.md | 12 + .../zh/docs/v3.4/faq/access-control/_index.md | 7 + ...netes-namespace-to-kubesphere-workspace.md | 38 ++ .../v3.4/faq/access-control/cannot-login.md | 143 +++++ .../faq/access-control/forgot-password.md | 33 + .../faq/access-control/session-timeout.md | 21 + .../zh/docs/v3.4/faq/applications/_index.md | 7 + .../faq/applications/remove-built-in-apps.md | 33 + content/zh/docs/v3.4/faq/console/_index.md | 7 + .../faq/console/change-console-language.md | 25 + .../v3.4/faq/console/console-web-browser.md | 11 + .../edit-resources-in-system-workspace.md | 49 ++ content/zh/docs/v3.4/faq/devops/_index.md | 7 + .../devops/create-devops-kubeconfig-on-aws.md | 106 ++++ .../faq/devops/install-jenkins-plugins.md | 67 ++ .../zh/docs/v3.4/faq/installation/_index.md | 7 + .../faq/installation/configure-booster.md | 84 +++ ...nstall-addon-through-yaml-using-kubekey.md | 19 + .../installation/ssh-connection-failure.md | 40 ++ .../docs/v3.4/faq/installation/telemetry.md | 86 +++ .../faq/multi-cluster-management/_index.md | 7 + .../host-cluster-access-member-cluster.md | 71 +++ .../manage-multi-cluster.md | 60 ++ .../zh/docs/v3.4/faq/observability/_index.md | 7 + .../zh/docs/v3.4/faq/observability/byop.md | 207 ++++++ 
.../zh/docs/v3.4/faq/observability/logging.md | 170 +++++ .../docs/v3.4/faq/observability/monitoring.md | 124 ++++ content/zh/docs/v3.4/faq/upgrade/_index.md | 7 + .../v3.4/faq/upgrade/qingcloud-csi-upgrade.md | 60 ++ .../v3.4/installing-on-kubernetes/_index.md | 18 + .../hosted-kubernetes/_index.md | 7 + .../install-ks-on-tencent-tke.md | 139 +++++ .../install-kubesphere-on-ack.md | 233 +++++++ .../install-kubesphere-on-aks.md | 154 +++++ .../install-kubesphere-on-do.md | 116 ++++ .../install-kubesphere-on-eks.md | 214 +++++++ .../install-kubesphere-on-gke.md | 107 ++++ .../install-kubesphere-on-huaweicloud-cce.md | 113 ++++ .../install-kubesphere-on-oke.md | 142 +++++ .../introduction/_index.md | 7 + .../introduction/overview.md | 65 ++ .../introduction/prerequisites.md | 51 ++ .../on-prem-kubernetes/_index.md | 7 + .../install-ks-on-linux-airgapped.md | 411 ++++++++++++ .../uninstall-kubesphere-from-k8s.md | 15 + .../docs/v3.4/installing-on-linux/_index.md | 14 + .../cluster-operation/_index.md | 7 + .../cluster-operation/add-edge-nodes.md | 258 ++++++++ .../cluster-operation/add-new-nodes.md | 158 +++++ .../cluster-operation/remove-nodes.md | 33 + .../_index.md | 7 + .../ha-configuration.md | 216 +++++++ .../internal-ha-configuration.md | 200 ++++++ ...-up-ha-cluster-using-keepalived-haproxy.md | 413 ++++++++++++ .../introduction/_index.md | 7 + .../introduction/air-gapped-installation.md | 587 +++++++++++++++++ .../installing-on-linux/introduction/intro.md | 70 +++ .../introduction/kubekey.md | 90 +++ .../introduction/multioverview.md | 354 +++++++++++ .../introduction/port-firewall.md | 31 + .../installing-on-linux/introduction/vars.md | 130 ++++ .../installing-on-linux/on-premises/_index.md | 9 + .../on-premises/install-kubesphere-and-k3s.md | 184 ++++++ .../install-kubesphere-on-bare-metal.md | 398 ++++++++++++ .../install-kubesphere-on-vmware-vsphere.md | 468 ++++++++++++++ .../_index.md | 7 + .../install-ceph-csi-rbd.md | 126 ++++ .../install-glusterfs.md | 298 +++++++++ .../install-nfs-client.md | 270 ++++++++ .../install-qingcloud-csi.md | 274 ++++++++ .../understand-persistent-storage.md | 49 ++ .../public-cloud/_index.md | 8 + .../install-kubesphere-on-ali-ecs.md | 273 ++++++++ .../install-kubesphere-on-azure-vms.md | 273 ++++++++ .../install-kubesphere-on-huaweicloud-ecs.md | 315 ++++++++++ .../install-kubesphere-on-qingcloud-vms.md | 339 ++++++++++ .../uninstall-kubesphere-and-Kubernetes.md | 23 + content/zh/docs/v3.4/introduction/_index.md | 14 + .../zh/docs/v3.4/introduction/advantages.md | 92 +++ .../zh/docs/v3.4/introduction/architecture.md | 41 ++ .../zh/docs/v3.4/introduction/ecosystem.md | 15 + content/zh/docs/v3.4/introduction/features.md | 173 +++++ .../zh/docs/v3.4/introduction/scenarios.md | 96 +++ .../v3.4/introduction/what's-new-in-3.3.md | 13 + .../v3.4/introduction/what-is-kubesphere.md | 39 ++ .../v3.4/multicluster-management/_index.md | 15 + .../enable-multicluster/_index.md | 7 + .../enable-multicluster/agent-connection.md | 264 ++++++++ .../enable-multicluster/direct-connection.md | 195 ++++++ .../retrieve-kubeconfig.md | 43 ++ .../enable-multicluster/update-kubeconfig.md | 18 + .../import-cloud-hosted-k8s/_index.md | 7 + .../import-aliyun-ack.md | 70 +++ .../import-cloud-hosted-k8s/import-aws-eks.md | 171 +++++ .../import-cloud-hosted-k8s/import-gke.md | 116 ++++ .../introduction/_index.md | 7 + .../introduction/kubefed-in-kubesphere.md | 49 ++ .../introduction/overview.md | 15 + .../multicluster-management/unbind-cluster.md | 61 ++ 
.../docs/v3.4/pluggable-components/_index.md | 13 + .../v3.4/pluggable-components/alerting.md | 97 +++ .../v3.4/pluggable-components/app-store.md | 118 ++++ .../pluggable-components/auditing-logs.md | 182 ++++++ .../docs/v3.4/pluggable-components/devops.md | 127 ++++ .../docs/v3.4/pluggable-components/events.md | 191 ++++++ .../v3.4/pluggable-components/kubeedge.md | 185 ++++++ .../docs/v3.4/pluggable-components/logging.md | 199 ++++++ .../pluggable-components/metrics-server.md | 114 ++++ .../pluggable-components/network-policy.md | 109 ++++ .../v3.4/pluggable-components/overview.md | 98 +++ .../v3.4/pluggable-components/pod-ip-pools.md | 102 +++ .../v3.4/pluggable-components/service-mesh.md | 157 +++++ .../pluggable-components/service-topology.md | 131 ++++ .../uninstall-pluggable-components.md | 204 ++++++ .../v3.4/project-administration/_index.md | 13 + .../container-limit-ranges.md | 49 ++ .../disk-log-collection.md | 78 +++ .../project-and-multicluster-project.md | 97 +++ .../project-administration/project-gateway.md | 64 ++ .../project-network-isolation.md | 210 +++++++ .../role-and-member-management.md | 79 +++ .../zh/docs/v3.4/project-user-guide/_index.md | 12 + .../project-user-guide/alerting/_index.md | 7 + .../alerting/alerting-message.md | 28 + .../alerting/alerting-policy.md | 60 ++ .../application-workloads/_index.md | 7 + .../container-image-settings.md | 268 ++++++++ .../application-workloads/cronjobs.md | 104 +++ .../application-workloads/daemonsets.md | 136 ++++ .../application-workloads/deployments.md | 139 +++++ .../horizontal-pod-autoscaling.md | 103 +++ .../application-workloads/jobs.md | 163 +++++ .../application-workloads/routes.md | 132 ++++ .../application-workloads/services.md | 191 ++++++ .../application-workloads/statefulsets.md | 148 +++++ .../project-user-guide/application/_index.md | 7 + .../application/app-template.md | 33 + .../application/compose-app.md | 96 +++ .../application/deploy-app-from-appstore.md | 62 ++ .../application/deploy-app-from-template.md | 93 +++ .../configuration/_index.md | 7 + .../configuration/configmaps.md | 71 +++ .../configuration/image-registry.md | 104 +++ .../configuration/secrets.md | 121 ++++ .../configuration/serviceaccounts.md | 51 ++ .../custom-application-monitoring/_index.md | 7 + .../examples/_index.md | 7 + .../examples/monitor-mysql.md | 72 +++ .../examples/monitor-sample-web.md | 72 +++ .../introduction.md | 54 ++ .../visualization/_index.md | 7 + .../visualization/overview.md | 71 +++ .../visualization/panel.md | 34 + .../visualization/querying.md | 13 + .../grayscale-release/_index.md | 7 + .../blue-green-deployment.md | 74 +++ .../grayscale-release/canary-release.md | 127 ++++ .../grayscale-release/overview.md | 39 ++ .../grayscale-release/traffic-mirroring.md | 81 +++ .../image-builder/_index.md | 7 + .../image-builder/binary-to-image.md | 148 +++++ .../image-builder/s2i-and-b2i-webhooks.md | 84 +++ .../image-builder/s2i-introduction.md | 39 ++ .../image-builder/s2i-templates.md | 321 ++++++++++ .../image-builder/source-to-image.md | 119 ++++ .../v3.4/project-user-guide/storage/_index.md | 7 + .../storage/volume-snapshots.md | 73 +++ .../project-user-guide/storage/volumes.md | 240 +++++++ content/zh/docs/v3.4/quick-start/_index.md | 14 + .../v3.4/quick-start/all-in-one-on-linux.md | 261 ++++++++ .../create-workspace-and-project.md | 239 +++++++ .../quick-start/deploy-bookinfo-to-k8s.md | 101 +++ .../enable-pluggable-components.md | 146 +++++ .../quick-start/minimal-kubesphere-on-k8s.md | 59 ++ 
.../v3.4/quick-start/wordpress-deployment.md | 137 ++++ content/zh/docs/v3.4/reference/_index.md | 14 + .../docs/v3.4/reference/api-changes/_index.md | 12 + .../v3.4/reference/api-changes/logging.md | 27 + .../v3.4/reference/api-changes/monitoring.md | 110 ++++ content/zh/docs/v3.4/reference/api-docs.md | 125 ++++ .../reference/environment-requirements.md | 37 ++ content/zh/docs/v3.4/reference/glossary.md | 154 +++++ .../storage-system-installation/_index.md | 12 + .../glusterfs-server.md | 517 +++++++++++++++ .../storage-system-installation/nfs-server.md | 102 +++ content/zh/docs/v3.4/release/_index.md | 14 + content/zh/docs/v3.4/release/release-v200.md | 92 +++ content/zh/docs/v3.4/release/release-v201.md | 19 + content/zh/docs/v3.4/release/release-v202.md | 40 ++ content/zh/docs/v3.4/release/release-v210.md | 155 +++++ content/zh/docs/v3.4/release/release-v211.md | 122 ++++ content/zh/docs/v3.4/release/release-v300.md | 206 ++++++ content/zh/docs/v3.4/release/release-v310.md | 175 ++++++ content/zh/docs/v3.4/release/release-v311.md | 171 +++++ content/zh/docs/v3.4/release/release-v320.md | 177 ++++++ content/zh/docs/v3.4/release/release-v321.md | 41 ++ content/zh/docs/v3.4/release/release-v330.md | 89 +++ content/zh/docs/v3.4/release/release-v331.md | 38 ++ content/zh/docs/v3.4/release/release-v332.md | 97 +++ content/zh/docs/v3.4/toolbox/_index.md | 13 + .../zh/docs/v3.4/toolbox/auditing/_index.md | 7 + .../v3.4/toolbox/auditing/auditing-query.md | 85 +++ .../auditing/auditing-receive-customize.md | 180 ++++++ .../v3.4/toolbox/auditing/auditing-rule.md | 207 ++++++ content/zh/docs/v3.4/toolbox/events-query.md | 45 ++ content/zh/docs/v3.4/toolbox/log-query.md | 69 ++ .../toolbox/metering-and-billing/_index.md | 7 + .../metering-and-billing/enable-billing.md | 83 +++ .../view-resource-consumption.md | 73 +++ content/zh/docs/v3.4/toolbox/web-kubectl.md | 42 ++ content/zh/docs/v3.4/upgrade/_index.md | 14 + .../air-gapped-upgrade-with-ks-installer.md | 182 ++++++ .../air-gapped-upgrade-with-kubekey.md | 352 +++++++++++ content/zh/docs/v3.4/upgrade/overview.md | 31 + .../v3.4/upgrade/upgrade-with-ks-installer.md | 40 ++ .../docs/v3.4/upgrade/upgrade-with-kubekey.md | 149 +++++ content/zh/docs/v3.4/upgrade/what-changed.md | 12 + .../v3.4/workspace-administration/_index.md | 17 + .../app-repository/_index.md | 7 + .../app-repository/import-helm-repository.md | 54 ++ .../upload-app-to-public-repository.md | 44 ++ .../department-management.md | 80 +++ .../project-quotas.md | 55 ++ .../role-and-member-management.md | 63 ++ .../upload-helm-based-application.md | 38 ++ .../what-is-workspace.md | 81 +++ .../workspace-network-isolation.md | 37 ++ .../workspace-quotas.md | 41 ++ 649 files changed, 61211 insertions(+) create mode 100644 content/en/docs/v3.4/_index.md create mode 100644 content/en/docs/v3.4/access-control-and-account-management/_index.md create mode 100644 content/en/docs/v3.4/access-control-and-account-management/external-authentication/_index.md create mode 100644 content/en/docs/v3.4/access-control-and-account-management/external-authentication/oidc-identity-provider.md create mode 100644 content/en/docs/v3.4/access-control-and-account-management/external-authentication/set-up-external-authentication.md create mode 100644 content/en/docs/v3.4/access-control-and-account-management/external-authentication/use-an-ldap-service.md create mode 100644 content/en/docs/v3.4/access-control-and-account-management/external-authentication/use-an-oauth2-identity-provider.md create mode 100644 
content/en/docs/v3.4/access-control-and-account-management/multi-tenancy-in-kubesphere.md create mode 100644 content/en/docs/v3.4/application-store/_index.md create mode 100644 content/en/docs/v3.4/application-store/app-developer-guide/_index.md create mode 100644 content/en/docs/v3.4/application-store/app-developer-guide/helm-developer-guide.md create mode 100644 content/en/docs/v3.4/application-store/app-developer-guide/helm-specification.md create mode 100644 content/en/docs/v3.4/application-store/app-lifecycle-management.md create mode 100644 content/en/docs/v3.4/application-store/built-in-apps/_index.md create mode 100644 content/en/docs/v3.4/application-store/built-in-apps/deploy-chaos-mesh.md create mode 100644 content/en/docs/v3.4/application-store/built-in-apps/etcd-app.md create mode 100644 content/en/docs/v3.4/application-store/built-in-apps/harbor-app.md create mode 100644 content/en/docs/v3.4/application-store/built-in-apps/memcached-app.md create mode 100644 content/en/docs/v3.4/application-store/built-in-apps/meshery-app.md create mode 100644 content/en/docs/v3.4/application-store/built-in-apps/minio-app.md create mode 100644 content/en/docs/v3.4/application-store/built-in-apps/mongodb-app.md create mode 100644 content/en/docs/v3.4/application-store/built-in-apps/mysql-app.md create mode 100644 content/en/docs/v3.4/application-store/built-in-apps/nginx-app.md create mode 100644 content/en/docs/v3.4/application-store/built-in-apps/postgresql-app.md create mode 100644 content/en/docs/v3.4/application-store/built-in-apps/rabbitmq-app.md create mode 100644 content/en/docs/v3.4/application-store/built-in-apps/radondb-mysql-app.md create mode 100644 content/en/docs/v3.4/application-store/built-in-apps/radondb-postgresql-app.md create mode 100644 content/en/docs/v3.4/application-store/built-in-apps/redis-app.md create mode 100644 content/en/docs/v3.4/application-store/built-in-apps/tomcat-app.md create mode 100644 content/en/docs/v3.4/application-store/external-apps/_index.md create mode 100644 content/en/docs/v3.4/application-store/external-apps/deploy-clickhouse.md create mode 100644 content/en/docs/v3.4/application-store/external-apps/deploy-gitlab.md create mode 100644 content/en/docs/v3.4/application-store/external-apps/deploy-litmus.md create mode 100644 content/en/docs/v3.4/application-store/external-apps/deploy-metersphere.md create mode 100644 content/en/docs/v3.4/application-store/external-apps/deploy-radondb-mysql.md create mode 100644 content/en/docs/v3.4/application-store/external-apps/deploy-tidb.md create mode 100644 content/en/docs/v3.4/cluster-administration/_index.md create mode 100644 content/en/docs/v3.4/cluster-administration/application-resources-monitoring.md create mode 100644 content/en/docs/v3.4/cluster-administration/cluster-settings/_index.md create mode 100644 content/en/docs/v3.4/cluster-administration/cluster-settings/cluster-gateway.md create mode 100644 content/en/docs/v3.4/cluster-administration/cluster-settings/cluster-visibility-and-authorization.md create mode 100644 content/en/docs/v3.4/cluster-administration/cluster-settings/log-collections/_index.md create mode 100644 content/en/docs/v3.4/cluster-administration/cluster-settings/log-collections/add-es-as-receiver.md create mode 100644 content/en/docs/v3.4/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver.md create mode 100644 content/en/docs/v3.4/cluster-administration/cluster-settings/log-collections/add-kafka-as-receiver.md create mode 100644 
content/en/docs/v3.4/cluster-administration/cluster-settings/log-collections/introduction.md create mode 100644 content/en/docs/v3.4/cluster-administration/cluster-status-monitoring.md create mode 100644 content/en/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/_index.md create mode 100644 content/en/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/alerting-message.md create mode 100644 content/en/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy.md create mode 100644 content/en/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/alertmanager.md create mode 100644 content/en/docs/v3.4/cluster-administration/nodes.md create mode 100644 content/en/docs/v3.4/cluster-administration/platform-settings/notification-management/_index.md create mode 100644 content/en/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-dingtalk.md create mode 100644 content/en/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-email.md create mode 100644 content/en/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-slack.md create mode 100644 content/en/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-webhook.md create mode 100644 content/en/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-wecom.md create mode 100644 content/en/docs/v3.4/cluster-administration/platform-settings/notification-management/customize-cluster-name.md create mode 100644 content/en/docs/v3.4/cluster-administration/shut-down-and-restart-cluster-gracefully.md create mode 100644 content/en/docs/v3.4/cluster-administration/snapshotclass.md create mode 100644 content/en/docs/v3.4/cluster-administration/storageclass.md create mode 100644 content/en/docs/v3.4/devops-user-guide/_index.md create mode 100644 content/en/docs/v3.4/devops-user-guide/devops-overview/_index.md create mode 100644 content/en/docs/v3.4/devops-user-guide/devops-overview/devops-project-management.md create mode 100644 content/en/docs/v3.4/devops-user-guide/devops-overview/overview.md create mode 100644 content/en/docs/v3.4/devops-user-guide/examples/_index.md create mode 100644 content/en/docs/v3.4/devops-user-guide/examples/a-maven-project.md create mode 100644 content/en/docs/v3.4/devops-user-guide/examples/create-multi-cluster-pipeline.md create mode 100644 content/en/docs/v3.4/devops-user-guide/examples/go-project-pipeline.md create mode 100644 content/en/docs/v3.4/devops-user-guide/examples/multi-cluster-project-example.md create mode 100644 content/en/docs/v3.4/devops-user-guide/examples/use-nexus-in-pipelines.md create mode 100644 content/en/docs/v3.4/devops-user-guide/how-to-integrate/_index.md create mode 100644 content/en/docs/v3.4/devops-user-guide/how-to-integrate/harbor.md create mode 100644 content/en/docs/v3.4/devops-user-guide/how-to-integrate/sonarqube.md create mode 100644 content/en/docs/v3.4/devops-user-guide/how-to-use/_index.md create mode 100644 content/en/docs/v3.4/devops-user-guide/how-to-use/code-repositories/_index.md create mode 100755 content/en/docs/v3.4/devops-user-guide/how-to-use/code-repositories/import-code-repositories.md create mode 100644 content/en/docs/v3.4/devops-user-guide/how-to-use/continuous-deployments/_index.md create mode 100755 content/en/docs/v3.4/devops-user-guide/how-to-use/continuous-deployments/use-gitops-for-continous-deployment.md create mode 100644 
content/en/docs/v3.4/devops-user-guide/how-to-use/devops-settings/_index.md create mode 100644 content/en/docs/v3.4/devops-user-guide/how-to-use/devops-settings/add-cd-allowlist.md create mode 100644 content/en/docs/v3.4/devops-user-guide/how-to-use/devops-settings/credential-management.md create mode 100644 content/en/docs/v3.4/devops-user-guide/how-to-use/devops-settings/role-and-member-management.md create mode 100644 content/en/docs/v3.4/devops-user-guide/how-to-use/devops-settings/set-ci-node.md create mode 100644 content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/_index.md create mode 100644 content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/choose-jenkins-agent.md create mode 100644 content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-graphical-editing-panel.md create mode 100644 content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-jenkinsfile.md create mode 100644 content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/customize-jenkins-agent.md create mode 100644 content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/gitlab-multibranch-pipeline.md create mode 100644 content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/jenkins-email.md create mode 100644 content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/jenkins-setting.md create mode 100644 content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/jenkins-shared-library.md create mode 100644 content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/pipeline-settings.md create mode 100644 content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/pipeline-webhook.md create mode 100644 content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/use-pipeline-templates.md create mode 100644 content/en/docs/v3.4/faq/_index.md create mode 100644 content/en/docs/v3.4/faq/access-control/_index.md create mode 100644 content/en/docs/v3.4/faq/access-control/add-kubernetes-namespace-to-kubesphere-workspace.md create mode 100644 content/en/docs/v3.4/faq/access-control/cannot-login.md create mode 100644 content/en/docs/v3.4/faq/access-control/forgot-password.md create mode 100644 content/en/docs/v3.4/faq/access-control/session-timeout.md create mode 100644 content/en/docs/v3.4/faq/applications/_index.md create mode 100644 content/en/docs/v3.4/faq/applications/remove-built-in-apps.md create mode 100644 content/en/docs/v3.4/faq/console/_index.md create mode 100644 content/en/docs/v3.4/faq/console/change-console-language.md create mode 100644 content/en/docs/v3.4/faq/console/console-web-browser.md create mode 100644 content/en/docs/v3.4/faq/console/edit-resources-in-system-workspace.md create mode 100644 content/en/docs/v3.4/faq/devops/_index.md create mode 100644 content/en/docs/v3.4/faq/devops/create-devops-kubeconfig-on-aws.md create mode 100644 content/en/docs/v3.4/faq/devops/install-jenkins-plugins.md create mode 100644 content/en/docs/v3.4/faq/installation/_index.md create mode 100644 content/en/docs/v3.4/faq/installation/configure-booster.md create mode 100644 content/en/docs/v3.4/faq/installation/install-addon-through-yaml-using-kubekey.md create mode 100644 content/en/docs/v3.4/faq/installation/ssh-connection-failure.md create mode 100644 content/en/docs/v3.4/faq/installation/telemetry.md create mode 100644 content/en/docs/v3.4/faq/multi-cluster-management/_index.md create mode 100644 content/en/docs/v3.4/faq/multi-cluster-management/host-cluster-access-member-cluster.md create mode 100644 
content/en/docs/v3.4/faq/multi-cluster-management/manage-multi-cluster.md create mode 100644 content/en/docs/v3.4/faq/observability/_index.md create mode 100644 content/en/docs/v3.4/faq/observability/byop.md create mode 100644 content/en/docs/v3.4/faq/observability/logging.md create mode 100644 content/en/docs/v3.4/faq/observability/monitoring.md create mode 100644 content/en/docs/v3.4/faq/upgrade/_index.md create mode 100644 content/en/docs/v3.4/faq/upgrade/qingcloud-csi-upgrade.md create mode 100644 content/en/docs/v3.4/installing-on-kubernetes/_index.md create mode 100644 content/en/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/_index.md create mode 100644 content/en/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks.md create mode 100644 content/en/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do.md create mode 100644 content/en/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks.md create mode 100644 content/en/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke.md create mode 100644 content/en/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-huaweicloud-cce.md create mode 100644 content/en/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke.md create mode 100644 content/en/docs/v3.4/installing-on-kubernetes/introduction/_index.md create mode 100644 content/en/docs/v3.4/installing-on-kubernetes/introduction/overview.md create mode 100644 content/en/docs/v3.4/installing-on-kubernetes/introduction/prerequisites.md create mode 100644 content/en/docs/v3.4/installing-on-kubernetes/on-prem-kubernetes/_index.md create mode 100644 content/en/docs/v3.4/installing-on-kubernetes/on-prem-kubernetes/install-ks-on-linux-airgapped.md create mode 100644 content/en/docs/v3.4/installing-on-kubernetes/uninstall-kubesphere-from-k8s.md create mode 100644 content/en/docs/v3.4/installing-on-linux/_index.md create mode 100644 content/en/docs/v3.4/installing-on-linux/cluster-operation/_index.md create mode 100644 content/en/docs/v3.4/installing-on-linux/cluster-operation/add-edge-nodes.md create mode 100644 content/en/docs/v3.4/installing-on-linux/cluster-operation/add-new-nodes.md create mode 100644 content/en/docs/v3.4/installing-on-linux/cluster-operation/remove-nodes.md create mode 100644 content/en/docs/v3.4/installing-on-linux/high-availability-configurations/_index.md create mode 100644 content/en/docs/v3.4/installing-on-linux/high-availability-configurations/ha-configuration.md create mode 100644 content/en/docs/v3.4/installing-on-linux/high-availability-configurations/internal-ha-configuration.md create mode 100644 content/en/docs/v3.4/installing-on-linux/high-availability-configurations/set-up-ha-cluster-using-keepalived-haproxy.md create mode 100644 content/en/docs/v3.4/installing-on-linux/introduction/_index.md create mode 100644 content/en/docs/v3.4/installing-on-linux/introduction/air-gapped-installation.md create mode 100644 content/en/docs/v3.4/installing-on-linux/introduction/intro.md create mode 100644 content/en/docs/v3.4/installing-on-linux/introduction/kubekey.md create mode 100644 content/en/docs/v3.4/installing-on-linux/introduction/multioverview.md create mode 100644 content/en/docs/v3.4/installing-on-linux/introduction/port-firewall.md create mode 100644 content/en/docs/v3.4/installing-on-linux/introduction/vars.md create mode 100644 content/en/docs/v3.4/installing-on-linux/on-premises/_index.md create mode 100644 
content/en/docs/v3.4/installing-on-linux/on-premises/install-kubesphere-and-k3s.md create mode 100644 content/en/docs/v3.4/installing-on-linux/on-premises/install-kubesphere-on-bare-metal.md create mode 100644 content/en/docs/v3.4/installing-on-linux/on-premises/install-kubesphere-on-vmware-vsphere.md create mode 100644 content/en/docs/v3.4/installing-on-linux/on-premises/installing-kubesphere-on-minikube.md create mode 100644 content/en/docs/v3.4/installing-on-linux/persistent-storage-configurations/_index.md create mode 100644 content/en/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-ceph-csi-rbd.md create mode 100644 content/en/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-glusterfs.md create mode 100644 content/en/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-nfs-client.md create mode 100644 content/en/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-qingcloud-csi.md create mode 100644 content/en/docs/v3.4/installing-on-linux/persistent-storage-configurations/understand-persistent-storage.md create mode 100644 content/en/docs/v3.4/installing-on-linux/public-cloud/_index.md create mode 100644 content/en/docs/v3.4/installing-on-linux/public-cloud/install-kubesphere-on-azure-vms.md create mode 100644 content/en/docs/v3.4/installing-on-linux/public-cloud/install-kubesphere-on-qingcloud-vms.md create mode 100644 content/en/docs/v3.4/installing-on-linux/uninstall-kubesphere-and-Kubernetes.md create mode 100644 content/en/docs/v3.4/introduction/_index.md create mode 100644 content/en/docs/v3.4/introduction/advantages.md create mode 100644 content/en/docs/v3.4/introduction/architecture.md create mode 100644 content/en/docs/v3.4/introduction/ecosystem.md create mode 100644 content/en/docs/v3.4/introduction/features.md create mode 100644 content/en/docs/v3.4/introduction/scen create mode 100644 content/en/docs/v3.4/introduction/scenarios.md create mode 100644 content/en/docs/v3.4/introduction/what's-new-in-3.3.md create mode 100644 content/en/docs/v3.4/introduction/what-is-kubesphere.md create mode 100644 content/en/docs/v3.4/multicluster-management/_index.md create mode 100644 content/en/docs/v3.4/multicluster-management/enable-multicluster/_index.md create mode 100644 content/en/docs/v3.4/multicluster-management/enable-multicluster/agent-connection.md create mode 100644 content/en/docs/v3.4/multicluster-management/enable-multicluster/direct-connection.md create mode 100644 content/en/docs/v3.4/multicluster-management/enable-multicluster/retrieve-kubeconfig.md create mode 100644 content/en/docs/v3.4/multicluster-management/enable-multicluster/update-kubeconfig.md create mode 100644 content/en/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/_index.md create mode 100644 content/en/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/import-aliyun-ack.md create mode 100644 content/en/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/import-aws-eks.md create mode 100644 content/en/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/import-gke.md create mode 100644 content/en/docs/v3.4/multicluster-management/introduction/_index.md create mode 100644 content/en/docs/v3.4/multicluster-management/introduction/kubefed-in-kubesphere.md create mode 100644 content/en/docs/v3.4/multicluster-management/introduction/overview.md create mode 100644 content/en/docs/v3.4/multicluster-management/unbind-cluster.md create mode 100644 content/en/docs/v3.4/pluggable-components/_index.md 
create mode 100644 content/en/docs/v3.4/pluggable-components/alerting.md create mode 100644 content/en/docs/v3.4/pluggable-components/app-store.md create mode 100644 content/en/docs/v3.4/pluggable-components/auditing-logs.md create mode 100644 content/en/docs/v3.4/pluggable-components/devops.md create mode 100644 content/en/docs/v3.4/pluggable-components/events.md create mode 100644 content/en/docs/v3.4/pluggable-components/kubeedge.md create mode 100644 content/en/docs/v3.4/pluggable-components/logging.md create mode 100644 content/en/docs/v3.4/pluggable-components/metrics-server.md create mode 100644 content/en/docs/v3.4/pluggable-components/network-policy.md create mode 100644 content/en/docs/v3.4/pluggable-components/overview.md create mode 100644 content/en/docs/v3.4/pluggable-components/pod-ip-pools.md create mode 100644 content/en/docs/v3.4/pluggable-components/service-mesh.md create mode 100644 content/en/docs/v3.4/pluggable-components/service-topology.md create mode 100644 content/en/docs/v3.4/pluggable-components/uninstall-pluggable-components.md create mode 100644 content/en/docs/v3.4/project-administration/_index.md create mode 100644 content/en/docs/v3.4/project-administration/container-limit-ranges.md create mode 100644 content/en/docs/v3.4/project-administration/disk-log-collection.md create mode 100644 content/en/docs/v3.4/project-administration/project-and-multicluster-project.md create mode 100644 content/en/docs/v3.4/project-administration/project-gateway.md create mode 100644 content/en/docs/v3.4/project-administration/project-network-isolation.md create mode 100644 content/en/docs/v3.4/project-administration/role-and-member-management.md create mode 100644 content/en/docs/v3.4/project-user-guide/_index.md create mode 100644 content/en/docs/v3.4/project-user-guide/alerting/_index.md create mode 100644 content/en/docs/v3.4/project-user-guide/alerting/alerting-message.md create mode 100644 content/en/docs/v3.4/project-user-guide/alerting/alerting-policy.md create mode 100644 content/en/docs/v3.4/project-user-guide/application-workloads/_index.md create mode 100644 content/en/docs/v3.4/project-user-guide/application-workloads/container-image-settings.md create mode 100644 content/en/docs/v3.4/project-user-guide/application-workloads/cronjobs.md create mode 100644 content/en/docs/v3.4/project-user-guide/application-workloads/daemonsets.md create mode 100644 content/en/docs/v3.4/project-user-guide/application-workloads/deployments.md create mode 100755 content/en/docs/v3.4/project-user-guide/application-workloads/horizontal-pod-autoscaling.md create mode 100644 content/en/docs/v3.4/project-user-guide/application-workloads/jobs.md create mode 100644 content/en/docs/v3.4/project-user-guide/application-workloads/routes.md create mode 100644 content/en/docs/v3.4/project-user-guide/application-workloads/services.md create mode 100644 content/en/docs/v3.4/project-user-guide/application-workloads/statefulsets.md create mode 100644 content/en/docs/v3.4/project-user-guide/application/_index.md create mode 100644 content/en/docs/v3.4/project-user-guide/application/app-template.md create mode 100644 content/en/docs/v3.4/project-user-guide/application/compose-app.md create mode 100644 content/en/docs/v3.4/project-user-guide/application/deploy-app-from-appstore.md create mode 100644 content/en/docs/v3.4/project-user-guide/application/deploy-app-from-template.md create mode 100644 content/en/docs/v3.4/project-user-guide/configuration/_index.md create mode 100644 
content/en/docs/v3.4/project-user-guide/configuration/configmaps.md create mode 100644 content/en/docs/v3.4/project-user-guide/configuration/image-registry.md create mode 100644 content/en/docs/v3.4/project-user-guide/configuration/secrets.md create mode 100644 content/en/docs/v3.4/project-user-guide/configuration/serviceaccounts.md create mode 100644 content/en/docs/v3.4/project-user-guide/custom-application-monitoring/_index.md create mode 100644 content/en/docs/v3.4/project-user-guide/custom-application-monitoring/examples/_index.md create mode 100644 content/en/docs/v3.4/project-user-guide/custom-application-monitoring/examples/monitor-mysql.md create mode 100644 content/en/docs/v3.4/project-user-guide/custom-application-monitoring/examples/monitor-sample-web.md create mode 100644 content/en/docs/v3.4/project-user-guide/custom-application-monitoring/introduction.md create mode 100644 content/en/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/_index.md create mode 100644 content/en/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/overview.md create mode 100644 content/en/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/panel.md create mode 100644 content/en/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/querying.md create mode 100644 content/en/docs/v3.4/project-user-guide/grayscale-release/_index.md create mode 100644 content/en/docs/v3.4/project-user-guide/grayscale-release/blue-green-deployment.md create mode 100644 content/en/docs/v3.4/project-user-guide/grayscale-release/canary-release.md create mode 100644 content/en/docs/v3.4/project-user-guide/grayscale-release/overview.md create mode 100644 content/en/docs/v3.4/project-user-guide/grayscale-release/traffic-mirroring.md create mode 100644 content/en/docs/v3.4/project-user-guide/image-builder/_index.md create mode 100644 content/en/docs/v3.4/project-user-guide/image-builder/binary-to-image.md create mode 100644 content/en/docs/v3.4/project-user-guide/image-builder/s2i-and-b2i-webhooks.md create mode 100644 content/en/docs/v3.4/project-user-guide/image-builder/s2i-introduction.md create mode 100644 content/en/docs/v3.4/project-user-guide/image-builder/s2i-templates.md create mode 100644 content/en/docs/v3.4/project-user-guide/image-builder/source-to-image.md create mode 100644 content/en/docs/v3.4/project-user-guide/storage/_index.md create mode 100644 content/en/docs/v3.4/project-user-guide/storage/volume-snapshots.md create mode 100644 content/en/docs/v3.4/project-user-guide/storage/volumes.md create mode 100644 content/en/docs/v3.4/quick-start/_index.md create mode 100644 content/en/docs/v3.4/quick-start/all-in-one-on-linux.md create mode 100644 content/en/docs/v3.4/quick-start/create-workspace-and-project.md create mode 100644 content/en/docs/v3.4/quick-start/deploy-bookinfo-to-k8s.md create mode 100644 content/en/docs/v3.4/quick-start/enable-pluggable-components.md create mode 100644 content/en/docs/v3.4/quick-start/minimal-kubesphere-on-k8s.md create mode 100644 content/en/docs/v3.4/quick-start/wordpress-deployment.md create mode 100644 content/en/docs/v3.4/reference/_index.md create mode 100644 content/en/docs/v3.4/reference/api-changes/_index.md create mode 100644 content/en/docs/v3.4/reference/api-changes/logging.md create mode 100644 content/en/docs/v3.4/reference/api-changes/monitoring.md create mode 100644 content/en/docs/v3.4/reference/api-changes/notification.md create mode 100644 
content/en/docs/v3.4/reference/api-docs.md create mode 100644 content/en/docs/v3.4/reference/environment-requirements.md create mode 100644 content/en/docs/v3.4/reference/glossary.md create mode 100644 content/en/docs/v3.4/reference/storage-system-installation/_index.md create mode 100644 content/en/docs/v3.4/reference/storage-system-installation/glusterfs-server.md create mode 100644 content/en/docs/v3.4/reference/storage-system-installation/nfs-server.md create mode 100644 content/en/docs/v3.4/release/_index.md create mode 100644 content/en/docs/v3.4/release/release-v200.md create mode 100644 content/en/docs/v3.4/release/release-v201.md create mode 100644 content/en/docs/v3.4/release/release-v202.md create mode 100644 content/en/docs/v3.4/release/release-v210.md create mode 100644 content/en/docs/v3.4/release/release-v211.md create mode 100644 content/en/docs/v3.4/release/release-v300.md create mode 100644 content/en/docs/v3.4/release/release-v310.md create mode 100644 content/en/docs/v3.4/release/release-v311.md create mode 100644 content/en/docs/v3.4/release/release-v320.md create mode 100644 content/en/docs/v3.4/release/release-v321.md create mode 100644 content/en/docs/v3.4/release/release-v330.md create mode 100644 content/en/docs/v3.4/release/release-v331.md create mode 100644 content/en/docs/v3.4/release/release-v332.md create mode 100644 content/en/docs/v3.4/toolbox/_index.md create mode 100644 content/en/docs/v3.4/toolbox/auditing/_index.md create mode 100644 content/en/docs/v3.4/toolbox/auditing/auditing-query.md create mode 100644 content/en/docs/v3.4/toolbox/auditing/auditing-receive-customize.md create mode 100644 content/en/docs/v3.4/toolbox/auditing/auditing-rule.md create mode 100644 content/en/docs/v3.4/toolbox/events-query.md create mode 100644 content/en/docs/v3.4/toolbox/log-query.md create mode 100644 content/en/docs/v3.4/toolbox/metering-and-billing/_index.md create mode 100644 content/en/docs/v3.4/toolbox/metering-and-billing/enable-billing.md create mode 100644 content/en/docs/v3.4/toolbox/metering-and-billing/view-resource-consumption.md create mode 100644 content/en/docs/v3.4/toolbox/web-kubectl.md create mode 100644 content/en/docs/v3.4/upgrade/_index.md create mode 100644 content/en/docs/v3.4/upgrade/air-gapped-upgrade-with-ks-installer.md create mode 100644 content/en/docs/v3.4/upgrade/air-gapped-upgrade-with-kubekey.md create mode 100644 content/en/docs/v3.4/upgrade/overview.md create mode 100644 content/en/docs/v3.4/upgrade/upgrade-with-ks-installer.md create mode 100644 content/en/docs/v3.4/upgrade/upgrade-with-kubekey.md create mode 100644 content/en/docs/v3.4/upgrade/what-changed.md create mode 100644 content/en/docs/v3.4/workspace-administration/_index.md create mode 100644 content/en/docs/v3.4/workspace-administration/app-repository/_index.md create mode 100644 content/en/docs/v3.4/workspace-administration/app-repository/import-helm-repository.md create mode 100644 content/en/docs/v3.4/workspace-administration/app-repository/upload-app-to-public-repository.md create mode 100644 content/en/docs/v3.4/workspace-administration/department-management.md create mode 100644 content/en/docs/v3.4/workspace-administration/project-quotas.md create mode 100644 content/en/docs/v3.4/workspace-administration/role-and-member-management.md create mode 100644 content/en/docs/v3.4/workspace-administration/upload-helm-based-application.md create mode 100644 content/en/docs/v3.4/workspace-administration/what-is-workspace.md create mode 100644 
content/en/docs/v3.4/workspace-administration/workspace-network-isolation.md create mode 100644 content/en/docs/v3.4/workspace-administration/workspace-quotas.md create mode 100644 content/zh/docs/v3.4/_index.md create mode 100644 content/zh/docs/v3.4/access-control-and-account-management/_index.md create mode 100644 content/zh/docs/v3.4/access-control-and-account-management/external-authentication/_index.md create mode 100644 content/zh/docs/v3.4/access-control-and-account-management/external-authentication/cas-identity-provider.md create mode 100644 content/zh/docs/v3.4/access-control-and-account-management/external-authentication/oidc-identity-provider.md create mode 100644 content/zh/docs/v3.4/access-control-and-account-management/external-authentication/set-up-external-authentication.md create mode 100644 content/zh/docs/v3.4/access-control-and-account-management/external-authentication/use-an-ldap-service.md create mode 100644 content/zh/docs/v3.4/access-control-and-account-management/external-authentication/use-an-oauth2-identity-provider.md create mode 100644 content/zh/docs/v3.4/access-control-and-account-management/multi-tenancy-in-kubesphere.md create mode 100644 content/zh/docs/v3.4/application-store/_index.md create mode 100644 content/zh/docs/v3.4/application-store/app-developer-guide/_index.md create mode 100644 content/zh/docs/v3.4/application-store/app-developer-guide/helm-developer-guide.md create mode 100644 content/zh/docs/v3.4/application-store/app-developer-guide/helm-specification.md create mode 100644 content/zh/docs/v3.4/application-store/app-lifecycle-management.md create mode 100644 content/zh/docs/v3.4/application-store/built-in-apps/_index.md create mode 100644 content/zh/docs/v3.4/application-store/built-in-apps/chaos-mesh-app.md create mode 100644 content/zh/docs/v3.4/application-store/built-in-apps/etcd-app.md create mode 100644 content/zh/docs/v3.4/application-store/built-in-apps/harbor-app.md create mode 100644 content/zh/docs/v3.4/application-store/built-in-apps/jh-gitlab.md create mode 100644 content/zh/docs/v3.4/application-store/built-in-apps/memcached-app.md create mode 100644 content/zh/docs/v3.4/application-store/built-in-apps/minio-app.md create mode 100644 content/zh/docs/v3.4/application-store/built-in-apps/mongodb-app.md create mode 100644 content/zh/docs/v3.4/application-store/built-in-apps/mysql-app.md create mode 100644 content/zh/docs/v3.4/application-store/built-in-apps/nginx-app.md create mode 100644 content/zh/docs/v3.4/application-store/built-in-apps/postgresql-app.md create mode 100644 content/zh/docs/v3.4/application-store/built-in-apps/rabbitmq-app.md create mode 100644 content/zh/docs/v3.4/application-store/built-in-apps/radondb-mysql-app.md create mode 100644 content/zh/docs/v3.4/application-store/built-in-apps/radondb-postgresql-app.md create mode 100644 content/zh/docs/v3.4/application-store/built-in-apps/redis-app.md create mode 100644 content/zh/docs/v3.4/application-store/built-in-apps/tomcat-app.md create mode 100644 content/zh/docs/v3.4/application-store/external-apps/_index.md create mode 100644 content/zh/docs/v3.4/application-store/external-apps/deploy-clickhouse.md create mode 100644 content/zh/docs/v3.4/application-store/external-apps/deploy-gitlab.md create mode 100644 content/zh/docs/v3.4/application-store/external-apps/deploy-metersphere.md create mode 100644 content/zh/docs/v3.4/application-store/external-apps/deploy-radondb-mysql.md create mode 100644 
content/zh/docs/v3.4/application-store/external-apps/deploy-tidb.md create mode 100644 content/zh/docs/v3.4/cluster-administration/_index.md create mode 100644 content/zh/docs/v3.4/cluster-administration/application-resources-monitoring.md create mode 100644 content/zh/docs/v3.4/cluster-administration/cluster-settings/_index.md create mode 100644 content/zh/docs/v3.4/cluster-administration/cluster-settings/cluster-gateway.md create mode 100644 content/zh/docs/v3.4/cluster-administration/cluster-settings/cluster-visibility-and-authorization.md create mode 100644 content/zh/docs/v3.4/cluster-administration/cluster-settings/log-collections/_index.md create mode 100644 content/zh/docs/v3.4/cluster-administration/cluster-settings/log-collections/add-es-as-receiver.md create mode 100644 content/zh/docs/v3.4/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver.md create mode 100644 content/zh/docs/v3.4/cluster-administration/cluster-settings/log-collections/add-kafka-as-receiver.md create mode 100644 content/zh/docs/v3.4/cluster-administration/cluster-settings/log-collections/introduction.md create mode 100644 content/zh/docs/v3.4/cluster-administration/cluster-status-monitoring.md create mode 100644 content/zh/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/_index.md create mode 100644 content/zh/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/alerting-message.md create mode 100644 content/zh/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy.md create mode 100644 content/zh/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/alertmanager.md create mode 100644 content/zh/docs/v3.4/cluster-administration/nodes.md create mode 100644 content/zh/docs/v3.4/cluster-administration/platform-settings/notification-management/_index.md create mode 100644 content/zh/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-dingtalk.md create mode 100644 content/zh/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-email.md create mode 100644 content/zh/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-slack.md create mode 100644 content/zh/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-webhook.md create mode 100644 content/zh/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-wecom.md create mode 100644 content/zh/docs/v3.4/cluster-administration/platform-settings/notification-management/customize-cluster-name.md create mode 100644 content/zh/docs/v3.4/cluster-administration/shut-down-and-restart-cluster-gracefully.md create mode 100644 content/zh/docs/v3.4/cluster-administration/snapshotclass.md create mode 100644 content/zh/docs/v3.4/cluster-administration/storageclass.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/_index.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/devops-overview/_index.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/devops-overview/devops-project-management.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/devops-overview/overview.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/examples/_index.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/examples/a-maven-project.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/examples/create-multi-cluster-pipeline.md create mode 100644 
content/zh/docs/v3.4/devops-user-guide/examples/go-project-pipeline.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/examples/multi-cluster-project-example.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/examples/use-nexus-in-pipelines.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/how-to-integrate/_index.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/how-to-integrate/harbor.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/how-to-integrate/sonarqube.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/how-to-use/_index.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/how-to-use/code-repositories/_index.md create mode 100755 content/zh/docs/v3.4/devops-user-guide/how-to-use/code-repositories/import-code-repositories.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/how-to-use/continuous-deployments/_index.md create mode 100755 content/zh/docs/v3.4/devops-user-guide/how-to-use/continuous-deployments/use-gitops-for-continous-deployment.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/how-to-use/devops-settings/_index.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/how-to-use/devops-settings/add-cd-allowlist.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/how-to-use/devops-settings/credential-management.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/how-to-use/devops-settings/role-and-member-management.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/how-to-use/devops-settings/set-ci-node.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/_index.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/choose-jenkins-agent.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-graphical-editing-panel.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-jenkinsfile.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/customize-jenkins-agent.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/gitlab-multibranch-pipeline.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/jenkins-email.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/jenkins-setting.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/jenkins-shared-library.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/pipeline-settings.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/pipeline-webhook.md create mode 100644 content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/use-pipeline-templates.md create mode 100644 content/zh/docs/v3.4/faq/_index.md create mode 100644 content/zh/docs/v3.4/faq/access-control/_index.md create mode 100644 content/zh/docs/v3.4/faq/access-control/add-kubernetes-namespace-to-kubesphere-workspace.md create mode 100644 content/zh/docs/v3.4/faq/access-control/cannot-login.md create mode 100644 content/zh/docs/v3.4/faq/access-control/forgot-password.md create mode 100644 content/zh/docs/v3.4/faq/access-control/session-timeout.md create mode 100644 content/zh/docs/v3.4/faq/applications/_index.md create mode 100644 content/zh/docs/v3.4/faq/applications/remove-built-in-apps.md create mode 100644 content/zh/docs/v3.4/faq/console/_index.md create mode 100644 
content/zh/docs/v3.4/faq/console/change-console-language.md create mode 100644 content/zh/docs/v3.4/faq/console/console-web-browser.md create mode 100644 content/zh/docs/v3.4/faq/console/edit-resources-in-system-workspace.md create mode 100644 content/zh/docs/v3.4/faq/devops/_index.md create mode 100644 content/zh/docs/v3.4/faq/devops/create-devops-kubeconfig-on-aws.md create mode 100644 content/zh/docs/v3.4/faq/devops/install-jenkins-plugins.md create mode 100644 content/zh/docs/v3.4/faq/installation/_index.md create mode 100644 content/zh/docs/v3.4/faq/installation/configure-booster.md create mode 100644 content/zh/docs/v3.4/faq/installation/install-addon-through-yaml-using-kubekey.md create mode 100644 content/zh/docs/v3.4/faq/installation/ssh-connection-failure.md create mode 100644 content/zh/docs/v3.4/faq/installation/telemetry.md create mode 100644 content/zh/docs/v3.4/faq/multi-cluster-management/_index.md create mode 100644 content/zh/docs/v3.4/faq/multi-cluster-management/host-cluster-access-member-cluster.md create mode 100644 content/zh/docs/v3.4/faq/multi-cluster-management/manage-multi-cluster.md create mode 100644 content/zh/docs/v3.4/faq/observability/_index.md create mode 100644 content/zh/docs/v3.4/faq/observability/byop.md create mode 100644 content/zh/docs/v3.4/faq/observability/logging.md create mode 100644 content/zh/docs/v3.4/faq/observability/monitoring.md create mode 100644 content/zh/docs/v3.4/faq/upgrade/_index.md create mode 100644 content/zh/docs/v3.4/faq/upgrade/qingcloud-csi-upgrade.md create mode 100644 content/zh/docs/v3.4/installing-on-kubernetes/_index.md create mode 100644 content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/_index.md create mode 100644 content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-ks-on-tencent-tke.md create mode 100644 content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-ack.md create mode 100644 content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks.md create mode 100644 content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do.md create mode 100644 content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks.md create mode 100644 content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke.md create mode 100644 content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-huaweicloud-cce.md create mode 100644 content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke.md create mode 100644 content/zh/docs/v3.4/installing-on-kubernetes/introduction/_index.md create mode 100644 content/zh/docs/v3.4/installing-on-kubernetes/introduction/overview.md create mode 100644 content/zh/docs/v3.4/installing-on-kubernetes/introduction/prerequisites.md create mode 100644 content/zh/docs/v3.4/installing-on-kubernetes/on-prem-kubernetes/_index.md create mode 100644 content/zh/docs/v3.4/installing-on-kubernetes/on-prem-kubernetes/install-ks-on-linux-airgapped.md create mode 100644 content/zh/docs/v3.4/installing-on-kubernetes/uninstall-kubesphere-from-k8s.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/_index.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/cluster-operation/_index.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/cluster-operation/add-edge-nodes.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/cluster-operation/add-new-nodes.md create 
mode 100644 content/zh/docs/v3.4/installing-on-linux/cluster-operation/remove-nodes.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/high-availability-configurations/_index.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/high-availability-configurations/ha-configuration.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/high-availability-configurations/internal-ha-configuration.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/high-availability-configurations/set-up-ha-cluster-using-keepalived-haproxy.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/introduction/_index.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/introduction/air-gapped-installation.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/introduction/intro.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/introduction/kubekey.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/introduction/multioverview.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/introduction/port-firewall.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/introduction/vars.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/on-premises/_index.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/on-premises/install-kubesphere-and-k3s.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/on-premises/install-kubesphere-on-bare-metal.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/on-premises/install-kubesphere-on-vmware-vsphere.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/persistent-storage-configurations/_index.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-ceph-csi-rbd.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-glusterfs.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-nfs-client.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-qingcloud-csi.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/persistent-storage-configurations/understand-persistent-storage.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/public-cloud/_index.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/public-cloud/install-kubesphere-on-ali-ecs.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/public-cloud/install-kubesphere-on-azure-vms.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/public-cloud/install-kubesphere-on-huaweicloud-ecs.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/public-cloud/install-kubesphere-on-qingcloud-vms.md create mode 100644 content/zh/docs/v3.4/installing-on-linux/uninstall-kubesphere-and-Kubernetes.md create mode 100644 content/zh/docs/v3.4/introduction/_index.md create mode 100644 content/zh/docs/v3.4/introduction/advantages.md create mode 100644 content/zh/docs/v3.4/introduction/architecture.md create mode 100644 content/zh/docs/v3.4/introduction/ecosystem.md create mode 100644 content/zh/docs/v3.4/introduction/features.md create mode 100644 content/zh/docs/v3.4/introduction/scenarios.md create mode 100644 content/zh/docs/v3.4/introduction/what's-new-in-3.3.md create mode 100644 content/zh/docs/v3.4/introduction/what-is-kubesphere.md create mode 100644 content/zh/docs/v3.4/multicluster-management/_index.md create mode 100644 
content/zh/docs/v3.4/multicluster-management/enable-multicluster/_index.md create mode 100644 content/zh/docs/v3.4/multicluster-management/enable-multicluster/agent-connection.md create mode 100644 content/zh/docs/v3.4/multicluster-management/enable-multicluster/direct-connection.md create mode 100644 content/zh/docs/v3.4/multicluster-management/enable-multicluster/retrieve-kubeconfig.md create mode 100644 content/zh/docs/v3.4/multicluster-management/enable-multicluster/update-kubeconfig.md create mode 100644 content/zh/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/_index.md create mode 100644 content/zh/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/import-aliyun-ack.md create mode 100644 content/zh/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/import-aws-eks.md create mode 100644 content/zh/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/import-gke.md create mode 100644 content/zh/docs/v3.4/multicluster-management/introduction/_index.md create mode 100644 content/zh/docs/v3.4/multicluster-management/introduction/kubefed-in-kubesphere.md create mode 100644 content/zh/docs/v3.4/multicluster-management/introduction/overview.md create mode 100644 content/zh/docs/v3.4/multicluster-management/unbind-cluster.md create mode 100644 content/zh/docs/v3.4/pluggable-components/_index.md create mode 100644 content/zh/docs/v3.4/pluggable-components/alerting.md create mode 100644 content/zh/docs/v3.4/pluggable-components/app-store.md create mode 100644 content/zh/docs/v3.4/pluggable-components/auditing-logs.md create mode 100644 content/zh/docs/v3.4/pluggable-components/devops.md create mode 100644 content/zh/docs/v3.4/pluggable-components/events.md create mode 100644 content/zh/docs/v3.4/pluggable-components/kubeedge.md create mode 100644 content/zh/docs/v3.4/pluggable-components/logging.md create mode 100644 content/zh/docs/v3.4/pluggable-components/metrics-server.md create mode 100644 content/zh/docs/v3.4/pluggable-components/network-policy.md create mode 100644 content/zh/docs/v3.4/pluggable-components/overview.md create mode 100644 content/zh/docs/v3.4/pluggable-components/pod-ip-pools.md create mode 100644 content/zh/docs/v3.4/pluggable-components/service-mesh.md create mode 100644 content/zh/docs/v3.4/pluggable-components/service-topology.md create mode 100644 content/zh/docs/v3.4/pluggable-components/uninstall-pluggable-components.md create mode 100644 content/zh/docs/v3.4/project-administration/_index.md create mode 100644 content/zh/docs/v3.4/project-administration/container-limit-ranges.md create mode 100644 content/zh/docs/v3.4/project-administration/disk-log-collection.md create mode 100644 content/zh/docs/v3.4/project-administration/project-and-multicluster-project.md create mode 100644 content/zh/docs/v3.4/project-administration/project-gateway.md create mode 100644 content/zh/docs/v3.4/project-administration/project-network-isolation.md create mode 100644 content/zh/docs/v3.4/project-administration/role-and-member-management.md create mode 100644 content/zh/docs/v3.4/project-user-guide/_index.md create mode 100644 content/zh/docs/v3.4/project-user-guide/alerting/_index.md create mode 100644 content/zh/docs/v3.4/project-user-guide/alerting/alerting-message.md create mode 100644 content/zh/docs/v3.4/project-user-guide/alerting/alerting-policy.md create mode 100644 content/zh/docs/v3.4/project-user-guide/application-workloads/_index.md create mode 100644 content/zh/docs/v3.4/project-user-guide/application-workloads/container-image-settings.md 
create mode 100644 content/zh/docs/v3.4/project-user-guide/application-workloads/cronjobs.md create mode 100644 content/zh/docs/v3.4/project-user-guide/application-workloads/daemonsets.md create mode 100644 content/zh/docs/v3.4/project-user-guide/application-workloads/deployments.md create mode 100755 content/zh/docs/v3.4/project-user-guide/application-workloads/horizontal-pod-autoscaling.md create mode 100644 content/zh/docs/v3.4/project-user-guide/application-workloads/jobs.md create mode 100644 content/zh/docs/v3.4/project-user-guide/application-workloads/routes.md create mode 100644 content/zh/docs/v3.4/project-user-guide/application-workloads/services.md create mode 100644 content/zh/docs/v3.4/project-user-guide/application-workloads/statefulsets.md create mode 100644 content/zh/docs/v3.4/project-user-guide/application/_index.md create mode 100644 content/zh/docs/v3.4/project-user-guide/application/app-template.md create mode 100644 content/zh/docs/v3.4/project-user-guide/application/compose-app.md create mode 100644 content/zh/docs/v3.4/project-user-guide/application/deploy-app-from-appstore.md create mode 100644 content/zh/docs/v3.4/project-user-guide/application/deploy-app-from-template.md create mode 100644 content/zh/docs/v3.4/project-user-guide/configuration/_index.md create mode 100644 content/zh/docs/v3.4/project-user-guide/configuration/configmaps.md create mode 100644 content/zh/docs/v3.4/project-user-guide/configuration/image-registry.md create mode 100644 content/zh/docs/v3.4/project-user-guide/configuration/secrets.md create mode 100644 content/zh/docs/v3.4/project-user-guide/configuration/serviceaccounts.md create mode 100644 content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/_index.md create mode 100644 content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/examples/_index.md create mode 100644 content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/examples/monitor-mysql.md create mode 100644 content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/examples/monitor-sample-web.md create mode 100644 content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/introduction.md create mode 100644 content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/_index.md create mode 100644 content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/overview.md create mode 100644 content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/panel.md create mode 100644 content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/querying.md create mode 100644 content/zh/docs/v3.4/project-user-guide/grayscale-release/_index.md create mode 100644 content/zh/docs/v3.4/project-user-guide/grayscale-release/blue-green-deployment.md create mode 100644 content/zh/docs/v3.4/project-user-guide/grayscale-release/canary-release.md create mode 100644 content/zh/docs/v3.4/project-user-guide/grayscale-release/overview.md create mode 100644 content/zh/docs/v3.4/project-user-guide/grayscale-release/traffic-mirroring.md create mode 100644 content/zh/docs/v3.4/project-user-guide/image-builder/_index.md create mode 100644 content/zh/docs/v3.4/project-user-guide/image-builder/binary-to-image.md create mode 100644 content/zh/docs/v3.4/project-user-guide/image-builder/s2i-and-b2i-webhooks.md create mode 100644 content/zh/docs/v3.4/project-user-guide/image-builder/s2i-introduction.md create mode 100644 
content/zh/docs/v3.4/project-user-guide/image-builder/s2i-templates.md create mode 100644 content/zh/docs/v3.4/project-user-guide/image-builder/source-to-image.md create mode 100644 content/zh/docs/v3.4/project-user-guide/storage/_index.md create mode 100644 content/zh/docs/v3.4/project-user-guide/storage/volume-snapshots.md create mode 100644 content/zh/docs/v3.4/project-user-guide/storage/volumes.md create mode 100644 content/zh/docs/v3.4/quick-start/_index.md create mode 100644 content/zh/docs/v3.4/quick-start/all-in-one-on-linux.md create mode 100644 content/zh/docs/v3.4/quick-start/create-workspace-and-project.md create mode 100644 content/zh/docs/v3.4/quick-start/deploy-bookinfo-to-k8s.md create mode 100644 content/zh/docs/v3.4/quick-start/enable-pluggable-components.md create mode 100644 content/zh/docs/v3.4/quick-start/minimal-kubesphere-on-k8s.md create mode 100644 content/zh/docs/v3.4/quick-start/wordpress-deployment.md create mode 100644 content/zh/docs/v3.4/reference/_index.md create mode 100644 content/zh/docs/v3.4/reference/api-changes/_index.md create mode 100644 content/zh/docs/v3.4/reference/api-changes/logging.md create mode 100644 content/zh/docs/v3.4/reference/api-changes/monitoring.md create mode 100644 content/zh/docs/v3.4/reference/api-docs.md create mode 100644 content/zh/docs/v3.4/reference/environment-requirements.md create mode 100644 content/zh/docs/v3.4/reference/glossary.md create mode 100644 content/zh/docs/v3.4/reference/storage-system-installation/_index.md create mode 100644 content/zh/docs/v3.4/reference/storage-system-installation/glusterfs-server.md create mode 100644 content/zh/docs/v3.4/reference/storage-system-installation/nfs-server.md create mode 100644 content/zh/docs/v3.4/release/_index.md create mode 100644 content/zh/docs/v3.4/release/release-v200.md create mode 100644 content/zh/docs/v3.4/release/release-v201.md create mode 100644 content/zh/docs/v3.4/release/release-v202.md create mode 100644 content/zh/docs/v3.4/release/release-v210.md create mode 100644 content/zh/docs/v3.4/release/release-v211.md create mode 100644 content/zh/docs/v3.4/release/release-v300.md create mode 100644 content/zh/docs/v3.4/release/release-v310.md create mode 100644 content/zh/docs/v3.4/release/release-v311.md create mode 100644 content/zh/docs/v3.4/release/release-v320.md create mode 100644 content/zh/docs/v3.4/release/release-v321.md create mode 100644 content/zh/docs/v3.4/release/release-v330.md create mode 100644 content/zh/docs/v3.4/release/release-v331.md create mode 100644 content/zh/docs/v3.4/release/release-v332.md create mode 100644 content/zh/docs/v3.4/toolbox/_index.md create mode 100644 content/zh/docs/v3.4/toolbox/auditing/_index.md create mode 100644 content/zh/docs/v3.4/toolbox/auditing/auditing-query.md create mode 100644 content/zh/docs/v3.4/toolbox/auditing/auditing-receive-customize.md create mode 100644 content/zh/docs/v3.4/toolbox/auditing/auditing-rule.md create mode 100644 content/zh/docs/v3.4/toolbox/events-query.md create mode 100644 content/zh/docs/v3.4/toolbox/log-query.md create mode 100644 content/zh/docs/v3.4/toolbox/metering-and-billing/_index.md create mode 100644 content/zh/docs/v3.4/toolbox/metering-and-billing/enable-billing.md create mode 100644 content/zh/docs/v3.4/toolbox/metering-and-billing/view-resource-consumption.md create mode 100644 content/zh/docs/v3.4/toolbox/web-kubectl.md create mode 100644 content/zh/docs/v3.4/upgrade/_index.md create mode 100644 content/zh/docs/v3.4/upgrade/air-gapped-upgrade-with-ks-installer.md 
create mode 100644 content/zh/docs/v3.4/upgrade/air-gapped-upgrade-with-kubekey.md create mode 100644 content/zh/docs/v3.4/upgrade/overview.md create mode 100644 content/zh/docs/v3.4/upgrade/upgrade-with-ks-installer.md create mode 100644 content/zh/docs/v3.4/upgrade/upgrade-with-kubekey.md create mode 100644 content/zh/docs/v3.4/upgrade/what-changed.md create mode 100644 content/zh/docs/v3.4/workspace-administration/_index.md create mode 100644 content/zh/docs/v3.4/workspace-administration/app-repository/_index.md create mode 100644 content/zh/docs/v3.4/workspace-administration/app-repository/import-helm-repository.md create mode 100644 content/zh/docs/v3.4/workspace-administration/app-repository/upload-app-to-public-repository.md create mode 100644 content/zh/docs/v3.4/workspace-administration/department-management.md create mode 100644 content/zh/docs/v3.4/workspace-administration/project-quotas.md create mode 100644 content/zh/docs/v3.4/workspace-administration/role-and-member-management.md create mode 100644 content/zh/docs/v3.4/workspace-administration/upload-helm-based-application.md create mode 100644 content/zh/docs/v3.4/workspace-administration/what-is-workspace.md create mode 100644 content/zh/docs/v3.4/workspace-administration/workspace-network-isolation.md create mode 100644 content/zh/docs/v3.4/workspace-administration/workspace-quotas.md diff --git a/content/en/docs/v3.4/_index.md b/content/en/docs/v3.4/_index.md new file mode 100644 index 000000000..3e990b47d --- /dev/null +++ b/content/en/docs/v3.4/_index.md @@ -0,0 +1,62 @@ +--- +title: "Documentation" +css: "scss/docs.scss" +isDocsRoot: true + +LinkTitle: "Documentation" + + +section1: + title: KubeSphere Documentation + content: Learn how to build and manage cloud-native applications using KubeSphere Container Platform. Get documentation, example code, tutorials, and more. + image: /images/docs/v3.3/banner.png + +sectionLink: + docs: + title: Popular Pages + description: Learn how to use KubeSphere with these quickstarts, tutorials, and examples. + list: + - /docs/v3.3/quick-start/all-in-one-on-linux + - /docs/v3.3/quick-start/minimal-kubesphere-on-k8s + - /docs/v3.3/quick-start/create-workspace-and-project + - /docs/v3.3/introduction/what-is-kubesphere + - /docs/v3.3/pluggable-components + - /docs/v3.3/installing-on-linux/introduction/multioverview + - /docs/v3.3/pluggable-components/app-store + - /docs/v3.3/pluggable-components/devops + - /docs/v3.3/multicluster-management + - /docs/v3.3/project-user-guide/configuration/image-registry + - /docs/v3.3/devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-jenkinsfile + - /docs/v3.3/devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-graphical-editing-panel + - /docs/v3.3/project-user-guide/image-builder/source-to-image + - /docs/v3.3/application-store/app-lifecycle-management + + videos: + title: Popular Videos + description: Watch video tutorials to learn about KubeSphere. 
+ list: + - link: https://www.youtube.com/watch?v=PtVQZVb3AgE + text: All-in-one installation + - link: https://www.youtube.com/watch?v=nYOYk3VTSgo&t=9s + text: Multi-node installation + - link: https://www.youtube.com/watch?v=c3V-2RX9yGY&t=160s + text: A complete walkthrough to the KubeSphere DevOps system + +section3: + title: Run KubeSphere and Kubernetes Stack from the Cloud Service + description: Cloud Providers are providing KubeSphere as a cloud-hosted service for users, helping you to create a highly available Kubernetes cluster managed by KubeSphere within minutes via several clicks. It enables you to use the cloud-hosted Kubernetes services out of the box. + list: + - image: /images/docs/v3.3/aws.jpg + content: AWS Quickstart + link: https://aws.amazon.com/quickstart/architecture/qingcloud-kubesphere/ + - image: /images/docs/v3.3/microsoft-azure.jpg + content: Azure Marketplace + link: https://market.azure.cn/marketplace/apps/qingcloud.kubesphere + - image: /images/docs/v3.3/qingcloud.svg + content: QingCloud QKE + link: https://www.qingcloud.com/products/kubesphereqke/ + + titleRight: Want to host KubeSphere on your cloud or your solution? + btnContent: Partner with us + btnLink: /partner/ +--- \ No newline at end of file diff --git a/content/en/docs/v3.4/access-control-and-account-management/_index.md b/content/en/docs/v3.4/access-control-and-account-management/_index.md new file mode 100644 index 000000000..faf5b7311 --- /dev/null +++ b/content/en/docs/v3.4/access-control-and-account-management/_index.md @@ -0,0 +1,13 @@ +--- +title: "Access Control and Account Management" +description: "Access Control and Account Management" +layout: "second" + +linkTitle: "Access Control and Account Management" +weight: 12000 + +icon: "/images/docs/v3.3/docs.svg" + +--- + +The multi-tenant architecture of KubeSphere underlies many key components running on the container platform. Different tenants are assigned with varied roles so that they can perform related tasks. This chapter outlines the multi-tenant system of KubeSphere and demonstrates how to configure authentication for third-party login. \ No newline at end of file diff --git a/content/en/docs/v3.4/access-control-and-account-management/external-authentication/_index.md b/content/en/docs/v3.4/access-control-and-account-management/external-authentication/_index.md new file mode 100644 index 000000000..097367f09 --- /dev/null +++ b/content/en/docs/v3.4/access-control-and-account-management/external-authentication/_index.md @@ -0,0 +1,8 @@ +--- +title: "External Authentication" +description: "Learn how to configure third-party authentication on KubeSphere." +layout: "single" + +linkTitle: "External Authentication" +weight: 12200 +--- diff --git a/content/en/docs/v3.4/access-control-and-account-management/external-authentication/oidc-identity-provider.md b/content/en/docs/v3.4/access-control-and-account-management/external-authentication/oidc-identity-provider.md new file mode 100644 index 000000000..3f9ee6282 --- /dev/null +++ b/content/en/docs/v3.4/access-control-and-account-management/external-authentication/oidc-identity-provider.md @@ -0,0 +1,62 @@ +--- +title: "OIDC Identity Provider" +keywords: "OIDC, identity provider" +description: "How to use an external OIDC identity provider." + +linkTitle: "OIDC Identity Provider" +weight: 12221 +--- + +## OIDC Identity Provider + +[OpenID Connect](https://openid.net/connect/) is an interoperable authentication protocol based on the OAuth 2.0 family of specifications. 
It uses straightforward REST/JSON message flows with a design goal of “making simple things simple and complicated things possible”. It’s uniquely easy for developers to integrate, compared to any preceding Identity protocol, such as Keycloak, Okta, Dex, Auth0, Gluu, Casdoor and many more. + +## Prerequisites + +You need to deploy a Kubernetes cluster and install KubeSphere in the cluster. For details, see [Installing on Linux](/docs/v3.3/installing-on-linux/) and [Installing on Kubernetes](/docs/v3.3/installing-on-kubernetes/). + +## Procedure + +1. Log in to KubeSphere as `admin`, move the cursor to icon in the lower-right corner, click **kubectl**, and run the following command to edit `ks-installer` of the CRD `ClusterConfiguration`: + + ```bash + kubectl -n kubesphere-system edit cc ks-installer + ``` + +2. Add the following fields under `spec.authentication.jwtSecret`. + + *Example of using [Google Identity Platform](https://developers.google.com/identity/protocols/oauth2/openid-connect)*: + + ```yaml + spec: + authentication: + jwtSecret: '' + authenticateRateLimiterMaxTries: 10 + authenticateRateLimiterDuration: 10m0s + oauthOptions: + accessTokenMaxAge: 1h + accessTokenInactivityTimeout: 30m + identityProviders: + - name: google + type: OIDCIdentityProvider + mappingMethod: auto + provider: + clientID: '********' + clientSecret: '********' + issuer: https://accounts.google.com + redirectURL: 'https://ks-console/oauth/redirect/google' + ``` + + See description of parameters as below: + + | Parameter | Description | + | -------------------- | ------------------------------------------------------------ | + | clientID | The OAuth2 client ID. | + | clientSecret | The OAuth2 client secret. | + | redirectURL | The redirected URL to ks-console in the following format: `https:///oauth/redirect/`. The `` in the URL corresponds to the value of `oauthOptions:identityProviders:name`. | + | issuer | Defines how Clients dynamically discover information about OpenID Providers. | + | preferredUsernameKey | Configurable key which contains the preferred username claims. This parameter is optional. | + | emailKey | Configurable key which contains the email claims. This parameter is optional. | + | getUserInfo | GetUserInfo uses the userinfo endpoint to get additional claims for the token. This is especially useful where upstreams return "thin" ID tokens. This parameter is optional. | + | insecureSkipVerify | Used to turn off TLS certificate verification. | + diff --git a/content/en/docs/v3.4/access-control-and-account-management/external-authentication/set-up-external-authentication.md b/content/en/docs/v3.4/access-control-and-account-management/external-authentication/set-up-external-authentication.md new file mode 100644 index 000000000..a9d3b21e5 --- /dev/null +++ b/content/en/docs/v3.4/access-control-and-account-management/external-authentication/set-up-external-authentication.md @@ -0,0 +1,112 @@ +--- +title: "Set Up External Authentication" +keywords: "LDAP, external, third-party, authentication" +description: "How to set up external authentication on KubeSphere." + +linkTitle: "Set Up External Authentication" +weight: 12210 +--- + +This document describes how to use an external identity provider such as an LDAP service or Active Directory service on KubeSphere. + +KubeSphere provides a built-in OAuth server. Users can obtain OAuth access tokens to authenticate themselves to the KubeSphere API. 
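For reference, obtaining a token from the built-in OAuth server is typically a password-grant request similar to the sketch below; the in-cluster API server address, account credentials, and default client ID/secret are assumptions that vary by deployment, so replace them with the values of your own environment.

```bash
# A sketch of requesting an access token from ks-apiserver (adjust the
# address, username, password, and client credentials to your deployment).
curl -X POST 'http://ks-apiserver.kubesphere-system.svc/oauth/token' \
  -H 'Content-Type: application/x-www-form-urlencoded' \
  --data-urlencode 'grant_type=password' \
  --data-urlencode 'username=admin' \
  --data-urlencode 'password=P@88w0rd' \
  --data-urlencode 'client_id=kubesphere' \
  --data-urlencode 'client_secret=kubesphere'

# The JSON response contains an access_token that can be sent to the
# KubeSphere API as an "Authorization: Bearer <token>" header.
```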
As a KubeSphere administrator, you can edit `ks-installer` of the CRD `ClusterConfiguration` to configure OAuth and specify identity providers. + +## Prerequisites + +You need to deploy a Kubernetes cluster and install KubeSphere in the cluster. For details, see [Installing on Linux](/docs/v3.3/installing-on-linux/) and [Installing on Kubernetes](/docs/v3.3/installing-on-kubernetes/). + + +## Procedure + +1. Log in to KubeSphere as `admin`, move the cursor to icon in the lower-right corner, click **kubectl**, and run the following command to edit `ks-installer` of the CRD `ClusterConfiguration`: + + ```bash + kubectl -n kubesphere-system edit cc ks-installer + ``` + +2. Add the following fields under `spec.authentication.jwtSecret`. + + Example: + + ```yaml + spec: + authentication: + jwtSecret: '' + authenticateRateLimiterMaxTries: 10 + authenticateRateLimiterDuration: 10m0s + loginHistoryRetentionPeriod: 168h + maximumClockSkew: 10s + multipleLogin: true + oauthOptions: + accessTokenMaxAge: 1h + accessTokenInactivityTimeout: 30m + identityProviders: + - name: LDAP + type: LDAPIdentityProvider + mappingMethod: auto + provider: + host: 192.168.0.2:389 + managerDN: uid=root,cn=users,dc=nas + managerPassword: ******** + userSearchBase: cn=users,dc=nas + loginAttribute: uid + mailAttribute: mail + ``` + + The fields are described as follows: + + * `jwtSecret`: Secret used to sign user tokens. In a multi-cluster environment, all clusters must [use the same Secret](../../../multicluster-management/enable-multicluster/direct-connection/#prepare-a-member-cluster). + * `authenticateRateLimiterMaxTries`: Maximum number of consecutive login failures allowed during a period specified by `authenticateRateLimiterDuration`. If the number of consecutive login failures of a user reaches the limit, the user will be blocked. + * `authenticateRateLimiterDuration`: Period during which `authenticateRateLimiterMaxTries` applies. + * `loginHistoryRetentionPeriod`: Retention period of login records. Outdated login records are automatically deleted. + * `maximumClockSkew`: Maximum clock skew for time-sensitive operations such as token expiration validation. The default value is `10s`. + * `multipleLogin`: Whether multiple users are allowed to log in from different locations. The default value is `true`. + * `oauthOptions`: OAuth settings. + * `accessTokenMaxAge`: Access token lifetime. For member clusters in a multi-cluster environment, the default value is `0h`, which means access tokens never expire. For other clusters, the default value is `2h`. + * `accessTokenInactivityTimeout`: Access token inactivity timeout period. An access token becomes invalid after it is idle for a period specified by this field. After an access token times out, the user needs to obtain a new access token to regain access. + * `identityProviders`: Identity providers. + * `name`: Identity provider name. + * `type`: Identity provider type. + * `mappingMethod`: Account mapping method. The value can be `auto` or `lookup`. + * If the value is `auto` (default), you need to specify a new username. KubeSphere automatically creates a user according to the username and maps the user to a third-party account. + * If the value is `lookup`, you need to perform step 3 to manually map an existing KubeSphere user to a third-party account. + * `provider`: Identity provider information. Fields in this section vary according to the identity provider type. + +3. 
If `mappingMethod` is set to `lookup`, run the following command and add the labels to map a KubeSphere user to a third-party account. Skip this step if `mappingMethod` is set to `auto`. + + ```bash + kubectl edit user + ``` + + ```yaml + labels: + iam.kubesphere.io/identify-provider: + iam.kubesphere.io/origin-uid: + ``` + +4. After the fields are configured, save your changes, and wait until the restart of ks-installer is complete. + + {{< notice note >}} + + In a multi-cluster environment, you only need to configure the host cluster. + + {{}} + + +## Identity provider + +You can configure multiple identity providers (IdPs) in the 'identityProviders' section. The identity provider authenticates the user and provides an identity token to kubesphere. + +Kubesphere provides the following types of identity providers by default: + +* [LDAP Identity Provider](../use-an-ldap-service) + +* [OIDC Identity Provider](../oidc-identity-provider) + +* GitHub Identity Provider + +* CAS Identity Provider + +* Aliyun IDaaS Provider + +You can also expand the kubesphere [OAuth2 authentication plug-in](../use-an-oauth2-identity-provider) to integrate with your account system. diff --git a/content/en/docs/v3.4/access-control-and-account-management/external-authentication/use-an-ldap-service.md b/content/en/docs/v3.4/access-control-and-account-management/external-authentication/use-an-ldap-service.md new file mode 100644 index 000000000..0c6192132 --- /dev/null +++ b/content/en/docs/v3.4/access-control-and-account-management/external-authentication/use-an-ldap-service.md @@ -0,0 +1,104 @@ +--- +title: "Use an LDAP Service" +keywords: "LDAP, identity provider, external, authentication" +description: "How to use an LDAP service." + +linkTitle: "Use an LDAP Service" +weight: 12220 +--- + +This document describes how to use an LDAP service as an external identity provider, which allows you to authenticate users against the LDAP service. + +## Prerequisites + +* You need to deploy a Kubernetes cluster and install KubeSphere in the cluster. For details, see [Installing on Linux](/docs/v3.3/installing-on-linux/) and [Installing on Kubernetes](/docs/v3.3/installing-on-kubernetes/). +* You need to obtain the manager distinguished name (DN) and manager password of an LDAP service. + +## Procedure + +1. Log in to KubeSphere as `admin`, move the cursor to icon in the lower-right corner, click **kubectl**, and run the following command to edit `ks-installer` of the CRD `ClusterConfiguration`: + + ```bash + kubectl -n kubesphere-system edit cc ks-installer + ``` + + Example: + + ```yaml + spec: + authentication: + jwtSecret: '' + maximumClockSkew: 10s + multipleLogin: true + oauthOptions: + accessTokenMaxAge: 1h + accessTokenInactivityTimeout: 30m + identityProviders: + - name: LDAP + type: LDAPIdentityProvider + mappingMethod: auto + provider: + host: 192.168.0.2:389 + managerDN: uid=root,cn=users,dc=nas + managerPassword: ******** + userSearchBase: cn=users,dc=nas + loginAttribute: uid + mailAttribute: mail + ``` + +2. Configure fields other than `oauthOptions:identityProviders` in the `spec:authentication` section. For details, see [Set Up External Authentication](../set-up-external-authentication/). + +3. Configure fields in `oauthOptions:identityProviders` section. + + * `name`: User-defined LDAP service name. + * `type`: To use an LDAP service as an identity provider, you must set the value to `LDAPIdentityProvider`. + * `mappingMethod`: Account mapping method. The value can be `auto` or `lookup`. 
+ * If the value is `auto` (default), you need to specify a new username. KubeSphere automatically creates a user according to the username and maps the user to an LDAP user. + * If the value is `lookup`, you need to perform step 4 to manually map an existing KubeSphere user to an LDAP user. + * `provider`: + * `host`: Address and port number of the LDAP service. + * `managerDN`: DN used to bind to the LDAP directory. + * `managerPassword`: Password corresponding to `managerDN`. + * `userSearchBase`: User search base. Set the value to the DN of the directory level below which all LDAP users can be found. + * `loginAttribute`: Attribute that identifies LDAP users. + * `mailAttribute`: Attribute that identifies email addresses of LDAP users. + +4. If `mappingMethod` is set to `lookup`, run the following command and add the labels to map a KubeSphere user to an LDAP user. Skip this step if `mappingMethod` is set to `auto`. + + ```bash + kubectl edit user + ``` + + ```yaml + labels: + iam.kubesphere.io/identify-provider: + iam.kubesphere.io/origin-uid: + ``` + +5. After the fields are configured, save your changes, and wait until the restart of ks-installer is complete. + + {{< notice note >}} + + The KubeSphere web console is unavailable during the restart of ks-installer. Please wait until the restart is complete. + + {{}} + +6. If you are using KubeSphere 3.2.0, run the following command after configuring LDAP and wait until `ks-installer` is up and running: + + ```bash + kubectl -n kubesphere-system set image deployment/ks-apiserver *=kubesphere/ks-apiserver:v3.2.1 + ``` + + {{< notice note >}} + + If you are using KubeSphere 3.2.1, skip this step. + + {{}} + +7. Go to the KubeSphere login page and enter the username and password of an LDAP user to log in. + + {{< notice note >}} + + The username of an LDAP user is the value of the attribute specified by `loginAttribute`. + + {{}} diff --git a/content/en/docs/v3.4/access-control-and-account-management/external-authentication/use-an-oauth2-identity-provider.md b/content/en/docs/v3.4/access-control-and-account-management/external-authentication/use-an-oauth2-identity-provider.md new file mode 100644 index 000000000..e7ae81348 --- /dev/null +++ b/content/en/docs/v3.4/access-control-and-account-management/external-authentication/use-an-oauth2-identity-provider.md @@ -0,0 +1,130 @@ +--- +title: "Use an OAuth 2.0 Identity Provider" +keywords: 'Kubernetes, KubeSphere, OAuth2, Identity Provider' +description: 'How to use an external OAuth2 identity provider.' +linkTitle: "Use an OAuth 2.0 Identity Provider" +weight: 12230 +--- + +This document describes how to use an external identity provider based on the OAuth 2.0 protocol. + +The following figure shows the authentication process between KubeSphere and an external OAuth 2.0 identity provider. + +![oauth2](/images/docs/v3.3/access-control-and-account-management/external-authentication/use-an-oauth2-identity-provider/oauth2.svg) + +## Prerequisites + +You need to deploy a Kubernetes cluster and install KubeSphere in the cluster. For details, see [Installing on Linux](/docs/v3.3/installing-on-linux/) and [Installing on Kubernetes](/docs/v3.3/installing-on-kubernetes/). 
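If you want to confirm that the installation has completed before you continue, one common check is to follow the ks-installer logs until they report a successful setup; the label selector below assumes the default installation namespace and labels, which may differ slightly across versions.

```bash
# Follow the installer logs; the final lines print the console address and
# the default account once KubeSphere is up and running.
kubectl logs -n kubesphere-system \
  $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f
```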
+ +## Develop an OAuth 2.0 Plugin + +{{< notice note >}} + +KubeSphere provides two built-in OAuth 2.0 plugins: [GitHubIdentityProvider](https://github.com/kubesphere/kubesphere/blob/release-3.1/pkg/apiserver/authentication/identityprovider/github/github.go) for GitHub and [AliyunIDaasProvider](https://github.com/kubesphere/kubesphere/blob/release-3.1/pkg/apiserver/authentication/identityprovider/github/github.go) for Alibaba Cloud IDaaS. You can develop other plugins according to the built-in plugins. + +{{</ notice >}} + +1. Clone the [KubeSphere repository](https://github.com/kubesphere/kubesphere) on your local machine, go to the local KubeSphere repository, and create a package for your plugin in the `/pkg/apiserver/authentication/identityprovider/` directory. + +2. In the plugin package, implement the following interfaces: + + ```go + // /pkg/apiserver/authentication/identityprovider/oauth_provider.go + type OAuthProvider interface { + // Exchange identity with a remote server. + IdentityExchange(code string) (Identity, error) + } + + type OAuthProviderFactory interface { + // Return the identity provider type. + Type() string + // Apply settings from kubesphere-config. + Create(options oauth.DynamicOptions) (OAuthProvider, error) + } + ``` + + ```go + // /pkg/apiserver/authentication/identityprovider/identity_provider.go + type Identity interface { + // (Mandatory) Return the identifier of the user at the identity provider. + GetUserID() string + // (Optional) Return the name of the user to be referred as on KubeSphere. + GetUsername() string + // (Optional) Return the email address of the user. + GetEmail() string + } + ``` + +3. Register the plugin in the `init()` function of the plugin package. + + ```go + // Custom plugin package + func init() { + // Change to the actual name of the struct that + // implements the OAuthProviderFactory interface. + identityprovider.RegisterOAuthProvider(&{}) + } + ``` + +4. Import the plugin package in `/pkg/apiserver/authentication/options/authenticate_options.go`. + + ```go + // Change to the actual name of your plugin package. + import ( + ... + _ "kubesphere.io/kubesphere/pkg/apiserver/authentication/identityprovider/" + ... + ) + ``` + +5. [Build the image of ks-apiserver](https://github.com/kubesphere/community/blob/104bab42f67094930f2ca87c603b7c6365cd092a/developer-guide/development/quickstart.md) and deploy it in your cluster. + +## Integrate an Identity Provider with KubeSphere + +1. Log in to KubeSphere as `admin`, move the cursor to the icon in the lower-right corner, click **kubectl**, and run the following command to edit `ks-installer` of the CRD `ClusterConfiguration`: + + ```bash + kubectl -n kubesphere-system edit cc ks-installer + ``` + +2. Configure fields other than `oauthOptions:identityProviders` in the `spec:authentication` section. For details, see [Set Up External Authentication](../set-up-external-authentication/). + +3. Configure fields in `oauthOptions:identityProviders` section according to the identity provider plugin you have developed. + + The following is a configuration example that uses GitHub as an external identity provider. For details, see the [official GitHub documentation](https://docs.github.com/en/developers/apps/building-oauth-apps) and the [source code of the GitHubIdentityProvider](https://github.com/kubesphere/kubesphere/blob/release-3.1/pkg/apiserver/authentication/identityprovider/github/github.go) plugin.
+ + ```yaml + spec: + authentication: + jwtSecret: '' + authenticateRateLimiterMaxTries: 10 + authenticateRateLimiterDuration: 10m0s + oauthOptions: + accessTokenMaxAge: 1h + accessTokenInactivityTimeout: 30m + identityProviders: + - name: github + type: GitHubIdentityProvider + mappingMethod: auto + provider: + clientID: '******' + clientSecret: '******' + redirectURL: 'https://ks-console/oauth/redirect/github' + ``` + + Similarly, you can also use Alibaba Cloud IDaaS as an external identity provider. For details, see the official [Alibaba IDaaS documentation](https://www.alibabacloud.com/help/product/111120.htm?spm=a3c0i.14898238.2766395700.1.62081da1NlxYV0) and the [source code of the AliyunIDaasProvider](https://github.com/kubesphere/kubesphere/blob/release-3.1/pkg/apiserver/authentication/identityprovider/github/github.go) plugin. + +4. After the fields are configured, save your changes, and wait until the restart of ks-installer is complete. + + {{< notice note >}} + + The KubeSphere web console is unavailable during the restart of ks-installer. Please wait until the restart is complete. + + {{</ notice >}} + +5. Go to the KubeSphere login page, click **Log In with XXX** (for example, **Log In with GitHub**). + +6. On the login page of the external identity provider, enter the username and password of a user configured at the identity provider to log in to KubeSphere. + + ![github-login-page](/images/docs/v3.3/access-control-and-account-management/external-authentication/use-an-oauth2-identity-provider/github-login-page.png) + diff --git a/content/en/docs/v3.4/access-control-and-account-management/multi-tenancy-in-kubesphere.md b/content/en/docs/v3.4/access-control-and-account-management/multi-tenancy-in-kubesphere.md new file mode 100644 index 000000000..fbe355bf9 --- /dev/null +++ b/content/en/docs/v3.4/access-control-and-account-management/multi-tenancy-in-kubesphere.md @@ -0,0 +1,57 @@ +--- +title: "Kubernetes Multi-tenancy in KubeSphere" +keywords: "Kubernetes, KubeSphere, multi-tenancy" +description: "Understand the multi-tenant architecture in KubeSphere." +linkTitle: "Multi-tenancy in KubeSphere" +weight: 12100 +--- + +Kubernetes helps you orchestrate applications and schedule containers, greatly improving resource utilization. However, there are various challenges facing both enterprises and individuals in resource sharing and security as they use Kubernetes, which is different from how they managed and maintained clusters in the past. + +The first and foremost challenge is how to define multi-tenancy in an enterprise and the security boundary of tenants. [The discussion about multi-tenancy](https://docs.google.com/document/d/1fj3yzmeU2eU8ZNBCUJG97dk_wC7228-e_MmdcmTNrZY) has never stopped in the Kubernetes community, while there is no definite answer to how a multi-tenant system should be structured. + +## Challenges in Kubernetes Multi-tenancy + +Multi-tenancy is a common software architecture. Resources in a multi-tenant environment are shared by multiple users, also known as "tenants", with their respective data isolated from each other. The administrator of a multi-tenant Kubernetes cluster must minimize the damage that a compromised or malicious tenant can do to others and make sure resources are fairly allocated. + +No matter how an enterprise multi-tenant system is structured, it always comes with the following two building blocks: logical resource isolation and physical resource isolation.
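As a rough illustration of these two building blocks at the plain Kubernetes level (KubeSphere layers its own workspace and role model on top of them, as described below), the sketch below confines a hypothetical tenant to one namespace with a namespace-scoped RoleBinding and keeps other namespaces from reaching its Pods with a NetworkPolicy; the namespace and group names are made-up examples.

```yaml
# Logical isolation: members of the tenant-a group may administer only the
# tenant-a namespace and cannot touch resources in other namespaces.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: tenant-a-admins
  namespace: tenant-a
subjects:
- kind: Group
  name: tenant-a
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: admin          # the built-in aggregated namespace-admin role
  apiGroup: rbac.authorization.k8s.io
---
# Network isolation: allow ingress to Pods in tenant-a only from Pods in
# the same namespace, dropping traffic from other tenants' namespaces.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-same-namespace-only
  namespace: tenant-a
spec:
  podSelector: {}
  policyTypes:
  - Ingress
  ingress:
  - from:
    - podSelector: {}
```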
+ +Logically, resource isolation mainly entails API access control and tenant-based permission control. [Role-based access control (RBAC)](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) in Kubernetes and namespaces provide logic isolation. Nevertheless, they are not applicable in most enterprise environments. Tenants in an enterprise often need to manage resources across multiple namespaces or even clusters. Besides, the ability to provide auditing logs for isolated tenants based on their behavior and event queries is also a must in multi-tenancy. + +The isolation of physical resources includes nodes and networks, while it also relates to container runtime security. For example, you can create [NetworkPolicy](../../pluggable-components/network-policy/) resources to control traffic flow and use PodSecurityPolicy objects to control container behavior. [Kata Containers](https://katacontainers.io/) provides a more secure container runtime. + +## Kubernetes Multi-tenancy in KubeSphere + +To solve the issues above, KubeSphere provides a multi-tenant management solution based on Kubernetes. + +![multi-tenancy-architecture](/images/docs/v3.3/access-control-and-account-management/multi-tanancy-in-kubesphere/multi-tenancy-architecture.png) + +In KubeSphere, the [workspace](../../workspace-administration/what-is-workspace/) is the smallest tenant unit. A workspace enables users to share resources across clusters and projects. Workspace members can create projects in an authorized cluster and invite other members to cooperate in the same project. + +A **user** is the instance of a KubeSphere account. Users can be appointed as platform administrators to manage clusters or added to workspaces to cooperate in projects. + +Multi-level access control and resource quota limits underlie resource isolation in KubeSphere. They decide how the multi-tenant architecture is built and administered. + +### Logical isolation + +Similar to Kubernetes, KubeSphere uses RBAC to manage permissions granted to users, thus logically implementing resource isolation. + +The access control in KubeSphere is divided into three levels: platform, workspace and project. You use roles to control what permissions users have at different levels for different resources. + +1. [Platform roles](/docs/v3.3/quick-start/create-workspace-and-project/): Control what permissions platform users have for platform resources, such as clusters, workspaces and platform members. +2. [Workspace roles](/docs/v3.3/workspace-administration/role-and-member-management/): Control what permissions workspace members have for workspace resources, such as projects (i.e. namespaces) and DevOps projects. +3. [Project roles](/docs/v3.3/project-administration/role-and-member-management/): Control what permissions project members have for project resources, such as workloads and pipelines. + +### Network isolation + +Apart from logically isolating resources, KubeSphere also allows you to set [network isolation policies](../../pluggable-components/network-policy/) for workspaces and projects. + +### Auditing + +KubeSphere also provides [auditing logs](../../pluggable-components/auditing-logs/) for users. + +### Authentication and authorization + +For a complete authentication and authorization chain in KubeSphere, see the following diagram. KubeSphere has expanded RBAC rules using the Open Policy Agent (OPA). 
The KubeSphere team looks to integrate [Gatekeeper](https://github.com/open-policy-agent/gatekeeper) to provide more security management policies. + +![request-chain](/images/docs/v3.3/access-control-and-account-management/multi-tanancy-in-kubesphere/request-chain.jpg) diff --git a/content/en/docs/v3.4/application-store/_index.md b/content/en/docs/v3.4/application-store/_index.md new file mode 100644 index 000000000..348390088 --- /dev/null +++ b/content/en/docs/v3.4/application-store/_index.md @@ -0,0 +1,16 @@ +--- +title: "App Store" +description: "Getting started with the App Store of KubeSphere" +layout: "second" + + +linkTitle: "App Store" +weight: 14000 + +icon: "/images/docs/v3.3/docs.svg" + +--- + +The KubeSphere App Store, powered by [OpenPitrix](https://github.com/openpitrix/openpitrix), an open-source platform that manages apps across clouds, provides users with enterprise-ready containerized solutions. You can upload your own apps through app templates or add app repositories that serve as an application tool for tenants to choose the app they want. + +The App Store features a highly productive integrated system for application lifecycle management, allowing users to quickly upload, release, deploy, upgrade and remove apps in ways that best suit them. This is how KubeSphere empowers developers to spend less time setting up and more time developing. diff --git a/content/en/docs/v3.4/application-store/app-developer-guide/_index.md b/content/en/docs/v3.4/application-store/app-developer-guide/_index.md new file mode 100644 index 000000000..3d1da2629 --- /dev/null +++ b/content/en/docs/v3.4/application-store/app-developer-guide/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Application Developer Guide" +weight: 14400 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/application-store/app-developer-guide/helm-developer-guide.md b/content/en/docs/v3.4/application-store/app-developer-guide/helm-developer-guide.md new file mode 100644 index 000000000..b7dc2f393 --- /dev/null +++ b/content/en/docs/v3.4/application-store/app-developer-guide/helm-developer-guide.md @@ -0,0 +1,157 @@ +--- +title: "Helm Developer Guide" +keywords: 'Kubernetes, KubeSphere, helm, development' +description: 'Develop your own Helm-based app.' +linkTitle: "Helm Developer Guide" +weight: 14410 +--- + +You can upload the Helm chart of an app to KubeSphere so that tenants with necessary permissions can deploy it. This tutorial demonstrates how to prepare Helm charts using NGINX as an example. + +## Install Helm + +If you have already installed KubeSphere, then Helm is deployed in your environment. Otherwise, refer to the [Helm documentation](https://helm.sh/docs/intro/install/) to install Helm first. + +## Create a Local Repository + +Execute the following commands to create a repository on your machine. + +```bash +mkdir helm-repo +``` + +```bash +cd helm-repo +``` + +## Create an App + +Use `helm create` to create a folder named `nginx`, which automatically creates YAML templates and directories for your app. Generally, it is not recommended to change the name of files and directories in the top level directory. + +```bash +$ helm create nginx +$ tree nginx/ +nginx/ +├── charts +├── Chart.yaml +├── templates +│ ├── deployment.yaml +│ ├── _helpers.tpl +│ ├── ingress.yaml +│ ├── NOTES.txt +│ └── service.yaml +└── values.yaml +``` + +`Chart.yaml` is used to define the basic information of the chart, including name, API, and app version. 
For more information, see [Chart.yaml File](../helm-specification/#chartyaml-file). + +An example of the `Chart.yaml` file: + +```yaml +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: nginx +version: 0.1.0 +``` + +When you deploy Helm-based apps to Kubernetes, you can edit the `values.yaml` file on the KubeSphere console directly. + +An example of the `values.yaml` file: + +```yaml +# Default values for test. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: nginx + tag: stable + pullPolicy: IfNotPresent + +nameOverride: "" +fullnameOverride: "" + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + path: / + hosts: + - chart-example.local + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} +``` + +Refer to [Helm Specifications](../helm-specification/) to edit files in the `nginx` folder and save them when you finish editing. + +## Create an Index File (Optional) + +To add a repository with an HTTP or HTTPS URL in KubeSphere, you need to upload an `index.yaml` file to the object storage in advance. Use Helm to create the index file by executing the following command in the previous directory of `nginx`. + +```bash +helm repo index . +``` + +```bash +$ ls +index.yaml nginx +``` + +{{< notice note >}} + +- If the repository URL is S3-styled, an index file will be created automatically in the object storage when you add apps to the repository. + +- For more information about how to add repositories to KubeSphere, see [Import an Helm Repository](../../../workspace-administration/app-repository/import-helm-repository/). + +{{}} + +## Package the Chart + +Go to the previous directory of `nginx` and execute the following command to package your chart which creates a .tgz package. + +```bash +helm package nginx +``` + +```bash +$ ls +nginx nginx-0.1.0.tgz +``` + +## Upload Your App + +Now that you have your Helm-based app ready, you can load it to KubeSphere and test it on the platform. + +## See Also + +[Helm Specifications](../helm-specification/) + +[Import an Helm Repository](../../../workspace-administration/app-repository/import-helm-repository/) diff --git a/content/en/docs/v3.4/application-store/app-developer-guide/helm-specification.md b/content/en/docs/v3.4/application-store/app-developer-guide/helm-specification.md new file mode 100644 index 000000000..ab16d028a --- /dev/null +++ b/content/en/docs/v3.4/application-store/app-developer-guide/helm-specification.md @@ -0,0 +1,130 @@ +--- +title: "Helm Specifications" +keywords: 'Kubernetes, KubeSphere, Helm, specifications' +description: 'Understand the chart structure and specifications.' +linkTitle: "Helm Specifications" +weight: 14420 +--- + +Helm charts serve as a packaging format. A chart is a collection of files that describe a related set of Kubernetes resources. 
For more information, see the [Helm documentation](https://helm.sh/docs/topics/charts/). + +## Structure + +All related files of a chart is stored in a directory which generally contains: + +```text +chartname/ + Chart.yaml # A YAML file containing basic information about the chart, such as version and name. + LICENSE # (Optional) A plain text file containing the license for the chart. + README.md # (Optional) The description of the app and how-to guide. + values.yaml # The default configuration values for this chart. + values.schema.json # (Optional) A JSON Schema for imposing a structure on the values.yaml file. + charts/ # A directory containing any charts upon which this chart depends. + crds/ # Custom Resource Definitions. + templates/ # A directory of templates that will generate valid Kubernetes configuration files with corresponding values provided. + templates/NOTES.txt # (Optional) A plain text file with usage notes. +``` + +## Chart.yaml File + +You must provide the `chart.yaml` file for a chart. Here is an example of the file with explanations for each field. + +```yaml +apiVersion: (Required) The chart API version. +name: (Required) The name of the chart. +version: (Required) The version, following the SemVer 2 standard. +kubeVersion: (Optional) The compatible Kubernetes version, following the SemVer 2 standard. +description: (Optional) A single-sentence description of the app. +type: (Optional) The type of the chart. +keywords: + - (Optional) A list of keywords about the app. +home: (Optional) The URL of the app. +sources: + - (Optional) A list of URLs to source code for this app. +dependencies: (Optional) A list of the chart requirements. + - name: The name of the chart, such as nginx. + version: The version of the chart, such as "1.2.3". + repository: The repository URL ("https://example.com/charts") or alias ("@repo-name"). + condition: (Optional) A yaml path that resolves to a boolean, used for enabling/disabling charts (for example, subchart1.enabled ). + tags: (Optional) + - Tags can be used to group charts for enabling/disabling together. + import-values: (Optional) + - ImportValues holds the mapping of source values to parent key to be imported. Each item can be a string or pair of child/parent sublist items. + alias: (Optional) Alias to be used for the chart. It is useful when you have to add the same chart multiple times. +maintainers: (Optional) + - name: (Required) The maintainer name. + email: (Optional) The maintainer email. + url: (Optional) A URL for the maintainer. +icon: (Optional) A URL to an SVG or PNG image to be used as an icon. +appVersion: (Optional) The app version. This needn't be SemVer. +deprecated: (Optional, boolean) Whether this chart is deprecated. +annotations: + example: (Optional) A list of annotations keyed by name. +``` + +{{< notice note >}} + +- The field `dependencies` is used to define chart dependencies which were located in a separate file `requirements.yaml` for `v1` charts. For more information, see [Chart Dependencies](https://helm.sh/docs/topics/charts/#chart-dependencies). +- The field `type` is used to define the type of chart. Allowed values are `application` and `library`. For more information, see [Chart Types](https://helm.sh/docs/topics/charts/#chart-types). + +{{}} + +## Values.yaml and Templates + +Written in the [Go template language](https://golang.org/pkg/text/template/), Helm chart templates are stored in the `templates` folder of a chart. There are two ways to provide values for the templates: + +1. 
Make a `values.yaml` file inside of a chart with default values that can be referenced. +2. Make a YAML file that contains necessary values and use the file through the command line with `helm install`. + +Here is an example of the template in the `templates` folder. + +```yaml +apiVersion: v1 +kind: ReplicationController +metadata: + name: deis-database + namespace: deis + labels: + app.kubernetes.io/managed-by: deis +spec: + replicas: 1 + selector: + app.kubernetes.io/name: deis-database + template: + metadata: + labels: + app.kubernetes.io/name: deis-database + spec: + serviceAccount: deis-database + containers: + - name: deis-database + image: {{.Values.imageRegistry}}/postgres:{{.Values.dockerTag}} + imagePullPolicy: {{.Values.pullPolicy}} + ports: + - containerPort: 5432 + env: + - name: DATABASE_STORAGE + value: {{default "minio" .Values.storage}} +``` + +The above example defines a ReplicationController template in Kubernetes. There are some values referenced in it which are defined in `values.yaml`. + +- `imageRegistry`: The Docker image registry. +- `dockerTag`: The Docker image tag. +- `pullPolicy`: The image pulling policy. +- `storage`: The storage backend. It defaults to `minio`. + +An example `values.yaml` file: + +```text +imageRegistry: "quay.io/deis" +dockerTag: "latest" +pullPolicy: "Always" +storage: "s3" +``` + +## Reference + +[Helm Documentation](https://helm.sh/docs/) + +[Charts](https://helm.sh/docs/topics/charts/) \ No newline at end of file diff --git a/content/en/docs/v3.4/application-store/app-lifecycle-management.md b/content/en/docs/v3.4/application-store/app-lifecycle-management.md new file mode 100644 index 000000000..7d2cd0003 --- /dev/null +++ b/content/en/docs/v3.4/application-store/app-lifecycle-management.md @@ -0,0 +1,220 @@ +--- +title: "Kubernetes Application Lifecycle Management" +keywords: 'Kubernetes, KubeSphere, app-store' +description: 'Manage your app across the entire lifecycle, including submission, review, test, release, upgrade and removal.' +linkTitle: 'Application Lifecycle Management' +weight: 14100 +--- + +KubeSphere integrates [OpenPitrix](https://github.com/openpitrix/openpitrix), an open-source multi-cloud application management platform, to set up the App Store, managing Kubernetes applications throughout their entire lifecycle. The App Store supports two kinds of application deployment: + +- **Template-Based Apps** provide a way for developers and independent software vendors (ISVs) to share applications with users in a workspace. You can also import third-party app repositories within a workspace. +- **Composed Apps** help users quickly build a complete application using multiple microservices to compose it. KubeSphere allows users to select existing services or create new services to create a composed app on the one-stop console. + +Using [Redis](https://redis.io/) as an example application, this tutorial demonstrates how to manage the Kubernetes app throughout the entire lifecycle, including submission, review, test, release, upgrade and removal. + +## Prerequisites + +- You need to enable the [KubeSphere App Store (OpenPitrix)](../../pluggable-components/app-store/). +- You need to create a workspace, a project and a user (`project-regular`). For more information, see [Create Workspaces, Projects, Users and Roles](../../quick-start/create-workspace-and-project/). 
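+
+If you are not sure whether the App Store has been enabled, you can roughly verify it from the command line before starting. This is only a sketch and assumes a default installation in which ks-installer keeps its configuration in a `ClusterConfiguration` object named `ks-installer` in the `kubesphere-system` namespace.
+
+```bash
+# Check whether the App Store (OpenPitrix) component is turned on in the ks-installer configuration.
+kubectl -n kubesphere-system get clusterconfiguration ks-installer -o yaml | grep -A 3 openpitrix
+```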
+ +## Hands-on Lab + +### Step 1: Create a customized role and two users + +You need to create two users first, one for ISVs (`isv`) and the other (`reviewer`) for app technical reviewers. + +1. Log in to the KubeSphere console with the user `admin`. Click **Platform** in the upper-left corner and select **Access Control**. In **Platform Roles**, click **Create**. + +2. Set a name for the role, such as `app-review`, and click **Edit Permissions**. + +3. In **App Management**, choose **App Template Management** and **App Template Viewing** in the permission list, and then click **OK**. + + {{< notice note >}} + + The user who is granted the role `app-review` has the permission to view the App Store on the platform and manage apps, including review and removal. + + {{}} + +4. As the role is ready now, you need to create a user and grant the role `app-review` to it. In **Users**, click **Create**. Provide the required information and click **OK**. + +5. Similarly, create another user `isv`, and grant the role of `platform-regular` to it. + +6. Invite both users created above to an existing workspace such as `demo-workspace`, and grant them the role of `workspace-admin`. + +### Step 2: Upload and submit an application + +1. Log in to KubeSphere as `isv` and go to your workspace. You need to upload the example app Redis to this workspace so that it can be used later. First, download the app [Redis 11.3.4](https://github.com/kubesphere/tutorial/raw/master/tutorial%205%20-%20app-store/redis-11.3.4.tgz) and click **Upload Template** in **App Templates**. + + {{< notice note >}} + + In this example, a new version of Redis will be uploaded later to demonstrate the upgrade feature. + + {{}} + +2. In the dialog that appears, click **Upload Helm Chart** to upload the chart file. Click **OK** to continue. + +3. Basic information of the app displays under **App Information**. To upload an icon for the app, click **Upload Icon**. You can also skip it and click **OK** directly. + + {{< notice note >}} + + The maximum accepted resolution of the app icon is 96 x 96 pixels. + + {{}} + +4. The app displays in the template list with the status **Developing** after it is successfully uploaded, which means this app is under development. The uploaded app is visible to all members in the same workspace. + +5. Go to the detail page of the app template by clicking Redis from the list. You can edit the basic information of this app by clicking **Edit**. + +6. You can customize the app's basic information by specifying the fields in the pop-up window. + +7. Click **OK** to save your changes, then you can test this application by deploying it to Kubernetes. Click the draft version to expand the menu and click **Install**. + + {{< notice note >}} + + If you don't want to test the app, you can submit it for review directly. However, it is recommended that you test your app deployment and function first before you submit it for review, especially in a production environment. This helps you detect any problems in advance and accelerate the review process. + + {{}} + +8. Select the cluster and project to which you want to deploy the app, set up different configurations for the app, and then click **Install**. + + {{< notice note >}} + + Some apps can be deployed with all configurations set in a form. You can use the toggle switch to see its YAML file, which contains all parameters you need to specify in the form. + + {{}} + +9. Wait for a few minutes, then switch to the tab **App Instances**. 
You will find that Redis has been deployed successfully. + +10. After you test the app with no issues found, you can click **Submit for Release** to submit this application for release. + + {{< notice note >}} + +The version number must start with a number and contain decimal points. + +{{}} + +11. After the app is submitted, the app status will change to **Submitted**. Now app reviewers can release it. + +### Step 3: Release the application + +1. Log out of KubeSphere and log back in as `app-reviewer`. Click **Platform** in the upper-left corner and select **App Store Management**. On the **App Release** page, the app submitted in the previous step displays under the tab **Unreleased**. + +2. To release this app, click it to inspect the app information, introduction, chart file and update logs from the pop-up window. + +3. The reviewer needs to decide whether the app meets the release criteria on the App Store. Click **Pass** to approve it or **Reject** to deny an app submission. + +### Step 4: Release the application to the App Store + +After the app is approved, `isv` can release the Redis application to the App Store, allowing all users on the platform to find and deploy this application. + +1. Log out of KubeSphere and log back in as `isv`. Go to your workspace and click Redis on the **Template-Based Apps** page. On its details page, expand the version menu, then click **Release to Store**. In the pop-up prompt, click **OK** to confirm. + +2. Under **App Release**, you can see the app status. **Activated** means it is available in the App Store. + +3. Click **View in Store** to go to its **Versions** page in the App Store. Alternatively, click **App Store** in the upper-left corner, and you can also see the app. + + {{< notice note >}} + + You may see two Redis apps in the App Store, one of which is a built-in app in KubeSphere. Note that a newly-released app displays at the beginning of the list in the App Store. + + {{}} + +4. Now, users in the workspace can install Redis from the App Store. To install the app to Kubernetes, click the app to go to its **App Information** page, and click **Install**. + + {{< notice note >}} + + If you have trouble installing an application and the **Status** column shows **Failed**, you can hover your cursor over the **Failed** icon to see the error message. + + {{}} + +### Step 5: Create an application category + +`app-reviewer` can create multiple categories for different types of applications based on their function and usage. It is similar to setting tags and categories can be used in the App Store as filters, such as Big Data, Middleware, and IoT. + +1. Log in to KubeSphere as `app-reviewer`. To create a category, go to the **App Store Management** page and click in **App Categories**. + +2. Set a name and icon for the category in the dialog, then click **OK**. For Redis, you can enter `Database` for the field **Name**. + + {{< notice note >}} + + Usually, an app reviewer creates necessary categories in advance and ISVs select the category in which an app appears before submitting it for review. A newly-created category has no app in it. + + {{}} + +3. As the category is created, you can assign the category to your app. In **Uncategorized**, select Redis and click **Change Category**. + +4. In the dialog, select the category (**Database**) from the drop-down list and click **OK**. + +5. The app displays in the category as expected. 
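+
+If you installed Redis in Step 4 and prefer to double-check it from the command line rather than on the console, a quick verification might look like the following. The namespace and Pod name are examples and depend on the project and release name you chose during installation.
+
+```bash
+# List the Pods created by the Redis release in the target project (namespace).
+kubectl -n demo-project get pods | grep redis
+
+# Ping the Redis server from inside one of its Pods (replace the Pod name with a real one).
+kubectl -n demo-project exec -it redis-master-0 -- redis-cli ping
+```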
+ +### Step 6: Add a new version + +To allow workspace users to upgrade apps, you need to add new app versions to KubeSphere first. Follow the steps below to add a new version for the example app. + +1. Log in to KubeSphere as `isv` again and navigate to **Template-Based Apps**. Click the app Redis in the list. + +2. Download [Redis 12.0.0](https://github.com/kubesphere/tutorial/raw/master/tutorial%205%20-%20app-store/redis-12.0.0.tgz), which is a new version of Redis for demonstration in this tutorial. On the tab **Versions**, click **New Version** on the right to upload the package you just downloaded. + +3. Click **Upload Helm Chart** and click **OK** after it is uploaded. + +4. The new app version displays in the version list. You can click it to expand the menu and test the new version. Besides, you can also submit it for review and release it to the App Store, which is the same as the steps shown above. + +### Step 7: Upgrade an application + +After a new version is released to the App Store, all users can upgrade this application to the new version. + +{{< notice note >}} + +To follow the steps below, you must deploy an app of one of its old versions first. In this example, Redis 11.3.4 was already deployed in the project `demo-project` and its new version 12.0.0 was released to the App Store. + +{{}} + +1. Log in to KubeSphere as `project-regular`, navigate to the **Apps** page of the project, and click the app to upgrade. + +2. Click **More** and select **Edit Settings** from the drop-down list. + +3. In the window that appears, you can see the YAML file of application configurations. Select the new version from the drop-down list on the right. You can customize the YAML file of the new version. In this tutorial, click **Update** to use the default configurations directly. + + {{< notice note >}} + + You can select the same version from the drop-down list on the right as that on the left to customize current application configurations through the YAML file. + + {{}} + +4. On the **Apps** page, you can see that the app is being upgraded. The status will change to **Running** when the upgrade finishes. + +### Step 8: Suspend an application + +You can choose to remove an app entirely from the App Store or suspend a specific app version. + +1. Log in to KubeSphere as `app-reviewer`. Click **Platform** in the upper-left corner and select **App Store Management**. On the **App Store** page, click Redis. + +2. On the detail page, click **Suspend App** and select **OK** in the dialog to confirm the operation to remove the app from the App Store. + + {{< notice note >}} + + Removing an app from the App Store does not affect tenants who are using the app. + + {{}} + +3. To make the app available in the App Store again, click **Activate App**. + +4. To suspend a specific app version, expand the version menu and click **Suspend Version**. In the dialog that appears, click **OK** to confirm. + + {{< notice note >}} + + After an app version is suspended, this version is not available in the App Store. Suspending an app version does not affect tenants who are using this version. + + {{}} + +5. To make the app version available in the App Store again, click **Activate Version**. 
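+
+When troubleshooting any of the steps above, it can also help to inspect the custom resources that back App Store objects in your cluster. The exact resource kinds depend on your KubeSphere version, so list what is actually available first; the commands below are a sketch rather than an authoritative reference.
+
+```bash
+# List API resources related to applications; the App Store kinds available in this cluster (if any) show up here.
+kubectl api-resources | grep -i application
+
+# Then query one of the kinds returned above, for example:
+# kubectl get <kind> --all-namespaces
+```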
+ + + + + + + + + diff --git a/content/en/docs/v3.4/application-store/built-in-apps/_index.md b/content/en/docs/v3.4/application-store/built-in-apps/_index.md new file mode 100644 index 000000000..2ee1bc0ca --- /dev/null +++ b/content/en/docs/v3.4/application-store/built-in-apps/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Built-in Applications" +weight: 14200 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/application-store/built-in-apps/deploy-chaos-mesh.md b/content/en/docs/v3.4/application-store/built-in-apps/deploy-chaos-mesh.md new file mode 100644 index 000000000..9e10be832 --- /dev/null +++ b/content/en/docs/v3.4/application-store/built-in-apps/deploy-chaos-mesh.md @@ -0,0 +1,82 @@ +--- +title: 'Deploy Chaos Mesh on KubeSphere' +tag: 'KubeSphere, Kubernetes, Applications, Chaos Engineering, Chaos experiments, Chaos Mesh' +keywords: 'Chaos Mesh, Kubernetes, Helm, KubeSphere' +description: 'Learn how to deploy Chaos Mesh on KubeSphere and start running chaos experiments.' +linkTitle: "Deploy Chaos Mesh on KubeSphere" +--- + +[Chaos Mesh](https://github.com/chaos-mesh/chaos-mesh) is a cloud-native Chaos Engineering platform that orchestrates chaos in Kubernetes environments. With Chaos Mesh, you can test your system's resilience and robustness on Kubernetes by injecting various types of faults into Pods, network, file system, and even the kernel. + +![Chaos Mesh architecture](/images/docs/v3.3/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-architecture-v2.png) + +## Enable App Store on KubeSphere + +1. Make sure you have installed and enabled the [KubeSphere App Store](../../../pluggable-components/app-store/). + +2. You need to create a workspace, a project, and a user account (project-regular) for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the operator role. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Chaos experiments with Chaos Mesh + +### Step 1: Deploy Chaos Mesh + +1. Login KubeSphere as `project-regular`, search for **chaos-mesh** in the **App Store**, and click on the search result to enter the app. + + ![Chaos Mesh app](/images/docs/v3.3/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-app.png) + +2. In the **App Information** page, click **Install** on the upper right corner. + + ![Install Chaos Mesh](/images/docs/v3.3/appstore/built-in-apps/deploy-chaos-mesh/install-chaos-mesh.png) + +3. In the **App Settings** page, set the application **Name,** **Location** (as your Namespace), and **App Version**, and then click **Next** on the upper right corner. + + ![Chaos Mesh basic information](/images/docs/v3.3/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-basic-info.png) + +4. Configure the `values.yaml` file as needed, or click **Install** to use the default configuration. + + ![Chaos Mesh configurations](/images/docs/v3.3/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-config.png) + +5. Wait for the deployment to be finished. Upon completion, Chaos Mesh will be shown as **Running** in KubeSphere. + + ![Chaos Mesh deployed](/images/docs/v3.3/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-deployed.png) + + +### Step 2: Visit Chaos Dashboard + +1. In the **Resource Status** page, copy the **NodePort **of `chaos-dashboard`. + + ![Chaos Mesh NodePort](/images/docs/v3.3/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-nodeport.png) + +2. 
Access the Chaos Dashboard by entering `${NodeIP}:${NODEPORT}` in your browser. Refer to [Manage User Permissions](https://chaos-mesh.org/docs/manage-user-permissions/) to generate a Token and log into Chaos Dashboard. + + ![Login to Chaos Dashboard](/images/docs/v3.3/appstore/built-in-apps/deploy-chaos-mesh/login-to-dashboard.png) + +### Step 3: Create a chaos experiment + +Before creating a chaos experiment, you should identify and deploy your experiment target, for example, to test how an application works under network latency. Here, we use a demo application `web-show` as the target application to be tested, and the test goal is to observe the system network latency. You can deploy a demo application `web-show` with the following command: `web-show`. + +```bash +curl -sSL https://mirrors.chaos-mesh.org/latest/web-show/deploy.sh | bash +``` + +> Note: The network latency of the Pod can be observed directly from the web-show application pad to the kube-system pod. + +1. From your web browser, visit ${NodeIP}:8081 to access the **Web Show** application. + + ![Chaos Mesh web show app](/images/docs/v3.3/appstore/built-in-apps/deploy-chaos-mesh/web-show-app.png) + +2. Log in to Chaos Dashboard to create a chaos experiment. To observe the effect of network latency on the application, we set the **Target **as "Network Attack" to simulate a network delay scenario. + + ![Chaos Dashboard](/images/docs/v3.3/appstore/built-in-apps/deploy-chaos-mesh/chaos-dashboard-networkchaos.png) + + The **Scope** of the experiment is set to `app: web-show`. + + ![Chaos Experiment scope](/images/docs/v3.3/appstore/built-in-apps/deploy-chaos-mesh/chaos-experiment-scope.png) + +3. Start the chaos experiment by submitting it. + + ![Submit Chaos Experiment](/images/docs/v3.3/appstore/built-in-apps/deploy-chaos-mesh/start-chaos-experiment.png) + +Now, you should be able to visit **Web Show** to observe experiment results: + +![Chaos Experiment result](/images/docs/v3.3/appstore/built-in-apps/deploy-chaos-mesh/experiment-result.png) \ No newline at end of file diff --git a/content/en/docs/v3.4/application-store/built-in-apps/etcd-app.md b/content/en/docs/v3.4/application-store/built-in-apps/etcd-app.md new file mode 100644 index 000000000..f34455ffa --- /dev/null +++ b/content/en/docs/v3.4/application-store/built-in-apps/etcd-app.md @@ -0,0 +1,58 @@ +--- +title: "Deploy etcd on KubeSphere" +keywords: 'Kubernetes, KubeSphere, etcd, app-store' +description: 'Learn how to deploy etcd from the App Store of KubeSphere and access its service.' +linkTitle: "Deploy etcd on KubeSphere" +weight: 14210 +--- + +Written in Go, [etcd](https://etcd.io/) is a distributed key-value store to store data that needs to be accessed by a distributed system or cluster of machines. In Kubernetes, it is the backend for service discovery and stores cluster states and configurations. + +This tutorial walks you through an example of deploying etcd from the App Store of KubeSphere. + +## Prerequisites + +- Please make sure you [enable the OpenPitrix system](../../../pluggable-components/app-store/). +- You need to create a workspace, a project, and a user account (`project-regular`) for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. 
For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Hands-on Lab + +### Step 1: Deploy etcd from the App Store + +1. On the **Overview** page of the project `demo-project`, click **App Store** in the upper-left corner. + +2. Find etcd and click **Install** on the **App Information** page. + +3. Set a name and select an app version. Make sure etcd is deployed in `demo-project` and click **Next**. + +4. On the **App Settings** page, specify the size of the persistent volume for etcd and click **Install**. + + {{< notice note >}} + + To specify more values for etcd, use the toggle switch to see the app's manifest in YAML format and edit its configurations. + + {{}} + +5. In **Template-Based Apps** of the **Apps** page, wait until etcd is up and running. + +### Step 2: Access the etcd service + +After the app is deployed, you can use etcdctl, a command-line tool for interacting with the etcd server, to access etcd on the KubeSphere console directly. + +1. Navigate to **StatefulSets** in **Workloads**, and click the service name of etcd. + +2. Under **Pods**, expand the menu to see container details, and then click the **Terminal** icon. + +3. In the terminal, you can read and write data directly. For example, execute the following two commands respectively. + + ```bash + etcdctl set /name kubesphere + ``` + + ```bash + etcdctl get /name + ``` + +4. For clients within the KubeSphere cluster, the etcd service can be accessed through `..svc.:2379` (for example, `etcd-bqe0g4.demo-project.svc.cluster.local:2379` in this guide). + +5. For more information, see [the official documentation of etcd](https://etcd.io/docs/v3.4.0/). \ No newline at end of file diff --git a/content/en/docs/v3.4/application-store/built-in-apps/harbor-app.md b/content/en/docs/v3.4/application-store/built-in-apps/harbor-app.md new file mode 100644 index 000000000..39e0f9c1f --- /dev/null +++ b/content/en/docs/v3.4/application-store/built-in-apps/harbor-app.md @@ -0,0 +1,123 @@ +--- +title: "Deploy Harbor on KubeSphere" +keywords: 'Kubernetes, KubeSphere, Harbor, app-store' +description: 'Learn how to deploy Harbor from the App Store of KubeSphere and access its service.' +linkTitle: "Deploy Harbor on KubeSphere" +weight: 14220 +--- +[Harbor](https://goharbor.io/) is an open-source registry that secures artifacts with policies and role-based access control, ensures images are scanned and free from vulnerabilities, and signs images as trusted. + +This tutorial walks you through an example of deploying [Harbor](https://goharbor.io/) from the App Store of KubeSphere. + +## Prerequisites + +- Please make sure you [enable the OpenPitrix system](../../../pluggable-components/app-store/). +- You need to create a workspace, a project, and a user account for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Hands-on Lab + +### Step 1: Deploy Harbor from the App Store + +1. On the **Overview** page of the project `demo-project`, click **App Store** in the upper-left corner. + +2. Find Harbor and click **Install** on the **App Information** page. + +3. Set a name and select an app version. 
Make sure Harbor is deployed in `demo-project` and click **Next**. + +4. On the **App Settings** page, edit the configuration file of Harbor. Pay attention to the following fields. + + `type`: The method you use to access the Harbor Service. This example uses `nodePort`. + + `tls`: Specify whether you want to enable HTTPS. Set it to `false` for most cases. + + `externalURL`: The URL exposed to tenants. + + {{< notice note >}} + + - Don't forget to specify `externalURL`. This field can be very helpful if you have trouble accessing Harbor. + + - Make sure you use the HTTP protocol and its corresponding `nodePort` in this tutorial. For more information, see [the example configuration](#faq) in FAQ. + + {{}} + + When you finish editing the configuration, click **Install** to continue. + +5. Wait until Harbor is up and running. + +### Step 2: Access Harbor + +1. Based on the field `expose.type` you set in the configuration file, the access method may be different. As this example uses `nodePort` to access Harbor, visit `http://:30002` as set in the previous step. + + ![harbor-login](/images/docs/v3.3/appstore/built-in-apps/harbor-app/harbor-login.jpg) + + {{< notice note >}} + + You may need to open the port in your security groups and configure related port forwarding rules depending on your where your Kubernetes cluster is deployed. + + {{}} + +2. Log in to Harbor using the default account and password (`admin/Harbor12345`). The password is defined in the field `harborAdminPassword` in the configuration file. + + ![harbor-dashboard](/images/docs/v3.3/appstore/built-in-apps/harbor-app/harbor-dashboard.jpg) + +## FAQ + +1. How to enable HTTP login? + + Set `tls.enabled` to `false` in step 1 above. The protocol of `externalURL` must be the same as `expose.nodePort.ports`. + + If you use Docker login, set `externalURL` to one of `insecure-registries` in `daemon.json`, then reload Docker. + + Here is an example configuration file for your reference. Pay special attention to the comments. + + ```yaml + ## NOTICE 192.168.0.9 is the example IP address and you must use your own. + expose: + type: nodePort + tls: + enabled: false + secretName: "" + notarySecretName: "" + commonName: "192.168.0.9" # Change commonName to your own. + nodePort: + # The name of NodePort service + name: harbor + ports: + http: + # The service port Harbor listens on when serving with HTTP + port: 80 + # The node port Harbor listens on when serving with HTTP + nodePort: 30002 + https: + # The service port Harbor listens on when serving with HTTPS + port: 443 + # The node port Harbor listens on when serving with HTTPS + nodePort: 30003 + # Only needed when notary.enabled is set to true + notary: + # The service port Notary listens on + port: 4443 + # The node port Notary listens on + nodePort: 30004 + + externalURL: http://192.168.0.9:30002 # Use your own IP address. + + # The initial password of Harbor admin. Change it from portal after launching Harbor + harborAdminPassword: "Harbor12345" + # The secret key used for encryption. Must be a string of 16 chars. + secretKey: "not-a-secure-key" + ``` + +2. How to enable HTTPS login? + + a. Use self-signed certificates. + * Set `tls.enabled` to `true` in the configuration file in step 1, and edit `externalURL` accordingly. + * Copy the CA certificates stored in the Pod `harbor-core` \'s `/etc/core/ca` to your host. + * Trust the CA certificates by your host first, then restart Docker. + + b. Use public SSL. + * Add certificates as a Secret. 
+ * Set `tls.enabled` to `true` in the configuration file in step 1, and edit `externalURL` accordingly. + * Edit `tls.secretName`. + +For more information, see [the documentation of Harbor](https://goharbor.io/docs/2.1.0/). \ No newline at end of file diff --git a/content/en/docs/v3.4/application-store/built-in-apps/memcached-app.md b/content/en/docs/v3.4/application-store/built-in-apps/memcached-app.md new file mode 100644 index 000000000..bfc5828d6 --- /dev/null +++ b/content/en/docs/v3.4/application-store/built-in-apps/memcached-app.md @@ -0,0 +1,49 @@ +--- +title: "Deploy Memcached on KubeSphere" +keywords: 'Kubernetes, KubeSphere, Memcached, app-store' +description: 'Learn how to deploy Memcached from the App Store of KubeSphere and access its service.' +linkTitle: "Deploy Memcached on KubeSphere" +weight: 14230 +--- +[Memcached](https://memcached.org/) is an in-memory key-value store for small chunks of arbitrary data (strings, objects) from results of database calls, API calls, or page rendering. Its API is available for the majority of popular languages. + +This tutorial walks you through an example of deploying Memcached from the App Store of KubeSphere. + +## Prerequisites + +- Please make sure you [enable the OpenPitrix system](../../../pluggable-components/app-store/). +- You need to create a workspace, a project, and a user account (`project-regular`) for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Hands-on Lab + +### Step 1: Deploy Memcached from the App Store + +1. On the **Overview** page of the project `demo-project`, click **App Store** in the upper-left corner. + +2. Find Memcached and click **Install** on the **App Information** page. + +3. Set a name and select an app version. Make sure Memcached is deployed in `demo-project` and click **Next**. + +4. In **App Settings**, you can use the default configuration or customize the configuration by editing the YAML file directly. Click **Install** to continue. + +5. Wait until Memcached is up and running. + +### Step 2: Access Memcached + +1. Navigate to **Services**, and click the service name of Memcached. + +2. On the detail page, you can find the port number and Pod's IP address under **Ports** and **Pods** respectively. + +3. As the Memcached service is headless, access it inside the cluster through the Pod IP and port number. The basic syntax of Memcached `telnet` command is `telnet HOST PORT`. For example: + + ```bash + # telnet 10.10.235.3 11211 + Trying 10.10.235.3... + Connected to 10.10.235.3. + Escape character is '^]'. + set runoob 0 900 9 + memcached + STORED + ``` + +4. For more information, see [Memcached](https://memcached.org/). 
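+
+If you would rather test the headless Service from a throwaway client Pod than from the container terminal, something like the following also works over the plain-text Memcached protocol. The namespace, Pod IP, and image tag are examples; replace them with your own values.
+
+```bash
+# Store and read back a key over the Memcached text protocol using a temporary busybox Pod.
+# Replace 10.10.235.3 with the IP of one of your Memcached Pods.
+kubectl -n demo-project run memcached-client --rm -it --restart=Never --image=busybox:1.36 -- \
+  sh -c 'printf "set greeting 0 900 5\r\nhello\r\nget greeting\r\nquit\r\n" | nc 10.10.235.3 11211'
+```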
\ No newline at end of file diff --git a/content/en/docs/v3.4/application-store/built-in-apps/meshery-app.md b/content/en/docs/v3.4/application-store/built-in-apps/meshery-app.md new file mode 100644 index 000000000..ab72037ac --- /dev/null +++ b/content/en/docs/v3.4/application-store/built-in-apps/meshery-app.md @@ -0,0 +1,53 @@ +--- +title: "Deploy Meshery on KubeSphere" +keywords: 'Kubernetes, KubeSphere, Meshery,Serive Mesh, Layer5, app-store' +description: 'Learn how to deploy Meshery from the App Store of KubeSphere and access its service.' +linkTitle: "Deploy Meshery on KubeSphere" +weight: 14240 +--- +[Meshery](https://meshery.io/) is the open source, cloud native management plane that enables the adoption, operation, and management of Kubernetes, any service mesh, and their workloads. + +This tutorial walks you through an example of deploying Meshery from the App Store of KubeSphere. + +## Prerequisites + +- Please make sure you [enable the OpenPitrix system](../../../pluggable-components/app-store/). +- You need to create a workspace, a project, and a user account (`project-regular`) for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Hands-on Lab + +### Step 1: Deploy Meshery from the App Store + +1. On the **Overview** page of the project `demo-project`, click **App Store** in the upper-left corner. +2. Search for **Meshery** in the App Store, and click on the search result to enter the app. + + ![meshery-app](/images/docs/v3.3/appstore/built-in-apps/meshery-app/meshery-app.png) + +3. In the **App Information** page, click **Install** on the upper right corner. + + ![meshery-install](/images/docs/v3.3/appstore/built-in-apps/meshery-app/Meshery-install.png) + +4. In the App Settings page, set the application **Name**, **Location** (as your Namespace), and App Version, and then click Next on the upper right corner. + + ![meshery-info](/images/docs/v3.3/appstore/built-in-apps/meshery-app/Meshery-info.png) + +5. Configure the **values.yaml** file as needed, or click **Install** to use the default configuration. + + ![meshery-yaml](/images/docs/v3.3/appstore/built-in-apps/meshery-app/Meshery-yaml.png) + +6. Wait for the deployment to be finished. Upon completion, **Meshery** will be shown as **Running** in KubeSphere. + + ![meshery-app-running](/images/docs/v3.3/appstore/built-in-apps/meshery-app/Meshery-app-running.png) + +### Step 2: Access the Meshery Dashboard + +1. Go to **Services** and click the service name of Meshery. +2. In the **Resource Status** page, copy the **NodePort** of Meshery. + + ![meshery-service](/images/docs/v3.3/appstore/built-in-apps/meshery-app/Meshery-service.png) + +3. Access the Meshery Dashboard by entering **${NodeIP}:${NODEPORT}** in your browser. + + ![meshery-dashboard](/images/docs/v3.3/appstore/built-in-apps/meshery-app/meshery-dashboard.png) + +4. For more information about Meshery, refer to [the official documentation of Meshery](https://docs.meshery.io/). 
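+
+If you prefer the command line over the console for looking up the exposed port, a query like the one below prints the NodePort directly. The Service name and namespace are examples and may differ in your release.
+
+```bash
+# Print the NodePort assigned to the Meshery Service.
+kubectl -n demo-project get svc meshery -o jsonpath='{.spec.ports[0].nodePort}{"\n"}'
+```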
\ No newline at end of file diff --git a/content/en/docs/v3.4/application-store/built-in-apps/minio-app.md b/content/en/docs/v3.4/application-store/built-in-apps/minio-app.md new file mode 100644 index 000000000..128503fe9 --- /dev/null +++ b/content/en/docs/v3.4/application-store/built-in-apps/minio-app.md @@ -0,0 +1,57 @@ +--- +title: "Deploy MinIO on KubeSphere" +keywords: 'Kubernetes, KubeSphere, Minio, app-store' +description: 'Learn how to deploy Minio from the App Store of KubeSphere and access its service.' +linkTitle: "Deploy MinIO on KubeSphere" +weight: 14240 +--- +[MinIO](https://min.io/) object storage is designed for high performance and the S3 API. It is ideal for large, private cloud environments with stringent security requirements and delivers mission-critical availability across a diverse range of workloads. + +This tutorial walks you through an example of deploying MinIO from the App Store of KubeSphere. + +## Prerequisites + +- Please make sure you [enable the OpenPitrix system](../../../pluggable-components/app-store/). +- You need to create a workspace, a project, and a user account (`project-regular`) for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Hands-on Lab + +### Step 1: Deploy MinIO from the App Store + +1. On the **Overview** page of the project `demo-project`, click **App Store** in the upper-left corner. + +2. Find MinIO and click **Install** on the **App Information** page. + +3. Set a name and select an app version. Make sure MinIO is deployed in `demo-project` and click **Next**. + +4. In **App Settings**, you can use the default configuration or customize the configuration by editing the YAML file directly. Click **Install** to continue. + +5. Wait until MinIO is up and running. + +### Step 2: Access the MinIO browser + +To access MinIO outside the cluster, you need to expose the app through a NodePort first. + +1. Go to **Services** and click the service name of MinIO. + +2. Click **More** and select **Edit External Access** from the drop-down menu. + +3. Select **NodePort** for **Access Method** and click **OK**. For more information, see [Project Gateway](../../../project-administration/project-gateway/). + +4. On the **Services** page, click **MinIO**. On the page that appears, under **Ports**, you can see the port is exposed. + +5. To access the MinIO browser, you need `accessKey` and `secretKey`, which are specified in the configuration file of MinIO. Go to **Template-Based Apps** in **Apps**, click MinIO, and you can find the value of these two fields under the tab **Chart Files**. + +6. Access the MinIO browser through `:` using `accessKey` and `secretKey`. + + ![minio-browser](/images/docs/v3.3/appstore/built-in-apps/minio-app/minio-browser.png) + + ![minio-browser-interface](/images/docs/v3.3/appstore/built-in-apps/minio-app/minio-browser-interface.png) + + {{< notice note >}} + + You may need to open the port in your security groups and configure related port forwarding rules depending on where your Kubernetes cluster is deployed. + + {{}} + +7. For more information about MinIO, refer to [the official documentation of MinIO](https://docs.min.io/). 
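+
+Besides the browser, you can also exercise the deployment with the MinIO client (`mc`) from any machine that can reach the exposed NodePort. The alias name is arbitrary, and the endpoint and credentials below are placeholders for the node address, exposed port, `accessKey`, and `secretKey` you noted above.
+
+```bash
+# Register the deployment under a local alias, then create a bucket and upload a test object.
+mc alias set demo-minio http://<NodeIP>:<NodePort> <accessKey> <secretKey>
+mc mb demo-minio/test-bucket
+echo "hello" > hello.txt && mc cp hello.txt demo-minio/test-bucket/
+mc ls demo-minio/test-bucket
+```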
\ No newline at end of file diff --git a/content/en/docs/v3.4/application-store/built-in-apps/mongodb-app.md b/content/en/docs/v3.4/application-store/built-in-apps/mongodb-app.md new file mode 100644 index 000000000..6a5fe6fd2 --- /dev/null +++ b/content/en/docs/v3.4/application-store/built-in-apps/mongodb-app.md @@ -0,0 +1,54 @@ +--- +title: "Deploy MongoDB on KubeSphere" +keywords: 'KubeSphere, Kubernetes, Installation, MongoDB' +description: 'Learn how to deploy MongoDB from the App Store of KubeSphere and access its service.' +linkTitle: "Deploy MongoDB on KubeSphere" +weight: 14250 +--- + +[MongoDB](https://www.mongodb.com/) is a general purpose, document-based, distributed database built for modern application developers and for the cloud era. + +This tutorial walks you through an example of deploying MongoDB from the App Store of KubeSphere. + +## Prerequisites + +- Please make sure you [enable the OpenPitrix system](../../../pluggable-components/app-store/). +- You need to create a workspace, a project, and a user account (`project-regular`) for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Hands-on Lab + +### Step 1: Deploy MongoDB from the App Store + +1. On the **Overview** page of the project `demo-project`, click **App Store** in the upper-left corner. + +2. Find MongoDB and click **Install** on the **App Information** page. + +3. Set a name and select an app version. Make sure MongoDB is deployed in `demo-project` and click **Next**. + +4. In **App Settings**, specify persistent volumes for the app and record the username and the password which will be used to access the app. When you finish, click **Install**. + + {{< notice note >}} + + To specify more values for MongoDB, use the toggle switch to see the app's manifest in YAML format and edit its configurations. + + {{}} + +5. Wait until MongoDB is up and running. + +### Step 2: Access the MongoDB Terminal + +1. Go to **Services** and click the service name of MongoDB. + +2. Under **Pods**, expand the menu to see container details, and then click the **Terminal** icon. + +3. In the pop-up window, enter commands in the terminal directly to use the app. + + ![mongodb-service-terminal](/images/docs/v3.3/appstore/built-in-apps/mongodb-app/mongodb-service-terminal.jpg) + + {{< notice note >}} + + If you want to access MongoDB outside the cluster, click **More** and select **Edit External Access**. In the dialog that appears, select **NodePort** as the access mode. Use the port number to access MongoDB after it is exposed. You may need to open the port in your security groups and configure related port forwarding rules depending on where your Kubernetes cluster is deployed. + + {{}} + +4. For more information, see [the official documentation of MongoDB](https://docs.mongodb.com/manual/). 
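+
+If you exposed the Service through a NodePort as described in the note above, you can also connect from outside the cluster with the MongoDB shell. The host, port, and credentials are placeholders for the values you recorded in Step 1, and whether you use `mongosh` or the legacy `mongo` shell depends on what is installed on your machine.
+
+```bash
+# Connect to the exposed MongoDB instance and run a quick smoke test.
+mongosh "mongodb://<username>:<password>@<NodeIP>:<NodePort>/admin" --eval "db.runCommand({ ping: 1 })"
+```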
diff --git a/content/en/docs/v3.4/application-store/built-in-apps/mysql-app.md b/content/en/docs/v3.4/application-store/built-in-apps/mysql-app.md new file mode 100644 index 000000000..085c18084 --- /dev/null +++ b/content/en/docs/v3.4/application-store/built-in-apps/mysql-app.md @@ -0,0 +1,66 @@ +--- +title: "Deploy MySQL on KubeSphere" +keywords: 'KubeSphere, Kubernetes, Installation, MySQL' +description: 'Learn how to deploy MySQL from the App Store of KubeSphere and access its service.' + +link title: "Deploy MySQL" +weight: 14260 +--- +[MySQL](https://www.mysql.com/) is an open-source relational database management system (RDBMS), which uses the most commonly used database management language - Structured Query Language (SQL) for database management. It provides a fully managed database service to deploy cloud-native applications using the world's most popular open-source database. + +This tutorial walks you through an example of deploying MySQL from the App Store of KubeSphere. + +## Prerequisites + +- Please make sure you [enable the OpenPitrix system](../../../pluggable-components/app-store/). +- You need to create a workspace, a project, and a user account for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Hands-on Lab + +### Step 1: Deploy MySQL from the App Store + +1. On the **Overview** page of the project `demo-project`, click **App Store** in the upper-left corner. + +2. Find MySQL and click **Install** on the **App Information** page. + +3. Set a name and select an app version. Make sure MySQL is deployed in `demo-project` and click **Next**. + +4. In **App Settings**, uncomment the `mysqlRootPassword` field and customize the password. Click **Install** to continue. + +5. Wait until MySQL is up and running. + +### Step 2: Access the MySQL terminal + +1. Go to **Workloads** and click the workload name of MySQL. + +2. Under **Pods**, expand the menu to see container details, and then click the **Terminal** icon. + +3. In the terminal, execute `mysql -uroot -ptesting` to log in to MySQL as the root user. + + ![log-in-mysql](/images/docs/v3.3/appstore/built-in-apps/mysql-app/log-in-mysql.png) + +### Step 3: Access the MySQL database outside the cluster + +To access MySQL outside the cluster, you need to expose the app through a NodePort first. + +1. Go to **Services** and click the service name of MySQL. + +2. Click **More** and select **Edit External Access** from the drop-down list. + +3. Select **NodePort** for **Access Method** and click **OK**. For more information, see [Project Gateway](../../../project-administration/project-gateway/). + +4. Under **Ports**, you can see the port is exposed. The port and public IP address will be used in the next step to access the MySQL database. + +5. To access your MySQL database, you need to use the MySQL client or install a third-party application such as SQLPro Studio for the connection. The following example demonstrates how to access the MySQL database through SQLPro Studio. 
+ + ![login](/images/docs/v3.3/appstore/built-in-apps/mysql-app/login.png) + + ![access-mysql-success](/images/docs/v3.3/appstore/built-in-apps/mysql-app/access-mysql-success.png) + + {{< notice note >}} + + You may need to open the port in your security groups and configure related port forwarding rules depending on where your Kubernetes cluster is deployed. + + {{}} + +6. For more information about MySQL, refer to [the official documentation of MySQL](https://dev.mysql.com/doc/). diff --git a/content/en/docs/v3.4/application-store/built-in-apps/nginx-app.md b/content/en/docs/v3.4/application-store/built-in-apps/nginx-app.md new file mode 100644 index 000000000..0eb997edc --- /dev/null +++ b/content/en/docs/v3.4/application-store/built-in-apps/nginx-app.md @@ -0,0 +1,60 @@ +--- +title: "Deploy NGINX on KubeSphere" +keywords: 'KubeSphere, Kubernetes, Installation, NGINX' +description: 'Learn how to deploy NGINX from the App Store of KubeSphere and access its service.' +linkTitle: "Deploy NGINX on KubeSphere" +weight: 14270 +--- + +[NGINX](https://www.nginx.com/) is an open-source software application for web serving, reverse proxying, caching, load balancing, media streaming, and more. + +This tutorial walks you through an example of deploying NGINX from the App Store of KubeSphere. + +## Prerequisites + +- Please make sure you [enable the OpenPitrix system](../../../pluggable-components/app-store/). +- You need to create a workspace, a project, and a user account (`project-regular`) for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Hands-on Lab + +### Step 1: Deploy NGINX from the App Store + +1. On the **Overview** page of the project `demo-project`, click **App Store** in the upper-left corner. + +2. Find NGINX and click **Install** on the **App Information** page. + +3. Set a name and select an app version. Make sure NGINX is deployed in `demo-project` and click **Next**. + +4. In **App Settings**, specify the number of replicas to deploy for the app and enable Ingress based on your needs. When you finish, click **Install**. + + {{< notice note >}} + + To specify more values for NGINX, use the toggle switch to see the app’s manifest in YAML format and edit its configurations. + + {{}} + +5. Wait until NGINX is up and running. + +### Step 2: Access NGINX + +To access NGINX outside the cluster, you need to expose the app through a NodePort first. + +1. Go to **Services** and click the service name of NGINX. + +2. On the service details page, click **More** and select **Edit External Access** from the drop-down list. + +3. Select **NodePort** for **Access Method** and click **OK**. For more information, see [Project Gateway](../../../project-administration/project-gateway/). + +4. Under **Ports**, you can see the port is exposed. + +5. Access NGINX through `:`. + + ![access-nginx](/images/docs/v3.3/appstore/built-in-apps/nginx-app/access-nginx.png) + + {{< notice note >}} + + You may need to open the port in your security groups and configure related port forwarding rules depending on where your Kubernetes cluster is deployed. + + {{}} + +6. 
For more information, see [the official documentation of NGINX](https://docs.nginx.com/?_ga=2.48327718.1445131049.1605510038-1186152749.1605510038). diff --git a/content/en/docs/v3.4/application-store/built-in-apps/postgresql-app.md b/content/en/docs/v3.4/application-store/built-in-apps/postgresql-app.md new file mode 100644 index 000000000..191961b70 --- /dev/null +++ b/content/en/docs/v3.4/application-store/built-in-apps/postgresql-app.md @@ -0,0 +1,60 @@ +--- +title: "Deploy PostgreSQL on KubeSphere" +keywords: 'Kubernetes, KubeSphere, PostgreSQL, app-store' +description: 'Learn how to deploy PostgreSQL from the App Store of KubeSphere and access its service.' +linkTitle: "Deploy PostgreSQL on KubeSphere" +weight: 14280 +--- + +[PostgreSQL](https://www.postgresql.org/) is a powerful, open-source object-relational database system, which is famous for reliability, feature robustness, and performance. + +This tutorial walks you through an example of how to deploy PostgreSQL from the App Store of KubeSphere. + +## Prerequisites + +- Please make sure you [enable the OpenPitrix system](../../../pluggable-components/app-store/). +- You need to create a workspace, a project, and a user account (`project-regular`) for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Hands-on Lab + +### Step 1: Deploy PostgreSQL from the App Store + +1. On the **Overview** page of the project `demo-project`, click **App Store** in the upper-left corner. + +2. Find PostgreSQL and click **Install** on the **App Information** page. + +3. Set a name and select an app version. Make sure PostgreSQL is deployed in `demo-project` and click **Next**. + +4. In **App Settings**, specify persistent volumes for the app and record the username and the password, which will be used later to access the app. When you finish, click **Install**. + + {{< notice note >}} + + To specify more values for PostgreSQL, use the toggle switch to see the app's manifest in YAML format and edit its configurations. + + {{}} + +5. Wait until PostgreSQL is up and running. + +### Step 2: Access the PostgreSQL database + +To access PostgreSQL outside the cluster, you need to expose the app through a NodePort first. + +1. Go to **Services** and click the service name of PostgreSQL. + +2. Click **More** and select **Edit External Access** from the drop-down list. + +3. Select **NodePort** for **Access Method** and click **OK**. For more information, see [Project Gateway](../../../project-administration/project-gateway/). + +4. Under **Ports**, you can see the port is exposed, which will be used in the next step to access the PostgreSQL database. + +5. Expand the Pod menu under **Pods** and click the **Terminal** icon. In the pop-up window, enter commands directly to access the database. + + ![postgresql-output](/images/docs/v3.3/appstore/built-in-apps/postgresql-app/postgresql-output.png) + + {{< notice note >}} + + You can also use a third-party application such as SQLPro Studio to connect to the database. You may need to open the port in your security groups and configure related port forwarding rules depending on where your Kubernetes cluster is deployed. + + {{}} + +6. 
For more information, see [the official documentation of PostgreSQL](https://www.postgresql.org/docs/). \ No newline at end of file diff --git a/content/en/docs/v3.4/application-store/built-in-apps/rabbitmq-app.md b/content/en/docs/v3.4/application-store/built-in-apps/rabbitmq-app.md new file mode 100644 index 000000000..720b72445 --- /dev/null +++ b/content/en/docs/v3.4/application-store/built-in-apps/rabbitmq-app.md @@ -0,0 +1,61 @@ +--- +title: "Deploy RabbitMQ on KubeSphere" +keywords: 'KubeSphere, RabbitMQ, Kubernetes, Installation' +description: 'Learn how to deploy RabbitMQ from the App Store of KubeSphere and access its service.' +linkTitle: "Deploy RabbitMQ on KubeSphere" +weight: 14290 +--- +[RabbitMQ](https://www.rabbitmq.com/) is the most widely deployed open-source message broker. It is lightweight and easy to deploy on premises and in the cloud. It supports multiple messaging protocols. RabbitMQ can be deployed in distributed and federated configurations to meet high-scale, high-availability requirements. + +This tutorial walks you through an example of how to deploy RabbitMQ from the App Store of KubeSphere. + +## Prerequisites + +- Please make sure you [enable the OpenPitrix system](../../../pluggable-components/app-store/). +- You need to create a workspace, a project, and a user account for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Hands-on Lab + +### Step 1: Deploy RabbitMQ from the App Store + +1. On the **Overview** page of the project `demo-project`, click **App Store** in the top-left corner. + +2. Find RabbitMQ and click **Install** on the **App Information** page. + +3. Set a name and select an app version. Make sure RabbitMQ is deployed in `demo-project` and click **Next**. + +4. In **App Settings**, you can use the default settings directly or customize the settings either by specifying fields in a form or editing the YAML file. Record the value of **Root Username** and the value of **Root Password**, which will be used later for login. Click **Install** to continue. + + {{< notice tip >}} + + To see the manifest file, toggle the **Edit YAML** switch. + + {{}} + +5. Wait until RabbitMQ is up and running. + +### Step 2: Access the RabbitMQ dashboard + +To access RabbitMQ outside the cluster, you need to expose the app through a NodePort first. + +1. Go to **Services** and click the service name of RabbitMQ. + +2. Click **More** and select **Edit External Access** from the drop-down list. + +3. Select **NodePort** for **Access Method** and click **OK**. For more information, see [Project Gateway](../../../project-administration/project-gateway/). + +4. Under **Ports**, you can see ports are exposed. + +5. Access RabbitMQ **management** through `:`. Note that the username and password are those you set in **Step 1**. + + ![rabbitmq-dashboard](/images/docs/v3.3/appstore/built-in-apps/rabbitmq-app/rabbitmq-dashboard.png) + + ![rabbitma-dashboard-detail](/images/docs/v3.3/appstore/built-in-apps/rabbitmq-app/rabbitma-dashboard-detail.png) + + {{< notice note >}} + + You may need to open the port in your security groups and configure related port forwarding rules depending on where your Kubernetes cluster is deployed. + + {{}} + +6. 
For more information about RabbitMQ, refer to [the official documentation of RabbitMQ](https://www.rabbitmq.com/documentation.html). \ No newline at end of file diff --git a/content/en/docs/v3.4/application-store/built-in-apps/radondb-mysql-app.md b/content/en/docs/v3.4/application-store/built-in-apps/radondb-mysql-app.md new file mode 100644 index 000000000..cb7b8639e --- /dev/null +++ b/content/en/docs/v3.4/application-store/built-in-apps/radondb-mysql-app.md @@ -0,0 +1,50 @@ +--- +title: "Deploy RadonDB MySQL on KubeSphere" +keywords: 'KubeSphere, Kubernetes, Installation, RadonDB MySQL' +description: 'Learn how to deploy RadonDB MySQL from the App Store of KubeSphere and access its service.' +linkTitle: "Deploy RadonDB MySQL on KubeSphere" +weight: 14293 +--- + +[RadonDB MySQL](https://github.com/radondb/radondb-mysql-kubernetes) is an open source, cloud-native, and highly available cluster solution based on [MySQL](https://MySQL.org) database. With the Raft protocol, RadonDB MySQL enables fast failover without losing any transactions. + +This tutorial demonstrates how to deploy RadonDB MySQL from the App Store of KubeSphere. + +{{< notice note >}} + +The version of RadonDB MySQL in **App Store** is v1.0.0, and is no longer maintained. + +We recommend you to use the latest version of RadonDB MySQL. For deployment instructions, please refer to [Deploy RadonDB MySQL Operator and Cluster](../../external-apps/deploy-radondb-mysql/). + +{{}} + +## Prerequisites + +- Please make sure you [enable the OpenPitrix system](../../../pluggable-components/app-store/). +- You need to create a workspace, a project, and a user account (`project-regular`) for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Hands-on Lab + +### Step 1: Deploy RadonDB MySQL from the App Store + +1. On the **Overview** page of the project `demo-project`, click **App Store** in the top-left corner. + +2. Find RadonDB MySQL and click **Install** on the **App Information** page. + +3. Set a name and select an app version. Make sure RadonDB MySQL is deployed in `demo-project` and click **Next**. + +4. In **App Settings**, you can use the default settings or customize the settings by editing the YAML file directly. When you finish, click **Install**. + +5. Wait until RadonDB MySQL is up and running. + +### Step 2: Access RadonDB MySQL + +1. In **Services** under **Application Workloads**, click the Service name of RadonDB MySQL. + +2. Under **Pods**, expand the menu to see container details, and then click the **Terminal** icon. + +3. In the pop-up window, enter commands in the terminal directly to use the app. + + ![Access RadonDB MySQL](/images/docs/v3.3/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-service-terminal.png) + +4. If you want to access RadonDB MySQL outside the cluster, see [the open-source project of RadonDB MySQL](https://github.com/radondb/radondb-mysql-kubernetes) in detail. 
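As a supplement to Step 2, the commands you run in the container terminal might look like the following sketch. The user name and password are placeholders for the values configured for your RadonDB MySQL installation.

```bash
# Log in with the MySQL client from the container terminal.
# <user> is a placeholder for the database user of your installation.
mysql -u<user> -p
# After entering the password, verify that the cluster is serving queries:
# mysql> SHOW DATABASES;
```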
diff --git a/content/en/docs/v3.4/application-store/built-in-apps/radondb-postgresql-app.md b/content/en/docs/v3.4/application-store/built-in-apps/radondb-postgresql-app.md new file mode 100644 index 000000000..c55538921 --- /dev/null +++ b/content/en/docs/v3.4/application-store/built-in-apps/radondb-postgresql-app.md @@ -0,0 +1,62 @@ +--- +title: "Deploy RadonDB PostgreSQL on KubeSphere" +keywords: 'KubeSphere, Kubernetes, Installation, RadonDB PostgreSQL' +description: 'Learn how to deploy RadonDB PostgreSQL from the App Store of KubeSphere and access its service.' +linkTitle: "Deploy RadonDB PostgreSQL on KubeSphere" +weight: 14294 +--- + +[RadonDB PostgreSQL](https://github.com/radondb/radondb-postgresql-kubernetes) is an open source, cloud-native, and highly available cluster solution based on [PostgreSQL](https://postgresql.org) database system. + +This tutorial demonstrates how to deploy RadonDB PostgreSQL from the App Store of KubeSphere. + +## Prerequisites + +- Please make sure you [enable the OpenPitrix system](../../../pluggable-components/app-store/). +- You need to create a workspace, a project, and a user account (`project-regular`) for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Hands-on Lab + +### Step 1: Deploy RadonDB PostgreSQL from the App Store + +1. On the **Overview** page of the project `demo-project`, click **App Store** in the upper-left corner. + +2. Click **Database & Cache** under **Categories**. + +3. Find RadonDB PostgreSQL and click **Install** on the **App Information** page. + +4. Set a name and select an app version. Make sure RadonDB PostgreSQL is deployed in `demo-project` and click **Next**. + +5. In **App Settings**, you can use the default settings or customize the settings by editing the YAML file. When you finish, click **Install**. + +6. Wait until RadonDB PostgreSQL is up and running. + +### Step 2: View PostgreSQL cluster status + +1. On the **Overview** page of the project `demo-project`, you can see a list of resource usage in the current project. + +2. In **Workloads** under **Application Workloads**, click the **StatefulSets** tab, and then you can see the StatefulSet is up and running. + + Click the StatefulSet to go to its detail page. You can see the metrics in line charts over a period of time under the **Monitoring** tab. + +3. In **Pods** under **Application Workloads**, you can see all the Pods are up and running. + +4. In **Persistent Volume Claims** under **Storage**, you can see the PostgreSQL Cluster components are using persistent volumes. + + Usage of the persistent volume is also monitored. Click a persistent volume to go to its detail page. + +### Step 3: Access RadonDB PostgreSQL + +1. Go to **Pods** under **Application Workloads** and click a Pod to go to its details page. + +2. On the **Resource Status** page, click the **Terminal** icon. + +3. In the displayed dialog box, run the following command and enter the user password in the terminal to use the app. + + ```bash + psql -h -p 5432 -U postgres -d postgres + ``` + + ![Access RadonDB PostgreSQL](/images/docs/v3.3/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-service-terminal.png) + +4. 
If you want to access RadonDB PostgreSQL outside the cluster, see [the open-source project of RadonDB PostgreSQL](https://github.com/radondb/radondb-postgresql-kubernetes) in detail. diff --git a/content/en/docs/v3.4/application-store/built-in-apps/redis-app.md b/content/en/docs/v3.4/application-store/built-in-apps/redis-app.md new file mode 100644 index 000000000..bb5e6d5c4 --- /dev/null +++ b/content/en/docs/v3.4/application-store/built-in-apps/redis-app.md @@ -0,0 +1,48 @@ +--- +title: "Deploy Redis on KubeSphere" +keywords: 'KubeSphere, Kubernetes, Installation, Redis' +description: 'Learn how to deploy Redis from the App Store of KubeSphere and access its service.' +linkTitle: "Deploy Redis on KubeSphere" +weight: 14291 +--- + +[Redis](https://redis.io/) is an open-source (BSD licensed), in-memory data structure store, used as a database, cache and message broker. + +This tutorial walks you through an example of deploying Redis from the App Store of KubeSphere. + +## Prerequisites + +- Please make sure you [enable the OpenPitrix system](../../../pluggable-components/app-store/). +- You need to create a workspace, a project, and a user account (`project-regular`) for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Hands-on Lab + +### Step 1: Deploy Redis from the App Store + +1. On the **Overview** page of the project `demo-project`, click **App Store** in the upper-left corner. + +2. Find Redis and click **Install** on the **App Information** page. + +3. Set a name and select an app version. Make sure Redis is deployed in `demo-project` and click **Next**. + +4. In **App Settings**, specify persistent volumes and a password for the app. When you finish, click **Install**. + + {{< notice note >}} + + To specify more values for Redis, use the toggle switch to see the app's manifest in YAML format and edit its settings. + + {{}} + +5. Wait until Redis is up and running. + +### Step 2: Access the Redis terminal + +1. Go to **Services** and click the service name of Redis. + +2. Under **Pods**, expand the menu to see container details, and then click the **Terminal** icon. + +3. In the pop-up window, use the `redis-cli` command in the terminal to use the app. + + ![use-redis](/images/docs/v3.3/appstore/built-in-apps/redis-app/use-redis.png) + +4. For more information, see [the official documentation of Redis](https://redis.io/documentation). diff --git a/content/en/docs/v3.4/application-store/built-in-apps/tomcat-app.md b/content/en/docs/v3.4/application-store/built-in-apps/tomcat-app.md new file mode 100644 index 000000000..a038336cd --- /dev/null +++ b/content/en/docs/v3.4/application-store/built-in-apps/tomcat-app.md @@ -0,0 +1,63 @@ +--- +title: "Deploy Tomcat on KubeSphere" +keywords: 'KubeSphere, Kubernetes, Installation, Tomcat' +description: 'Learn how to deploy Tomcat from the App Store of KubeSphere and access its service.' +linkTitle: "Deploy Tomcat on KubeSphere" +weight: 14292 +--- +[Apache Tomcat](https://tomcat.apache.org/index.html) powers numerous large-scale, mission-critical web applications across a diverse range of industries and organizations. Tomcat provides a pure Java HTTP web server environment in which Java code can run. 
+ +This tutorial walks you through an example of deploying Tomcat from the App Store of KubeSphere. + +## Prerequisites + +- Please make sure you [enable the OpenPitrix system](../../../pluggable-components/app-store/). +- You need to create a workspace, a project, and a user account for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Hands-on Lab + +### Step 1: Deploy Tomcat from the App Store + +1. On the **Overview** page of the project `demo-project`, click **App Store** in the upper-left corner. + +2. Find Tomcat and click **Install** on the **App Information** page. + +1. Set a name and select an app version. Make sure Tomcat is deployed in `demo-project` and click **Next**. + +2. In **App Settings**, you can use the default settings or customize the settings by editing the YAML file directly. Click **Install** to continue. + +3. Wait until Tomcat is up and running. + +### Step 2: Access the Tomcat terminal + +1. Go to **Services** and click the service name of Tomcat. + +2. Under **Pods**, expand the menu to see container details, and then click the **Terminal** icon. + +3. You can view deployed projects in `/usr/local/tomcat/webapps`. + + ![view-project](/images/docs/v3.3/appstore/built-in-apps/tomcat-app/view-project.png) + +### Step 3: Access a Tomcat project from your browser + +To access a Tomcat project outside the cluster, you need to expose the app through a NodePort first. + +1. Go to **Services** and click the service name of Tomcat. + +2. Click **More** and select **Edit External Access** from the drop-down list. + +3. Select **NodePort** for **Access Method** and click **OK**. For more information, see [Project Gateway](../../../project-administration/project-gateway/). + +4. Under **Ports**, you can see the port is exposed. + +5. Access the sample Tomcat project through `:/sample` in your browser. + + ![access-tomcat-browser](/images/docs/v3.3/appstore/built-in-apps/tomcat-app/access-tomcat-browser.png) + + {{< notice note >}} + + You may need to open the port in your security groups and configure related port forwarding rules depending on where your Kubernetes cluster is deployed. + + {{}} + +6. For more information about Tomcat, refer to [the official documentation of Tomcat](https://tomcat.apache.org/index.html). 
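As a supplement to Step 3, you can also verify the exposed sample project from outside the cluster without a browser. This is a sketch: `<NodeIP>` and `<NodePort>` are placeholders for a node IP address and the port exposed for the Tomcat Service.

```bash
# Request the sample application through the NodePort.
# An HTTP 200 response indicates that the sample project is reachable.
curl -I http://<NodeIP>:<NodePort>/sample/
```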
\ No newline at end of file diff --git a/content/en/docs/v3.4/application-store/external-apps/_index.md b/content/en/docs/v3.4/application-store/external-apps/_index.md new file mode 100644 index 000000000..e65bb0fad --- /dev/null +++ b/content/en/docs/v3.4/application-store/external-apps/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "External Applications" +weight: 14300 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/application-store/external-apps/deploy-clickhouse.md b/content/en/docs/v3.4/application-store/external-apps/deploy-clickhouse.md new file mode 100644 index 000000000..1a3ea9ab2 --- /dev/null +++ b/content/en/docs/v3.4/application-store/external-apps/deploy-clickhouse.md @@ -0,0 +1,144 @@ +--- +title: "Deploy ClickHouse Operator and a ClickHouse Cluster on KubeSphere" +keywords: 'KubeSphere, Kubernetes, ClickHouse, ClickHouse Operator, ClickHouse Cluster' +description: 'Learn how to deploy ClickHouse Operator and a ClickHouse Cluster on KubeSphere.' +linkTitle: "Deploy RadonDB ClickHouse Operator and Cluster" +weight: 14340 +--- + +[ClickHouse](https://clickhouse.tech/) is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP). [RadonDB ClickHouse](https://github.com/radondb/radondb-clickhouse-kubernetes) is a deeply customized ClickHouse cluster application maintaining ClickHouse cluster functions and featuring automated cluster management, data redistribution in clusters, and excellent performance with less cost. + +This tutorial demonstrates how to deploy ClickHouse Operator and a ClickHouse Cluster on KubeSphere. + +## Prerequisites + +- You need to enable [the OpenPitrix system](../../../pluggable-components/app-store/). +- You need to create a workspace, a project, and two user accounts (`ws-admin` and `project-regular`) for this tutorial. The account `ws-admin` must be granted the role of `workspace-admin` in the workspace, and the account `project-regular` must be invited to the project with the role of `operator`. This tutorial uses `demo-workspace` and `demo-project` for demonstration. If they are not ready, refer to [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). +- You need to enable the gateway in your project to provide external access. If they are not ready, refer to [Project Gateway](../../../project-administration/project-gateway/). + +## Hands-on Lab + +### Step 1: Deploy ClickHouse Operator + +1. Log in to the KubeSphere Web console as `admin`, and use **Kubectl** from the **Toolbox** in the lower-right corner to run the following command to install ClickHouse Operator. It is recommended that you have at least two worker nodes available in your cluster. + + ```bash + $ kubectl apply -f https://raw.githubusercontent.com/radondb/radondb-clickhouse-kubernetes/master/clickhouse-operator-install.yml + ``` + + {{< notice note >}} + + This command will install ClickHouse Operator in the namespace `kube-system`. Therefore, ClickHouse Operator only needs to be installed once in a Kubernetes cluster. + + {{}} + +2. You can see the expected output as below if the installation is successful. 
+ + ```powershell + $ kubectl apply -f https://raw.githubusercontent.com/radondb/radondb-clickhouse-kubernetes/main/clickhouse-operator-install.yml + customresourcedefinition.apiextensions.k8s.io/clickhouseinstallations.clickhouse.radondb.com created + customresourcedefinition.apiextensions.k8s.io/clickhouseinstallationtemplates.clickhouse.radondb.com created + customresourcedefinition.apiextensions.k8s.io/clickhouseoperatorconfigurations.clickhouse.radondb.com created + serviceaccount/clickhouse-operator created + clusterrole.rbac.authorization.k8s.io/clickhouse-operator-kube-system created + clusterrolebinding.rbac.authorization.k8s.io/clickhouse-operator-kube-system created + configmap/etc-clickhouse-operator-files created + configmap/etc-clickhouse-operator-confd-files created + configmap/etc-clickhouse-operator-configd-files created + configmap/etc-clickhouse-operator-templatesd-files created + configmap/etc-clickhouse-operator-usersd-files created + deployment.apps/clickhouse-operator created + service/clickhouse-operator-metrics created + ``` + +3. You can run the following command to view the status of ClickHouse Operator resources. + + ```bash + $ kubectl get all --selector=app=clickhouse-operator -n kube-system + ``` + + Expected output: + + ``` + NAME READY STATUS RESTARTS AGE + pod/clickhouse-operator-6b8494c8f-tmkmn 2/2 Running 0 6m34s + + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + service/clickhouse-operator-metrics ClusterIP 10.233.51.66 8888/TCP 6m34s + + NAME READY UP-TO-DATE AVAILABLE AGE + deployment.apps/clickhouse-operator 1/1 1 1 6m34s + + NAME DESIRED CURRENT READY AGE + replicaset.apps/clickhouse-operator-6b8494c8f 1 1 1 6m34s + ``` + +### Step 2: Add an app repository + +1. Log out of KubeSphere and log back in as `ws-admin`. In `demo-workspace`, go to **App Repositories** under **App Management**, and then click **Add**. + +2. In the dialog that appears, enter `clickhouse` for the app repository name and `https://radondb.github.io/radondb-clickhouse-kubernetes/` for the repository URL. Click **Validate** to verify the URL, and you will see a green check mark next to the URL if it is available. Click **OK** to continue. + +3. Your repository will display in the list after it is successfully imported to KubeSphere. + +### Step 3: Deploy a ClickHouse Cluster + +1. Log out of KubeSphere and log back in as `project-regular`. In `demo-project`, go to **Apps** under **Application Workloads** and click **Create**. + +2. In the dialog that appears, select **From App Template**. + +3. On the new page that appears, select **clickhouse** from the drop-down list and then click **clickhouse-cluster**. + +4. On the **Chart Files** tab, you can view the configuration and download the `values.yaml` file. Click **Install** to continue. + +5. On the **Basic Information** page, confirm the app name, app version, and deployment location. Click **Next** to continue. + +6. On the **App Settings** tab, you can change the YAML file to customize settings. In this tutorial, click **Install** to use the default settings. + +7. After a while, you can see the app is in the **Running** status. + +### Step 4: View ClickHouse cluster status + +1. In **Workloads** under **Application Workloads**, click the **StatefulSets** tab, and you can see the StatefulSets are up and running. + +2. Click a single StatefulSet to go to its detail page. You can see the metrics in line charts over a period of time under the **Monitoring** tab. + +3. 
In **Pods** under **Application Workloads**, you can see all the Pods are up and running. + +4. In **Persistent Volume Claims** under **Storage**, you can see the ClickHouse Cluster components are using persistent volumes. + +5. Usage of the persistent volume is also monitored. Click a persistent volume to go to its detail page. + +6. On the **Overview** page of the project, you can see a list of resource usage in the current project. + +### Step 5: Access the ClickHouse cluster + +1. Log out of KubeSphere and log back in as `admin`. Hover your cursor over the hammer icon in the lower-right corner, and then select **Kubectl**. + +2. In the window that appears, run the following command and then navigate to the username and password of the ClickHouse cluster. + + ```bash + $ kubectl edit chi clickho-749j8s -n demo-project + ``` + + ![get-username-password](/images/docs/v3.3/appstore/external-apps/deploy-clickhouse/get-username-password.png) + + {{< notice note >}} + + In the above command, `clickho-749j8s` is the ClickHouse application name and `demo-project` is the project name. Make sure you use your own application name and project name. + + {{}} + +3. Run the following command to access the ClickHouse cluster, and then you can use command like `show databases` to interact with it. + + ```bash + $ kubectl exec -it chi-clickho-749j8s-all-nodes-0-0-0 -n demo-project -- clickhouse-client --user=clickhouse --password=c1ickh0use0perator + ``` + + ![use-clickhouse](/images/docs/v3.3/appstore/external-apps/deploy-clickhouse/use-clickhouse.png) + + {{< notice note >}} + + In the above command, `chi-clickho-749j8s-all-nodes-0-0-0` is the Pod name and you can find it in **Pods** under **Application Workloads**. Make sure you use your own Pod name, project name, username, and password. + + {{}} diff --git a/content/en/docs/v3.4/application-store/external-apps/deploy-gitlab.md b/content/en/docs/v3.4/application-store/external-apps/deploy-gitlab.md new file mode 100644 index 000000000..107fae07c --- /dev/null +++ b/content/en/docs/v3.4/application-store/external-apps/deploy-gitlab.md @@ -0,0 +1,119 @@ +--- +title: "Deploy GitLab on KubeSphere" +keywords: 'KubeSphere, Kubernetes, GitLab, App Store' +description: 'Learn how to deploy GitLab on KubeSphere and access its service.' +linkTitle: "Deploy GitLab on KubeSphere" +weight: 14310 +--- + +[GitLab](https://about.gitlab.com/) is an open-source end-to-end software development platform with built-in version control, issue tracking, code review, CI/CD, and more. + +This tutorial demonstrates how to deploy GitLab on KubeSphere. + +## Prerequisites + +- You need to enable [the OpenPitrix system](../../../pluggable-components/app-store/). +- You need to create a workspace, a project, and two accounts (`ws-admin` and `project-regular`) for this tutorial. The account `ws-admin` must be granted the role of `workspace-admin` in the workspace, and the account `project-regular` must be invited to the project with the role of `operator`. If they are not ready, refer to [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Hands-on Lab + +### Step 1: Add an app repository + +1. Log in to KubeSphere as `ws-admin`. In your workspace, go to **App Repositories** under **App Management**, and then click **Add**. + +2. In the displayed dialog box, enter `main` for the app repository name and `https://charts.kubesphere.io/main` for the app repository URL. 
Click **Validate** to verify the URL and you will see a green check mark next to the URL if it is available. Click **OK** to continue. + +3. The repository displays in the list after it is successfully imported to KubeSphere. + +### Step 2: Deploy GitLab + +1. Log out of KubeSphere and log back in as `project-regular`. In your project, go to **Apps** under **Application Workloads** and click **Create**. + +2. In the dialog box that appears, select **From App Template**. + +3. Select `main` from the drop-down list, then click **gitlab**. + +4. On the **App Information** tab and the **Chart Files** tab, you can view the default settings on the console. Click **Install** to continue. + +5. On the **Basic Information** page, you can view the app name, app version, and deployment location. This tutorial uses the version `4.2.3 [13.2.2]`. Click **Next** to continue. + +6. On the **App Settings** page, use the following settings to replace the default ones, and then click **Install**. + + ```yaml + global: + hosts: + domain: demo-project.svc.cluster.local + gitlab-runner: + install: false + gitlab: + webservice: + helmTests: + enabled: false + ``` + + {{< notice note >}} + + `demo-project` refers to the project name where GitLab is deployed. Make sure you use your own project name. + + {{}} + +7. Wait for GitLab to be up and running. + +8. Go to **Workloads**, and you can see all the Deployments and StatefulSets created for GitLab. + + {{< notice note >}} + + It may take a while before all the Deployments and StatefulSets are up and running. + + {{}} + +### Step 3: Get the root user's password + +1. Go to **Secrets** under **Configuration**, enter `gitlab-initial-root-password` in the search box, and then press **Enter** on your keyboard to search the Secret. + +2. Click the Secret to go to its detail page, and then click icon in the upper-right corner to view the password. Make sure you copy it. + +### Step 4: Edit the hosts file + +1. Find the `hosts` file on your local machine. + + {{< notice note >}} + + The path of the `hosts` file is `/etc/hosts` for Linux, or `c:\windows\system32\drivers\etc\hosts` for Windows. + + {{}} + +2. Add the following item into the `hosts` file. + + ``` + 192.168.4.3 gitlab.demo-project.svc.cluster.local + ``` + + {{< notice note >}} + + - `192.168.4.3` and `demo-project` refer to the NodeIP and project name respectively where GitLab is deployed. Make sure you use your own NodeIP and project name. + - You can use any IP address of the nodes in your Kubernetes cluster. + + {{}} + +### Step 5: Access GitLab + +1. Go to **Services** under **Application Workloads**, enter `nginx-ingress-controller` in the search box, and then press **Enter** on your keyboard to search the Service. You can see the Service has been exposed through port `31246`, which you can use to access GitLab. + + {{< notice note >}} + + The port number shown on your console may be different. Make sure you use your own port number. + + {{}} + +2. Access GitLab through `http://gitlab.demo-project.svc.cluster.local:31246` using the root account and its initial password (`root/ojPWrWECLWN0XFJkGs7aAqtitGMJlVfS0fLEDE03P9S0ji34XDoWmxs2MzgZRRWF`). 
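   If you need to retrieve the initial root password again, you can also read it from the Secret with kubectl instead of the web console. This is a sketch: the exact Secret name depends on the app name you chose, and `demo-project` is the project used in this tutorial.

   ```bash
   # Find the Secret that stores the initial root password.
   kubectl -n demo-project get secrets | grep gitlab-initial-root-password
   # Decode the password field of that Secret (replace <secret-name> with the name found above).
   kubectl -n demo-project get secret <secret-name> -o jsonpath='{.data.password}' | base64 -d; echo
   ```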
+ + ![access-gitlab](/images/docs/v3.3/appstore/external-apps/deploy-gitlab/access_gitlab.png) + + ![gitlab-console](/images/docs/v3.3/appstore/external-apps/deploy-gitlab/gitlab_console.png) + + {{< notice note >}} + + You may need to open the port in your security groups and configure related port forwarding rules depending on where your Kubernetes cluster is deployed. + + {{}} diff --git a/content/en/docs/v3.4/application-store/external-apps/deploy-litmus.md b/content/en/docs/v3.4/application-store/external-apps/deploy-litmus.md new file mode 100644 index 000000000..bd0b69c3c --- /dev/null +++ b/content/en/docs/v3.4/application-store/external-apps/deploy-litmus.md @@ -0,0 +1,140 @@ +--- +title: 'Deploy Litmus on KubeSphere' +tag: 'KubeSphere, Kubernetes, Applications, Chaos engineering, Chaos experiments, Litmus' +keywords: 'Litmus,LitmusChaos,Kubernetes,Helm,KubeSphere' +description: 'Learn how to deploy Litmus on KubeSphere and create chaos experiments.' +linkTitle: "Deploy Litmus on KubeSphere" +Weight: 14350 +--- + +[Litmus](https://litmuschaos.io/) is an open-source, cloud-native chaos engineering toolkit that focuses on simulating failure tests on Kubernetes clusters. It helps developers and SREs find vulnerabilities in clusters and programs, therefore improving the robustness of the system. + +The Litmus architecture is as follows. For details about the Litmus architecture, see [Litmus Docs](https://litmusdocs-beta.netlify.app/docs/architecture/). + +![](https://pek3b.qingstor.com/kubesphere-community/images/20210601004600.png) + +This tutorial demonstrates how to deploy Litmus on KubeSphere and create chaos experiments. + +## Prerequisites +- You need to enable the [KubeSphere App Store (OpenPitrix)](https://v3-1.docs.kubesphere.io/docs/pluggable-components/app-store/). +- You need to create a workspace, a project, and two accounts (`ws-admin` and `project-regular`). For more information, see [Create Workspaces, Projects, Accounts, and Roles](https://v3-1.docs.kubesphere.io/docs/quick-start/create-workspace-and-project/). + +## Hands-on Lab + +### Step 1: Add an app repository +1. In your workspace, go to **App Repositories** under **App Management**, and then click **Add**. + +2. In the dialog that appears, set a name for the repository (for example, `litmus`) and enter the URL `https://litmuschaos.github.io/litmus-helm/`. Click **Validate** to verify the URL. Click **OK** to continue. + +3. The app repository displays in the list after it is successfully imported. + +### Step 2: Deploy the Litmus portal +1. Log out of the KubeSphere console and log back in as `project-regular`. In your project, go to **Apps** under **Application Workloads**, and then click **Create**. + +2. In the dialog that appears, choose **From App Template**. + + - **From App Store**: Select apps from the official APP Store of Kubephere. + + - **From App Template**: Select apps from workspace app templates and the third-party Helm app templates of App Repository. + +3. In the drop-down list, choose `litmus`, and then choose `litmus-2-0-0-beta`. + +4. You can view the app information and chart files. Under **Versions**, select a specific version and click **Install**. + +5. Under **Basic Information**, set a name for the app. Check the app version and the deployment location, and then click **Next**. + +6. Under **App Settings**, you can edit the yaml file or directly click **Install**. + +7. The app displays in the list after you create it successfully. 
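   You can also confirm from the command line that the portal components are up before logging in. This is a sketch: `<project-name>` is a placeholder for the project (namespace) where you deployed Litmus, and the Pod names assume the chart's default naming.

   ```bash
   # List the Litmus portal Pods and wait until they are all in the Running state.
   kubectl get pods -n <project-name> | grep litmusportal
   ```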
+ + {{< notice note>}} + + It may take a while before Litmus is running. Please wait for the deployment to finish. + + {{}} + +### Step 3: Access Litmus portal + +1. Go to **Services** under **Application Workloads**, copy the `NodePort` of `litmusportal-frontend-service`. + +2. You can access Litmus `Portal` through `${NodeIP}:${NODEPORT}` using the default username and password (`admin`/`litmus`). + + ![litmus-login-page](/images/docs/v3.3/appstore/external-apps/deploy-litmus/litmus-login-page.png) + + ![litmus-login-1](/images/docs/v3.3/appstore/external-apps/deploy-litmus/litmus-login-1.png) + + {{< notice note >}} + You may need to open the port in your security groups and configure port forwarding rules depending on where your Kubernetes cluster is deployed. Make sure you use your own `NodeIP`. + {{}} + +### Step 4: Deploy Agent (optional) + +The Litmus `Agent` can be classified into `Self-Agent` and `External-Agent`. By default, the cluster where Litmus is installed is automatically registered as `Self-Agent`, and `Portal` defaults to run chaos experiments in `Self-Agent`. + +![](https://pek3b.qingstor.com/kubesphere-community/images/20210604162858.png) + +For details about how to deploy External Agent, see [Litmus Docs](https://litmusdocs-beta.netlify.app/docs/agent-install). + +### Step 5: Create chaos experiments + +- **Experiment 1** + +1. You need to create a test application on your cluster. For example, you can deploy `nginx` using the following command: + + ```bash + $ kubectl create deployment nginx --image=nginx --replicas=2 --namespace=default + ``` + +2. Log in to Litmus `Portal`, and then click **Schedule workflow**. + +3. Choose an `Agent` (for example, `Self-Agent`), and then click **Next**. + +4. Choose **Create a new workflow using the experiments from MyHub**. In the drop-down list, select **Chaos Hub**, and then click **Next**. + +5. Set a name for the workflow (for example, `pod-delete`). You can also add a description for the workflow. Click **Next**. + +6. Click **Add a new experiment** to add a chaos experiment to the workflow. Click **Next**. + +7. Select an experiment (for example, `generic/pod-delete`), and then click **Done**. + +8. Select a point to adjust the weight of the experiment in the workflow. Click **Next**. + +9. Choose **Schedule now**, and then click **Next**. + +10. Verify the workflow, and then click **Finish**. + + On the KubeSphere console, you can see that a Pod is being deleted and recreated. + + On the Litmus `Portal`, you can see that the experiment is successful. + + ![litmus-successful](/images/docs/v3.3/appstore/external-apps/deploy-litmus/litmus-successful.png) + + You can click a specific workflow node to view its detailed logs. + + ![](https://pek3b.qingstor.com/kubesphere-community/images/20210604165915.png) + + +- **Experiment 2** + +1. Perform steps 1 to 10 in **Experiment 1** to create a new chaos experiment (`pod-cpu-hog`). + ![](https://pek3b.qingstor.com/kubesphere-community/images/20210604171414.png) + +2. On the KubeSphere console, you can see that the pod CPU usage is close to 1 core. + +- **Experiment 3** + +1. Set the `nginx` replica to `1`. You can see there is only one pod left and view the Pod IP address. + +2. Perform steps 1 to 10 in **Experiment 1** to create a new chaos experiment (`pod-network-loss`). + + ![](https://pek3b.qingstor.com/kubesphere-community/images/20210604174057.png) + + You can ping the Pod IP address to test the packet loss rate. 
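   A quick way to observe the injected packet loss is to keep pinging the Pod while the experiment is running. This is a sketch: `<pod-ip>` is a placeholder for the Pod IP address you noted in the previous step.

   ```bash
   # Send 20 probes and check the "packet loss" percentage in the summary line.
   ping -c 20 <pod-ip>
   ```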
+ + ![packet-loss-rate](/images/docs/v3.3/appstore/external-apps/deploy-litmus/packet-loss-rate.png) + + {{< notice note >}} + + The preceding experiments are conducted on Pods. You can also experiment on nodes and other Kubernetes components. For details about the chaos experiments, see [Litmus ChaosHub](https://hub.litmuschaos.io/). + + {{}} diff --git a/content/en/docs/v3.4/application-store/external-apps/deploy-metersphere.md b/content/en/docs/v3.4/application-store/external-apps/deploy-metersphere.md new file mode 100644 index 000000000..1c5385df0 --- /dev/null +++ b/content/en/docs/v3.4/application-store/external-apps/deploy-metersphere.md @@ -0,0 +1,64 @@ +--- +title: "Deploy MeterSphere on KubeSphere" +keywords: 'KubeSphere, Kubernetes, Applications, MeterSphere' +description: 'Learn how to deploy MeterSphere on KubeSphere.' +linkTitle: "Deploy MeterSphere on KubeSphere" +weight: 14330 +--- + +MeterSphere is an open-source, one-stop, and enterprise-level continuous testing platform. It features test tracking, interface testing, and performance testing. + +This tutorial demonstrates how to deploy MeterSphere on KubeSphere. + +## Prerequisites + +- You need to enable [the OpenPitrix system](../../../pluggable-components/app-store/). +- You need to create a workspace, a project, and two user accounts (`ws-admin` and `project-regular`) for this tutorial. The account `ws-admin` must be granted the role of `workspace-admin` in the workspace, and the account `project-regular` must be invited to the project with the role of `operator`. If they are not ready, refer to [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Hands-on Lab + +### Step 1: Add an app repository + +1. Log in to KubeSphere as `ws-admin`. In your workspace, go to **App Repositories** under **App Management**, and then click **Add**. + +2. In the dialog that appears, enter `metersphere` for the app repository name and `https://charts.kubesphere.io/test` for the MeterSphere repository URL. Click **Validate** to verify the URL and you will see a green check mark next to the URL if it is available. Click **OK** to continue. + +3. Your repository displays in the list after it is successfully imported to KubeSphere. + +### Step 2: Deploy MeterSphere + +1. Log out of KubeSphere and log back in as `project-regular`. In your project, go to **Apps** under **Application Workloads** and click **Create**. + +2. In the dialog that appears, select **From App Template**. + +3. Select `metersphere` from the drop-down list, then click **metersphere-chart**. + +4. On the **App Information** tab and the **Chart Files** tab, you can view the default configuration from the console. Click **Install** to continue. + +5. On the **Basic Information** page, you can view the app name, app version, and deployment location. Click **Next** to continue. + +6. On the **App Settings** page, change the value of `imageTag` from `master` to `v1.6`, and then click **Install**. + +7. Wait for MeterSphere to be up and running. + +8. Go to **Workloads**, and you can see two Deployments and three StatefulSets created for MeterSphere. + + {{< notice note >}} + + It may take a while before all the Deployments and StatefulSets are up and running. + + {{}} + +### Step 3: Access MeterSphere + +1. Go to **Services** under **Application Workloads**, and you can see the MeterSphere Service and its type is set to `NodePort` by default. + +2. 
You can access MeterSphere through `<NodeIP>:<NodePort>` using the default account and password (`admin/metersphere`). + + ![login-metersphere](/images/docs/v3.3/appstore/external-apps/deploy-metersphere/login-metersphere.PNG) + + {{< notice note >}} + + You may need to open the port in your security groups and configure related port forwarding rules depending on where your Kubernetes cluster is deployed. Make sure you use your own `NodeIP`. + + {{</ notice >}} diff --git a/content/en/docs/v3.4/application-store/external-apps/deploy-radondb-mysql.md b/content/en/docs/v3.4/application-store/external-apps/deploy-radondb-mysql.md new file mode 100644 index 000000000..3896bc8f7 --- /dev/null +++ b/content/en/docs/v3.4/application-store/external-apps/deploy-radondb-mysql.md @@ -0,0 +1,160 @@ +--- +title: "Deploy RadonDB MySQL Operator and RadonDB MySQL Cluster on KubeSphere" +keywords: 'KubeSphere, Kubernetes, Deploy, RadonDB MySQL' +description: 'Learn how to deploy RadonDB MySQL Operator and RadonDB MySQL Cluster on KubeSphere.' +linkTitle: "Deploy RadonDB MySQL Operator and Cluster" +weight: 14350 +--- + +[RadonDB MySQL](https://github.com/radondb/radondb-mysql-kubernetes) is an open source, cloud-native, and highly available cluster solution based on [MySQL](https://MySQL.org) database. With the Raft protocol, RadonDB MySQL enables fast failover without losing any transactions. + +This tutorial demonstrates how to deploy RadonDB MySQL Operator and a RadonDB MySQL Cluster on KubeSphere. + +## Prerequisites + +- You need to enable [the OpenPitrix system](../../../pluggable-components/app-store/). +- You need to create a workspace, a project, and a user for this tutorial. In this tutorial, you log in as `admin` and work in the project `demo-project` in the workspace `demo`. If they are not ready, refer to [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). +- You need to enable the gateway in your project to provide external access. If they are not ready, refer to [Project Gateway](../../../project-administration/project-gateway/). + +## Hands-on Lab + +### Step 1: Add an app repository + +1. Log in to the KubeSphere Web console. + +2. In the `demo` workspace, go to **App Repositories** under **App Management**, and then click **Create**. + +3. In the dialog that appears, enter an app repository name and URL. + + Enter `radondb-mysql-operator` for the app repository name. Enter `https://radondb.github.io/radondb-mysql-kubernetes/` for the RadonDB MySQL repository URL. Click **Validate** to verify the URL. + +4. You will see a green check mark next to the URL if it is available. Click **OK** to continue. + + Your repository displays in the list after it is successfully imported to KubeSphere. + +![certify URL](/images/docs/v3.3/appstore/external-apps/deploy-radondb-mysql/certify_url.png) + +### Step 2: Deploy RadonDB MySQL Operator + +1. In `demo-project`, go to **Apps** under **Application Workloads** and click **Deploy New App**. + +2. In the dialog that appears, select **From App Template**. + +3. On the new page that appears, select `radondb-mysql-operator` from the drop-down list. + +4. Click the **RadonDB MySQL Operator** chart, and then check and configure it. + + On the **Chart Files** tab, you can view the configuration and edit the `.yaml` files. + On the **Version** list, you can view the app versions and select a version. + + ![operator configuration](/images/docs/v3.3/appstore/external-apps/deploy-radondb-mysql/operator_yaml.png) + +5.
Click **Deploy**, go to the **Basic Information** page. + + Confirm the app name, app version, and deployment location. + +6. Click **Next** to continue, go to the **App Configuration** page. + + You can change the YAML file to customize settings. + +7. Click **Deploy** to use the default settings. + + After a while, you can see the app is in the **Running** status. + +### Step 3: Deploy a RadonDB MySQL cluster + +You can refer to [RadonDB MySQL template](https://github.com/radondb/radondb-mysql-kubernetes/tree/main/config/samples) to deploy a cluster, or you can customize the yaml file to deploy a cluster. + +Take `mysql_v1alpha1_mysqlcluster.yaml` template as an example to create a RadonDB MySQL cluster. + +1. Hover your cursor over the hammer icon in the lower-right corner, and then select **Kubectl**. + +2. Run the following command to install RadonDB MySQL cluster. + + ```kubectl + kubectl apply -f https://github.com/radondb/radondb-mysql-kubernetes/releases/latest/download/mysql_v1alpha1_mysqlcluster.yaml --namespace= + ``` + + {{< notice note >}} + + When no project is specified, the cluster will be installed in the `kubesphere-controls-system` project by default. To specify a project, the install command needs to add the `--namespace=` field. + + {{}} + + You can see the expected output as below if the installation is successful. + + ```powershell + $ kubectl apply -f https://github.com/radondb/radondb-mysql-kubernetes/releases/latest/download/mysql_v1alpha1_mysqlcluster.yaml --namespace=demo-project + mysqlcluster.mysql.radondb.com/sample created + ``` + +3. You can run the following command to view all services of RadonDB MySQL cluster. + + ```kubectl + kubectl get statefulset,svc + ``` + + **Expected output** + + ```powershell + $ kubectl get statefulset,svc + NAME READY AGE + statefulset.apps/sample-mysql 3/3 10m + + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + service/default-http-backend ClusterIP 10.96.69.202 80/TCP 3h2m + service/sample-follower ClusterIP 10.96.9.162 3306/TCP 10m + service/sample-leader ClusterIP 10.96.255.188 3306/TCP 10m + service/sample-mysql ClusterIP None 3306/TCP 10m + ``` + +### Step 4: View RadonDB MySQL cluster status + +1. In `demo-project` project,go to **Services** under **Application Workloads**, you can see the information of services. + +2. In **Workloads** under **Application Workloads**, click the **StatefulSets** tab, and you can see the StatefulSets are up and running. + + Click a single StatefulSet to go to its detail page. You can see the metrics in line charts over a period of time under the **Monitoring** tab. + +3. In **Pods** under **Application Workloads**, you can see all the Pods are up and running. + +4. In **Persistent Volume Claims** under **Storage**, you can see the ClickHouse Cluster components are using persistent volumes. + + Usage of the persistent volume is also monitored. Click a persistent volume to go to its detail page. + +### Step 5: Access the RadonDB MySQL cluster + +The following demonstrates how to access RadonDB MySQL in KubeSphere Web console. To access RadonDB MySQL from outside the cluster, please refer to [RadonDB MySQL open source project](https://github.com/radondb/radondb-mysql-kubernetes/). + +**Method 1** + +Go to the `demo-project` project management page, access RadonDB MySQL through the terminal. + +1. Go to **Pods** under **Application Workloads**. + +2. Click a pod name to go to the pod management page. + +3. 
Under the **Container** column box in **Resource Status**, click the terminal icon for the **mysql** container. + +4. In terminal window, run the following command to access the RadonDB MySQL cluster. + +![Access RadonDB MySQL](/images/docs/v3.3/appstore/external-apps/deploy-radondb-mysql/pod_terminal.png) + +**Method 2** + +Hover your cursor over the hammer icon in the lower-right corner, and then select **Kubectl**. + +Run the following command to access the RadonDB MySQL cluster. + +```kubectl +kubectl exec -it -c mysql -n -- mysql --user= --password= +``` + +{{< notice note >}} + +In the blow command, `sample-mysql-0` is the Pod name and `demo-project` is the Project name. Make sure you use your own Pod name, project name, username, and password. + +{{}} + +![Access RadonDB MySQL](/images/docs/v3.3/appstore/external-apps/deploy-radondb-mysql/kubectl_terminal.png) diff --git a/content/en/docs/v3.4/application-store/external-apps/deploy-tidb.md b/content/en/docs/v3.4/application-store/external-apps/deploy-tidb.md new file mode 100644 index 000000000..b331b32a6 --- /dev/null +++ b/content/en/docs/v3.4/application-store/external-apps/deploy-tidb.md @@ -0,0 +1,142 @@ +--- +title: "Deploy TiDB Operator and a TiDB Cluster on KubeSphere" +keywords: 'KubeSphere, Kubernetes, TiDB, TiDB Operator, TiDB Cluster' +description: 'Learn how to deploy TiDB Operator and a TiDB Cluster on KubeSphere.' +linkTitle: "Deploy TiDB Operator and a TiDB Cluster" +weight: 14320 +--- + +[TiDB](https://en.pingcap.com/) is a cloud-native, open-source NewSQL database that supports Hybrid Transactional and Analytical Processing (HTAP) workloads. It features horizontal scalability, strong consistency, and high availability. + +This tutorial demonstrates how to deploy TiDB Operator and a TiDB Cluster on KubeSphere. + +## Prerequisites + +- You need to have at least 3 schedulable nodes. +- You need to enable [the OpenPitrix system](../../../pluggable-components/app-store/). +- You need to create a workspace, a project, and two user accounts (`ws-admin` and `project-regular`) for this tutorial. The account `ws-admin` must be granted the role of `workspace-admin` in the workspace, and the account `project-regular` must be invited to the project with the role of `operator`. If they are not ready, refer to [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Hands-on Lab + +### Step 1: Install TiDB Operator CRD + +1. Log in to the KubeSphere Web console as `admin`, and use **Kubectl** from the **Toolbox** in the bottom-right corner to execute the following command to install TiDB Operator CRD: + + ```bash + kubectl apply -f https://raw.githubusercontent.com/pingcap/tidb-operator/v1.1.6/manifests/crd.yaml + ``` + +2. You can see the expected output as below: + + ```bash + customresourcedefinition.apiextensions.k8s.io/tidbclusters.pingcap.com created + customresourcedefinition.apiextensions.k8s.io/backups.pingcap.com created + customresourcedefinition.apiextensions.k8s.io/restores.pingcap.com created + customresourcedefinition.apiextensions.k8s.io/backupschedules.pingcap.com created + customresourcedefinition.apiextensions.k8s.io/tidbmonitors.pingcap.com created + customresourcedefinition.apiextensions.k8s.io/tidbinitializers.pingcap.com created + customresourcedefinition.apiextensions.k8s.io/tidbclusterautoscalers.pingcap.com created + ``` + +### Step 2: Add an app repository + +1. Log out of KubeSphere and log back in as `ws-admin`. 
In your workspace, go to **App Repositories** under **App Management**, and then click **Add**. + +2. In the displayed dialog box, enter `pingcap` for the app repository name and `https://charts.pingcap.org` for the PingCAP Helm repository URL. Click **Validate** to verify the URL, and you will see a green check mark next to the URL if it is available. Click **OK** to continue. + +3. Your repository displays in the list after it is successfully imported to KubeSphere. + +### Step 3: Deploy TiDB Operator + +1. Log out of KubeSphere and log back in as `project-regular`. In your project, go to **Apps** under **Application Workloads** and click **Create**. + +2. In the displayed dialog box, select **From App Template**. + +3. Select `pingcap` from the drop-down list, then click **tidb-operator**. + + {{< notice note >}} + + This tutorial only demonstrates how to deploy TiDB Operator and a TiDB cluster. You can also deploy other tools based on your needs. + + {{}} + +4. On the **Chart Files** tab, you can view the configuration on the console directly or download the default `values.yaml` file by clicking the icon in the upper-right corner. Under **Versions**, select a version number from the drop-down list and click **Install**. + +5. On the **Basic Information** page, confirm the app name, app version, and deployment location. Click **Next** to continue. + +6. On the **App Settings** page, you can either edit the `values.yaml` file, or click **Install** directly with the default configurations. + +7. Wait for TiDB Operator to be up and running. + +8. Go to **Workloads**, and you can see two Deployments created for TiDB Operator. + +### Step 4: Deploy a TiDB cluster + +The process of deploying a TiDB cluster is similar to deploying TiDB Operator. + +1. Go to **Apps** under **Application Workloads**, click **Create**, and then select **From App Template**. + +2. From the PingCAP repository, click **tidb-cluster**. + +3. On the **Chart Files** tab, you can view the configuration and download the `values.yaml` file. Click **Install** to continue. + +4. On the **Basic Information** page, confirm the app name, app version, and deployment location. Click **Next** to continue. + +5. Some TiDB components require [storage classes](../../../cluster-administration/storageclass/). You can run the following command to view your storage classes. + + ``` + $ kubectl get sc + NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE + csi-high-capacity-legacy csi-qingcloud Delete Immediate true 71m + csi-high-perf csi-qingcloud Delete Immediate true 71m + csi-ssd-enterprise csi-qingcloud Delete Immediate true 71m + csi-standard (default) csi-qingcloud Delete Immediate true 71m + csi-super-high-perf csi-qingcloud Delete Immediate true 71m + ``` + +6. On the **App Settings** page, change all the default values of the field `storageClassName` from `local-storage` to the name of your storage class. For example, you can change them to `csi-standard` based on the above output. + + {{< notice note >}} + + Only the field `storageClassName` is changed to provide external persistent storage. If you want to deploy each TiDB component, such as [TiKV](https://docs.pingcap.com/tidb/dev/tidb-architecture#tikv-server) and [Placement Driver](https://docs.pingcap.com/tidb/dev/tidb-architecture#placement-driver-pd-server), to individual nodes, specify the field `nodeAffinity`. + + {{}} + +7. Click **Install**, and you can see two apps in the list. + +### Step 5: View TiDB cluster status + +1. 
Go to **Workloads** under **Application Workloads**, and verify that all TiDB cluster Deployments are up and running. + +2. Switch to the **StatefulSets** tab, and you can see TiDB, TiKV and PD are up and running. + + {{< notice note >}} + + TiKV and TiDB will be created automatically and it may take a while before they display in the list. + + {{}} + +3. Click a single StatefulSet to go to its detail page. You can see the metrics in line charts over a period of time under the **Monitoring** tab. + +4. In **Pods** under **Application Workloads**, you can see the TiDB cluster contains two TiDB Pods, three TiKV Pods, and three PD Pods. + +5. In **Persistent Volume Claims** under **Storage**, you can see TiKV and PD are using persistent volumes. + +6. Volume usage is also monitored. Click a volume item to go to its detail page. + +7. On the **Overview** page of the project, you can see a list of resource usage in the current project. + +### Step 6: Access the TiDB cluster + +1. Go to **Services** under **Application Workloads**, and you can see detailed information of all Services. As the Service type is set to `NodePort` by default, you can access it through the Node IP address outside the cluster. + +2. TiDB integrates Prometheus and Grafana to monitor performance of the database cluster. For example, you can access Grafana through `:` to view metrics. + + ![tidb-grafana](/images/docs/v3.3/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-grafana.PNG) + + {{< notice note >}} + + You may need to open the port in your security groups and configure related port forwarding rules depending on where your Kubernetes cluster is deployed. + + {{}} + diff --git a/content/en/docs/v3.4/cluster-administration/_index.md b/content/en/docs/v3.4/cluster-administration/_index.md new file mode 100644 index 000000000..4d6bfe05c --- /dev/null +++ b/content/en/docs/v3.4/cluster-administration/_index.md @@ -0,0 +1,20 @@ +--- +title: "Cluster Administration" +description: "Understand the basics of administering your clusters." +layout: "second" + +linkTitle: "Cluster Administration" + +weight: 8000 + +icon: "/images/docs/v3.3/docs.svg" + +--- + +In KubeSphere, you set a cluster's configurations and configure its features using the interactive web console or the built-in native command-line tool kubectl. As a cluster administrator, you are responsible for a series of tasks, including cordoning and adding labels to nodes, controlling cluster visibility, monitoring cluster status, setting cluster-wide alerting and notification rules, as well as configuring storage and log collection solutions. + +{{< notice note >}} + +Multi-cluster management is not covered in this chapter. For more information about this feature, see [Multi-cluster Management](../multicluster-management/). + +{{}} diff --git a/content/en/docs/v3.4/cluster-administration/application-resources-monitoring.md b/content/en/docs/v3.4/cluster-administration/application-resources-monitoring.md new file mode 100644 index 000000000..a267ee57d --- /dev/null +++ b/content/en/docs/v3.4/cluster-administration/application-resources-monitoring.md @@ -0,0 +1,30 @@ +--- +title: "Application Resources Monitoring" +keywords: "Kubernetes, KubeSphere, resources, monitoring" +description: "Monitor application resources across the cluster, such as the number of Deployments and CPU usage of different projects." 
+ +linkTitle: "Application Resources Monitoring" +weight: 8300 +--- + +In addition to monitoring data at the physical resource level, cluster administrators also need to keep a close track of application resources across the platform, such as the number of projects and DevOps projects, as well as the number of workloads and services of a specific type. Application resource monitoring provides a summary of resource usage and application-level trends of the platform. + +## Prerequisites + +You need a user granted a role including the authorization of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the authorization and assign it to a user. + +## Resource Usage + +1. Click **Platform** in the upper-left corner and select **Cluster Management**. + +2. If you have enabled the [multi-cluster feature](../../multicluster-management/) with member clusters imported, you can select a specific cluster to view its application resources. If you have not enabled the feature, refer to the next step directly. + +3. Choose **Application Resources** under **Monitoring & Alerting** to see the overview of application resources, including the summary of the usage of all resources in the cluster. + +4. Among them, **Cluster Resource Usage** and **Application Resource Usage** retain the monitoring data of the last 7 days and support custom time range queries. + +5. Click a specific resource to view detailed usage and trends of it during a certain time period, such as **CPU** under **Cluster Resource Usage**. The detail page allows you to view specific monitoring data by project. The highly-interactive dashboard enables users to customize the time range, displaying the exact resource usage at a given time point. + +## Usage Ranking + +**Usage Ranking** supports the sorting of project resource usage, so that platform administrators can understand the resource usage of each project in the current cluster, including **CPU usage**, **memory usage**, **Pod count**, **inbound traffic** and **outbound traffic**. You can sort projects in ascending or descending order by one of the indicators in the drop-down list. This feature is very useful for quickly locating your application (Pod) that is consuming heavy CPU or memory. diff --git a/content/en/docs/v3.4/cluster-administration/cluster-settings/_index.md b/content/en/docs/v3.4/cluster-administration/cluster-settings/_index.md new file mode 100644 index 000000000..0044edd42 --- /dev/null +++ b/content/en/docs/v3.4/cluster-administration/cluster-settings/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Cluster Settings" +weight: 8600 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/cluster-administration/cluster-settings/cluster-gateway.md b/content/en/docs/v3.4/cluster-administration/cluster-settings/cluster-gateway.md new file mode 100644 index 000000000..5497ff55a --- /dev/null +++ b/content/en/docs/v3.4/cluster-administration/cluster-settings/cluster-gateway.md @@ -0,0 +1,82 @@ +--- +title: "Cluster Gateway" +keywords: 'KubeSphere, Kubernetes, Cluster, Gateway, NodePort, LoadBalancer' +description: 'Learn how to create a cluster-scope gateway on KubeSphere.' +linkTitle: "Cluster Gateway" +weight: 8630 +--- + +KubeSphere 3.3 provides cluster-scope gateways to let all projects share a global gateway. This document describes how to set a cluster gateway on KubeSphere. + +## Prerequisites + +You need to prepare a user with the `platform-admin` role, for example, `admin`. 
For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Create a Cluster Gateway + +1. Log in to the KubeSphere web console as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. + +2. Go to **Gateway Settings** under **Cluster Settings** from the navigation pane, select the **Cluster Gateway** tab, and click **Enable Gateway**. + +3. In the displayed dialog box, select an access mode for the gateway from the following two options: + + - **NodePort**: Access Services with corresponding node ports through the gateway. The NodePort access mode provides the following configurations: + - **Tracing**: Turn on the **Tracing** toggle to enable the Tracing feature on KubeSphere. Once it is enabled, check whether an annotation (`nginx.ingress.kubernetes.io/service-upstream: true`) is added for your route when the route is inaccessible. If not, add an annotation to your route. + - **Configuration Options**: Add key-value pairs to the cluster gateway. + - **LoadBalancer**: Access Services with a single IP address through the gateway. The LoadBalancer access mode provides the following configurations: + - **Tracing**: Turn on the **Tracing** toggle to enable the Tracing feature on KubeSphere. Once it is enabled, check whether an annotation (`nginx.ingress.kubernetes.io/service-upstream: true`) is added for your route when the route is inaccessible. If not, add an annotation to your route. + - **Load Balancer Provider**: Select a load balancer provider from the drop-down list. + - **Annotations**: Add annotations to the cluster gateway. + - **Configuration Options**: Add key-value pairs to the cluster gateway. + + {{< notice info >}} + + - To use the Tracing feature, turn on **Application Governance** when you create composed applications. + - For more information about how to use configuration options, see [Configuration options](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#configuration-options). + + {{}} + +4. Click **OK** to create the cluster gateway. + +5. The cluster gateway created is displayed and the basic information of the gateway is also shown on the page. + + {{< notice note >}} + + A gateway named `kubesphere-router-kubesphere-system` is also created, which serves as a global gateway for all projects in your cluster. + + {{}} + +6. Click **Manage** to select an operation from the drop-down menu: + + - **View Details**: Go to the details page of the cluster gateway. + - **Edit**: Edit configurations of the cluster gateway. + - **Disable**: Disable the cluster gateway. + +7. After a cluster gateway is created, see [Routes](../../../project-user-guide/application-workloads/routes/#create-a-route) for more information about how to create a route. + +## Cluster Gateway Details Page + +1. Under the **Cluster Gateway** tab, click **Manage** on the right of a cluster gateway and select **View Details** to open its details page. +2. On the details page, click **Edit** to edit configurations of the cluster gateway or click **Disable** to disable the gateway. +3. Click the **Monitoring** tab to view the monitoring metrics of the cluster gateway. +4. Click the **Configuration Options** tab to view configuration options of the cluster gateway. +5. Click the **Gateway Logs** tab to view logs of the cluster gateway. +6. Click the **Resource Status** tab to view workload status of the cluster gateway. 
Click icon or to scale up or scale down the number of replicas. +7. Click the **Metadata** tab to view annotations of the cluster gateway. + +## View Project Gateways + +On the **Gateway Settings** page, click the **Project Gateway** tab to view project gateways. + +Click icon on the right of a project gateway to select an operation from the drop-down menu: + +- **Edit**: Edit configurations of the project gateway. +- **Disable**: Disable the project gateway. + +{{< notice note >}} + +If a project gateway exists prior to the creation of a cluster gateway, the project gateway address may switch between the address of the cluster gateway and that of the project gateway. It is recommended that you should use either the cluster gateway or project gateway. + +{{}} + +For more information about how to create project gateways, see [Project Gateway](../../../project-administration/project-gateway/). \ No newline at end of file diff --git a/content/en/docs/v3.4/cluster-administration/cluster-settings/cluster-visibility-and-authorization.md b/content/en/docs/v3.4/cluster-administration/cluster-settings/cluster-visibility-and-authorization.md new file mode 100644 index 000000000..e95527e5b --- /dev/null +++ b/content/en/docs/v3.4/cluster-administration/cluster-settings/cluster-visibility-and-authorization.md @@ -0,0 +1,53 @@ +--- +title: "Cluster Visibility and Authorization" +keywords: "Cluster Visibility, Cluster Management" +description: "Learn how to set up cluster visibility and authorization." +linkTitle: "Cluster Visibility and Authorization" +weight: 8610 +--- + +In KubeSphere, you can allocate a cluster to multiple workspaces through authorization so that workspace resources can all run on the cluster. At the same time, a workspace can also be associated with multiple clusters. Workspace users with necessary permissions can create multi-cluster projects using clusters allocated to the workspace. + +This guide demonstrates how to set cluster visibility. + +## Prerequisites +* You need to enable the [multi-cluster feature](../../../multicluster-management/). +* You need to have a workspace and a user that has the permission to create workspaces, such as `ws-manager`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Set Cluster Visibility + +### Select available clusters when you create a workspace + +1. Log in to KubeSphere with a user that has the permission to create a workspace, such as `ws-manager`. + +2. Click **Platform** in the upper-left corner and select **Access Control**. In **Workspaces** from the navigation bar, click **Create**. + +3. Provide the basic information for the workspace and click **Next**. + +4. On the **Cluster Settings** page, you can see a list of available clusters. Select the clusters that you want to allocate to the workspace and click **Create**. + +5. After the workspace is created, workspace members with necessary permissions can create resources that run on the associated cluster. + + {{< notice warning >}} + +Try not to create resources on the host cluster to avoid excessive loads, which can lead to a decrease in the stability across clusters. + +{{}} + +### Set cluster visibility after a workspace is created + +After a workspace is created, you can allocate additional clusters to the workspace through authorization or unbind a cluster from the workspace. Follow the steps below to adjust the visibility of a cluster. + +1. 
Log in to KubeSphere with a user that has the permission to manage clusters, such as `admin`. + +2. Click **Platform** in the upper-left corner and select **Cluster Management**. Select a cluster from the list to view cluster information. + +3. In **Cluster Settings** from the navigation bar, select **Cluster Visibility**. + +4. You can see the list of authorized workspaces, which means the current cluster is available to resources in all these workspaces. + +5. Click **Edit Visibility** to set the cluster visibility. You can select new workspaces that will be able to use the cluster or unbind it from a workspace. + +### Make a cluster public + +You can check **Set as Public Cluster** so that platform users can access the cluster, in which they are able to create and schedule resources. diff --git a/content/en/docs/v3.4/cluster-administration/cluster-settings/log-collections/_index.md b/content/en/docs/v3.4/cluster-administration/cluster-settings/log-collections/_index.md new file mode 100644 index 000000000..275de2bb0 --- /dev/null +++ b/content/en/docs/v3.4/cluster-administration/cluster-settings/log-collections/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Log Receivers" +weight: 8620 + +_build: + render: false +--- \ No newline at end of file diff --git a/content/en/docs/v3.4/cluster-administration/cluster-settings/log-collections/add-es-as-receiver.md b/content/en/docs/v3.4/cluster-administration/cluster-settings/log-collections/add-es-as-receiver.md new file mode 100644 index 000000000..1b43c5c85 --- /dev/null +++ b/content/en/docs/v3.4/cluster-administration/cluster-settings/log-collections/add-es-as-receiver.md @@ -0,0 +1,35 @@ +--- +title: "Add Elasticsearch as a Receiver" +keywords: 'Kubernetes, log, elasticsearch, pod, container, fluentbit, output' +description: 'Learn how to add Elasticsearch to receive container logs, resource events, or audit logs.' +linkTitle: "Add Elasticsearch as a Receiver" +weight: 8622 +--- +You can use Elasticsearch, Kafka, and Fluentd as log receivers in KubeSphere. This tutorial demonstrates how to add an Elasticsearch receiver. + +## Prerequisites + +- You need a user granted a role including the permission of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the permission and assign it to a user. + +- Before adding a log receiver, you need to enable any of the `logging`, `events` or `auditing` components. For more information, see [Enable Pluggable Components](../../../../pluggable-components/). `logging` is enabled as an example in this tutorial. + +## Add Elasticsearch as a Receiver + +1. Log in to KubeSphere as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. + + {{< notice note >}} + +If you have enabled the [multi-cluster feature](../../../../multicluster-management/), you can select a specific cluster. + +{{}} + +2. On the navigation pane on the left, click **Cluster Settings** > **Log Receivers**. + +3. Click **Add Log Receiver** and choose **Elasticsearch**. + +4. Provide the Elasticsearch service address and port number. + +5. Elasticsearch will appear in the receiver list on the **Log Receivers** page, the status of which is **Collecting**. + +6. To verify whether Elasticsearch is receiving logs sent from Fluent Bit, click **Log Search** in the **Toolbox** in the lower-right corner and search logs on the console. For more information, read [Log Query](../../../../toolbox/log-query/). 
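+
+   Alternatively, you can query the Elasticsearch HTTP API directly to confirm that new log indices are being created and growing. The service address, port, and index prefix below are placeholders; replace them with the values you entered for the receiver:
+
+   ```bash
+   # List the indices on the receiver; log indices should appear once collection starts.
+   curl -s "http://<elasticsearch-address>:<port>/_cat/indices?v"
+
+   # Check the document count for a specific index prefix (adjust the prefix to your setup).
+   curl -s "http://<elasticsearch-address>:<port>/_cat/count/<index-prefix>-*?v"
+   ```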
+
diff --git a/content/en/docs/v3.4/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver.md b/content/en/docs/v3.4/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver.md
new file mode 100644
index 000000000..b674da974
--- /dev/null
+++ b/content/en/docs/v3.4/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver.md
@@ -0,0 +1,154 @@
+---
+title: "Add Fluentd as a Receiver"
+keywords: 'Kubernetes, log, fluentd, pod, container, fluentbit, output'
+description: 'Learn how to add Fluentd to receive logs, events or audit logs.'
+linkTitle: "Add Fluentd as a Receiver"
+weight: 8624
+---
+You can use Elasticsearch, Kafka and Fluentd as log receivers in KubeSphere. This tutorial demonstrates:
+
+- How to deploy Fluentd as a Deployment and create the corresponding Service and ConfigMap.
+- How to add Fluentd as a log receiver to receive logs sent from Fluent Bit and then output to stdout.
+- How to verify if Fluentd receives logs successfully.
+
+## Prerequisites
+
+- You need a user granted a role including the permission of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the permission and assign it to a user.
+
+- Before adding a log receiver, you need to enable any of the `logging`, `events`, or `auditing` components. For more information, see [Enable Pluggable Components](../../../../pluggable-components/). `logging` is enabled as an example in this tutorial.
+
+## Step 1: Deploy Fluentd as a Deployment
+
+Usually, Fluentd is deployed as a DaemonSet in Kubernetes to collect container logs on each node. KubeSphere chooses Fluent Bit because of its low memory footprint. Besides, Fluentd features numerous output plugins. Hence, KubeSphere chooses to deploy Fluentd as a Deployment to forward logs it receives from Fluent Bit to more destinations such as S3, MongoDB, Cassandra, MySQL, syslog and Splunk.
+
+Run the following commands:
+
+{{< notice note >}}
+
+- The following commands create the Fluentd Deployment, Service, and ConfigMap in the `default` namespace and add a filter to the Fluentd ConfigMap to exclude logs from the `default` namespace to avoid Fluent Bit and Fluentd loop log collections.
+- Change the namespace if you want to deploy Fluentd into a different namespace.
+
+{{</ notice >}}
+
+```yaml
+cat <<EOF | kubectl apply -f -
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: fluentd-config
+  namespace: default
+data:
+  fluent.conf: |
+    <source>
+      @type forward
+      port 24224
+    </source>
+
+    # Because this will send logs Fluentd received to stdout,
+    # to avoid Fluent Bit and Fluentd loop logs collection,
+    # add a filter here to avoid sending logs from the default namespace to stdout again
+    <filter **>
+      @type grep
+      <exclude>
+        key $.kubernetes.namespace_name
+        pattern /^default$/
+      </exclude>
+    </filter>
+
+    # Send received logs to stdout for demo/test purpose only
+    # Various output plugins are supported to output logs to S3, MongoDB, Cassandra, MySQL, syslog, Splunk, etc.
+    <match **>
+      @type stdout
+    </match>
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: fluentd
+  name: fluentd
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: fluentd
+  template:
+    metadata:
+      labels:
+        app: fluentd
+    spec:
+      containers:
+      - image: fluentd:v1.9.1-1.0
+        imagePullPolicy: IfNotPresent
+        name: fluentd
+        ports:
+        - containerPort: 24224
+          name: forward
+          protocol: TCP
+        - containerPort: 5140
+          name: syslog
+          protocol: TCP
+        volumeMounts:
+        - mountPath: /fluentd/etc
+          name: config
+          readOnly: true
+      volumes:
+      - configMap:
+          defaultMode: 420
+          name: fluentd-config
+        name: config
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: fluentd
+  name: fluentd
+  namespace: default
+spec:
+  ports:
+  - name: forward
+    port: 24224
+    protocol: TCP
+    targetPort: forward
+  selector:
+    app: fluentd
+  sessionAffinity: None
+  type: ClusterIP
+EOF
+```
+
+## Step 2: Add Fluentd as a Log Receiver
+
+1. Log in to KubeSphere as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**.
+
+   {{< notice note >}}
+
+   If you have enabled the [multi-cluster feature](../../../../multicluster-management/), you can select a specific cluster.
+
+   {{</ notice >}}
+
+2. On the **Cluster Management** page, go to **Log Receivers** in **Cluster Settings**.
+
+3. Click **Add Log Receiver** and choose **Fluentd**.
+
+4. Provide the Fluentd service address and port number.
+
+5. Fluentd will appear in the receiver list on the **Log Receivers** page, the status of which is **Collecting**.
+
+
+## Step 3: Verify Fluentd is Receiving Logs Sent from Fluent Bit
+
+1. Click **Application Workloads** on the **Cluster Management** page.
+
+2. Select **Workloads** and then select the `default` project on the **Deployments** tab.
+
+3. Click the **fluentd** item and then select the **fluentd-xxxxxxxxx-xxxxx** Pod.
+
+4. Click the **fluentd** container.
+
+5. On the **fluentd** container page, select the **Container Logs** tab.
+
+6. You can see logs begin to scroll up continuously.
\ No newline at end of file
diff --git a/content/en/docs/v3.4/cluster-administration/cluster-settings/log-collections/add-kafka-as-receiver.md b/content/en/docs/v3.4/cluster-administration/cluster-settings/log-collections/add-kafka-as-receiver.md
new file mode 100644
index 000000000..a4325b5c6
--- /dev/null
+++ b/content/en/docs/v3.4/cluster-administration/cluster-settings/log-collections/add-kafka-as-receiver.md
@@ -0,0 +1,131 @@
+---
+title: "Add Kafka as a Receiver"
+keywords: 'Kubernetes, log, kafka, pod, container, fluentbit, output'
+description: 'Learn how to add Kafka to receive container logs, resource events, or audit logs.'
+linkTitle: "Add Kafka as a Receiver"
+weight: 8623
+---
+You can use Elasticsearch, Kafka and Fluentd as log receivers in KubeSphere. This tutorial demonstrates:
+
+- How to deploy [strimzi-kafka-operator](https://github.com/strimzi/strimzi-kafka-operator) and then create a Kafka cluster and a Kafka topic by creating `Kafka` and `KafkaTopic` CRDs.
+- How to add Kafka as a log receiver to receive logs sent from Fluent Bit.
+- How to verify whether the Kafka cluster is receiving logs using [Kafkacat](https://github.com/edenhill/kafkacat).
+
+## Prerequisites
+
+- You need a user granted a role including the permission of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the permission and assign it to a user.
+- Before adding a log receiver, you need to enable any of the `logging`, `events` or `auditing` components.
For more information, see [Enable Pluggable Components](../../../../pluggable-components/). `logging` is enabled as an example in this tutorial.
+
+## Step 1: Create a Kafka Cluster and a Kafka Topic
+
+You can use [strimzi-kafka-operator](https://github.com/strimzi/strimzi-kafka-operator) to create a Kafka cluster and a Kafka topic. If you already have a Kafka cluster, you can start from the next step.
+
+1. Install [strimzi-kafka-operator](https://github.com/strimzi/strimzi-kafka-operator) in the `default` namespace:
+
+   ```bash
+   helm repo add strimzi https://strimzi.io/charts/
+   ```
+
+   ```bash
+   helm install --name kafka-operator -n default strimzi/strimzi-kafka-operator
+   ```
+
+
+2. Create a Kafka cluster and a Kafka topic in the `default` namespace by running the following commands. The commands create Kafka and ZooKeeper clusters with storage type `ephemeral` which is `emptyDir` for demonstration purposes. For other storage types in a production environment, refer to [kafka-persistent](https://github.com/strimzi/strimzi-kafka-operator/blob/0.19.0/examples/kafka/kafka-persistent.yaml).
+
+   ```yaml
+   cat <<EOF | kubectl apply -f -
+   apiVersion: kafka.strimzi.io/v1beta1
+   kind: Kafka
+   metadata:
+     name: my-cluster
+     namespace: default
+   spec:
+     kafka:
+       version: 2.5.0
+       replicas: 3
+       listeners:
+         plain: {}
+         tls: {}
+       config:
+         offsets.topic.replication.factor: 3
+         transaction.state.log.replication.factor: 3
+         transaction.state.log.min.isr: 2
+         log.message.format.version: '2.5'
+       storage:
+         type: ephemeral
+     zookeeper:
+       replicas: 3
+       storage:
+         type: ephemeral
+     entityOperator:
+       topicOperator: {}
+       userOperator: {}
+   ---
+   apiVersion: kafka.strimzi.io/v1beta1
+   kind: KafkaTopic
+   metadata:
+     name: my-topic
+     namespace: default
+     labels:
+       strimzi.io/cluster: my-cluster
+   spec:
+     partitions: 3
+     replicas: 3
+   EOF
+   ```
+
+## Step 2: Add Kafka as a Log Receiver
+
+1. Log in to KubeSphere as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**.
+
+   {{< notice note >}}
+
+   If you have enabled the [multi-cluster feature](../../../../multicluster-management/), you can select a specific cluster.
+
+   {{</ notice >}}
+
+2. On the **Cluster Management** page, go to **Log Receivers** in **Cluster Settings**.
+
+3. Click **Add Log Receiver** and select **Kafka**. Enter the Kafka service address and port number, and then click **OK** to continue.
+
+   | Service Address | Port Number |
+   | ------------------------------------------------------- | ---- |
+   | my-cluster-kafka-0.my-cluster-kafka-brokers.default.svc | 9092 |
+   | my-cluster-kafka-1.my-cluster-kafka-brokers.default.svc | 9092 |
+   | my-cluster-kafka-2.my-cluster-kafka-brokers.default.svc | 9092 |
+
+4. Run the following commands to verify whether the Kafka cluster is receiving logs sent from Fluent Bit:
+
+   ```bash
+   # Start a util container
+   kubectl run --rm utils -it --generator=run-pod/v1 --image arunvelsriram/utils bash
+   # Install Kafkacat in the util container
+   apt-get install kafkacat
+   # Run the following command to consume log messages from kafka topic: my-topic
+   kafkacat -C -b my-cluster-kafka-0.my-cluster-kafka-brokers.default.svc:9092,my-cluster-kafka-1.my-cluster-kafka-brokers.default.svc:9092,my-cluster-kafka-2.my-cluster-kafka-brokers.default.svc:9092 -t my-topic
+   ```
\ No newline at end of file
diff --git a/content/en/docs/v3.4/cluster-administration/cluster-settings/log-collections/introduction.md b/content/en/docs/v3.4/cluster-administration/cluster-settings/log-collections/introduction.md
new file mode 100644
index 000000000..833e9c790
--- /dev/null
+++ b/content/en/docs/v3.4/cluster-administration/cluster-settings/log-collections/introduction.md
@@ -0,0 +1,87 @@
+---
+title: "Introduction to Log Receivers"
+keywords: 'Kubernetes, log, elasticsearch, kafka, fluentd, pod, container, fluentbit, output'
+description: 'Learn the basics of cluster log receivers, including tools and general steps.'
+linkTitle: "Introduction"
+weight: 8621
+---
+
+KubeSphere provides a flexible log receiver configuration method. Powered by [Fluent Operator](https://github.com/fluent/fluent-operator), users can easily add, modify, delete, enable, or disable Elasticsearch, Kafka and Fluentd receivers. Once a receiver is added, logs will be sent to this receiver.
+
+This tutorial gives a brief introduction to the general steps of adding log receivers in KubeSphere.
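+
+Under the hood, each receiver added on the console is stored as a Fluent Operator custom resource. If you prefer the command line, you can list these resources as shown below; the resource kind and API group are assumptions based on a default Fluent Operator installation and may differ depending on the version bundled with your KubeSphere release:
+
+```bash
+# List the cluster-level output resources that back the configured log receivers.
+kubectl get clusteroutputs.fluentbit.fluent.io
+```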
+ +## Prerequisites + +- You need a user granted a role including the permission of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the permission and assign it to a user. + +- Before adding a log receiver, you need to enable any of the `Logging`, `Events` or `Auditing` components. For more information, see [Enable Pluggable Components](../../../../pluggable-components/). + +## Add a Log Receiver for Container Logs + +To add a log receiver: + +1. Log in to the web console of KubeSphere as `admin`. + +2. Click **Platform** in the upper-left corner and select **Cluster Management**. + + {{< notice note >}} + + If you have enabled the [multi-cluster feature](../../../../multicluster-management/), you can select a specific cluster. + + {{}} + +3. Go to **Log Receivers** under **Cluster Settings** in the sidebar. + +4. On the log receivers list page, click **Add Log Receiver**. + + {{< notice note >}} + +- At most one receiver can be added for each receiver type. +- Different types of receivers can be added simultaneously. + +{{}} + +### Add Elasticsearch as a log receiver + +A default Elasticsearch receiver will be added with its service address set to an Elasticsearch cluster if `logging`, `events`, or `auditing` is enabled in [ClusterConfiguration](https://github.com/kubesphere/kubekey/blob/release-2.2/docs/config-example.md). + +An internal Elasticsearch cluster will be deployed to the Kubernetes cluster if neither `externalElasticsearchHost` nor `externalElasticsearchPort` is specified in [ClusterConfiguration](https://github.com/kubesphere/kubekey/blob/release-2.2/docs/config-example.md) when `logging`, `events`, or `auditing` is enabled. The internal Elasticsearch cluster is for testing and development only. It is recommended that you configure an external Elasticsearch cluster for production. + +Log searching relies on the internal or external Elasticsearch cluster configured. + +If the default Elasticsearch log receiver is deleted, refer to [Add Elasticsearch as a Receiver](../add-es-as-receiver/) to add a new one. + +### Add Kafka as a log receiver + +Kafka is often used to receive logs and serves as a broker to other processing systems like Spark. [Add Kafka as a Receiver](../add-kafka-as-receiver/) demonstrates how to add Kafka to receive Kubernetes logs. + +### Add Fluentd as a log receiver + +If you need to output logs to more places other than Elasticsearch or Kafka, you can add Fluentd as a log receiver. Fluentd has numerous output plugins which can forward logs to various destinations such as S3, MongoDB, Cassandra, MySQL, syslog, and Splunk. [Add Fluentd as a Receiver](../add-fluentd-as-receiver/) demonstrates how to add Fluentd to receive Kubernetes logs. + +## Add a Log Receiver for Resource Events or Audit Logs + +Starting from KubeSphere v3.0.0, resource events and audit logs can be archived in the same way as container logs. The tab **Resource Events** or **Audit Logs** on the **Log Receivers** page will appear if `events` or `auditing` is enabled accordingly in [ClusterConfiguration](https://github.com/kubesphere/kubekey/blob/release-2.2/docs/config-example.md). You can go to the corresponding tab to configure log receivers for resource events or audit logs. + +Container logs, resource events, and audit logs should be stored in different Elasticsearch indices to be searched in KubeSphere. The index is automatically generated in - format. 
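+
+For reference, these components are switched on in the `ClusterConfiguration` of ks-installer mentioned above. The following is a minimal sketch showing only the relevant fields; keep the rest of your own ClusterConfiguration unchanged:
+
+```yaml
+# Excerpt from the ClusterConfiguration (ks-installer); only the switches relevant to log receivers are shown.
+spec:
+  logging:
+    enabled: true      # container logs
+  events:
+    enabled: true      # resource events
+  auditing:
+    enabled: true      # audit logs
+```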
+ +## Turn a Log Receiver on or Off + +You can turn a log receiver on or off without adding or deleting it. To turn a log receiver on or off: + +1. On the **Log Receivers** page, click a log receiver and go to the receiver's detail page. +2. Click **More** and select **Change Status**. + +3. Select **Collecting** or **Disabled** to turn the log receiver on or off. + +4. A log receiver's status will be changed to **Disabled** if you turn it off, otherwise the status will be **Collecting** on the **Log Receivers** page. + + +## Edit or Delete a Log Receiver + +You can edit a log receiver or delete it: + +1. On the **Log Receivers** page, click a log receiver and go to the receiver's detail page. +2. Edit a log receiver by clicking **Edit** or **Edit YAML** from the drop-down list. + +3. Delete a log receiver by clicking **Delete**. diff --git a/content/en/docs/v3.4/cluster-administration/cluster-status-monitoring.md b/content/en/docs/v3.4/cluster-administration/cluster-status-monitoring.md new file mode 100644 index 000000000..b01a541c1 --- /dev/null +++ b/content/en/docs/v3.4/cluster-administration/cluster-status-monitoring.md @@ -0,0 +1,134 @@ +--- +title: "Cluster Status Monitoring" +keywords: "Kubernetes, KubeSphere, status, monitoring" +description: "Monitor how a cluster is functioning based on different metrics, including physical resources, etcd, and API server." +linkTitle: "Cluster Status Monitoring" +weight: 8200 +--- + +KubeSphere provides monitoring of related metrics such as CPU, memory, network, and disk of the cluster. You can also review historical monitoring data and sort nodes by different indicators based on their usage in **Cluster Status**. + +## Prerequisites + +You need a user granted a role including the authorization of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the authorization and assign it to a user. + +## Cluster Status Monitoring + +1. Click **Platform** in the upper-left corner and select **Cluster Management**. + +2. If you have enabled the [multi-cluster feature](../../multicluster-management/) with member clusters imported, you can select a specific cluster to view its application resources. If you have not enabled the feature, refer to the next step directly. + +3. Choose **Cluster Status** under **Monitoring & Alerting** to see the overview of cluster status monitoring, including **Cluster Node Status**, **Component Status**, **Cluster Resource Usage**, **etcd Monitoring**, and **Service Component Monitoring**. + +### Cluster node status + +1. **Cluster Nodes Status** displays the status of all nodes, separately marking the active ones. You can go to the **Cluster Nodes** page to view the real-time resource usage of all nodes by clicking **Node Online Status**. + +2. In **Cluster Nodes**, click the node name to view usage details in **Running Status**, including **Resource Usage**, **Allocated Resources**, and **Health Status**. + +3. Click the **Monitoring** tab to view how the node is functioning during a certain period based on different metrics, including **CPU Usage**, **Average CPU Load**, **Memory Usage**, **Disk Usage**, **Inode Usage**, **IOPS**, **Disk Throughput**, and **Network Bandwidth**. + + {{< notice tip >}}You can customize the time range from the drop-down list in the upper-right corner to view historical data. +{{}} + +### Component status + +KubeSphere monitors the health status of various service components in the cluster. 
When a key component malfunctions, the system may become unavailable. The monitoring mechanism of KubeSphere ensures the platform can notify tenants of any occurring issues in case of a component failure, so that they can quickly locate the problem and take corresponding action. + +1. On the **Cluster Status** page, click a component under **Component Status** to view its status. + +2. You can see all the components are listed in this part. Components marked in green are those functioning normally while those marked in orange require special attention as it signals potential issues. + + {{< notice tip >}}Components marked in orange may turn to green after a period of time, the reasons of which may be different, such as image pulling retries or pod recreations. You can click the component to see its service details. +{{}} + +### Cluster resource usage + +**Cluster Resource Usage** displays the information including **CPU Usage**, **Memory Usage**, **Disk Usage**, and **Pods** of all nodes in the cluster. Click the pie chart on the left to switch indicators, which shows the trend during a period in a line chart on the right. + +## Physical Resource Monitoring + +Monitoring data in **Physical Resource Monitoring** help users better observe their physical resources and establish normal standards for resource and cluster performance. KubeSphere allows users to view cluster monitoring data within the last 7 days, including **CPU Usage**, **Memory Usage**, **Average CPU Load (1 minute/5 minutes/15 minutes)**, **Disk Usage**, **Inode Usage**, **Disk Throughput (read/write)**, **IOPS (read/write)**, **Network Bandwidth**, and **Pod Status**. You can customize the time range and time interval to view historical monitoring data of physical resources in KubeSphere. The following sections briefly introduce each monitoring indicator. + +### CPU usage + +CPU usage shows how CPU resources are used in a period. If you notice that the CPU usage of the platform during a certain period soars, you must first locate the process that is occupying CPU resources the most. For example, for Java applications, you may expect a CPU usage spike in the case of memory leaks or infinite loops in the code. + +### Memory usage + +Memory is one of the important components on a machine, serving as a bridge for communications with the CPU. Therefore, the performance of memory has a great impact on the machine. Data loading, thread concurrency and I/O buffering are all dependent on memory when a program is running. The size of available memory determines whether the program can run normally and how it is functioning. Memory usage reflects how memory resources are used within a cluster as a whole, displayed as a percentage of available memory in use at a given moment. + +### Average CPU load + +Average CPU load is the average number of processes in the system in a runnable state and an uninterruptible state per unit time. Namely, it is the average number of active processes. Note that there is no direct relation between the average CPU load and the CPU usage. Ideally, the average load should be equal to the number of CPUs. Therefore, you need to consider the number of CPUs when you look into the average load. A system is overloaded only when the average load is greater than the number of CPUs. + +KubeSphere provides users with three different time periods to view the average load: 1 minute, 5 minutes, and 15 minutes. 
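+
+If you need to drill down from the console to the command line, the following commands give a quick view of which nodes and Pods are consuming the most CPU. They assume that the Kubernetes metrics API (for example, metrics-server) is available in your cluster:
+
+```bash
+# Current CPU and memory usage per node
+kubectl top nodes
+
+# Pods across all namespaces, sorted by CPU usage
+kubectl top pods -A --sort-by=cpu
+```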
Normally, it is recommended that you review all of them to gain a comprehensive understanding of average CPU load: + +- If the curves of 1 minute / 5 minutes / 15 minutes are similar within a certain period, it indicates that the CPU load of the cluster is relatively stable. +- If the value of 1 minute in a certain period, or at a specific time point is much greater than that of 15 minutes, it means that the load in the last 1 minute is increasing, and you need to keep observing. Once the value of 1 minute exceeds the number of CPUs, it may mean that the system is overloaded. You need to further analyze the source of the problem. +- Conversely, if the value of 1 minute in a certain period, or at a specific time point is much less than that of 15 minutes, it means that the load of the system is decreasing in the last 1 minute, and a high load has been generated in the previous 15 minutes. + +### Disk usage + +KubeSphere workloads such as `StatefulSets` and `DaemonSets` all rely on persistent volumes. Some components and services also require a persistent volume. Such backend storage relies on disks, such as block storage or network shared storage. In this connection, providing a real-time monitoring environment for disk usage is an important part of maintaining the high reliability of data. + +In the daily management of the Linux system, platform administrators may encounter data loss or even system crashes due to insufficient disk space. As an essential part of cluster management, they need to pay close attention to the disk usage of the system and ensure that the file system is not filling up or abused. By monitoring the historical data of disk usage, you can evaluate how disks are used during a given period of time. In the case of high disk usage, you can free up disk space by cleaning up unnecessary images or containers. + +### Inode usage + +Each file must have an inode, which is used to store the file's meta-information, such as the file's creator and creation date. The inode will also consume hard disk space, and many small cache files can easily lead to the exhaustion of inode resources. Also, the inode may be used up, but the hard disk is not full. In this case, new files cannot be created on the hard disk. + +In KubeSphere, the monitoring of inode usage can help you detect such situations in advance, as you can have a clear view of cluster inode usage. The mechanism prompts users to clean up temporary files in time, preventing the cluster from being unable to work due to inode exhaustion. + +### Disk throughput + +The monitoring of disk throughput and IOPS is an indispensable part of disk monitoring, which is convenient for cluster administrators to adjust data layout and other management activities to optimize the overall performance of the cluster. Disk throughput refers to the speed of the disk transmission data stream (shown in MB/s), and the transmission data are the sum of data reading and writing. When large blocks of discontinuous data are being transmitted, this indicator is of great importance for reference. + +### IOPS + +**IOPS (Input/Output Operations Per Second)** represents a performance measurement of the number of read and write operations per second. Specifically, the IOPS of a disk is the sum of the number of continuous reads and writes per second. This indicator is of great significance for reference when small blocks of discontinuous data are being transmitted. 
+
+### Network bandwidth
+
+The network bandwidth is the ability of the network card to receive or send data per second, shown in Mbps (megabits per second).
+
+### Pod status
+
+Pod status displays the total number of pods in different states, including **Running**, **Completed** and **Warning**. The pod tagged **Completed** usually refers to a Job or a CronJob. The number of pods marked **Warning**, which means an abnormal state, requires special attention.
+
+## etcd Monitoring
+
+etcd monitoring helps you to make better use of etcd, especially to locate performance problems. The etcd service provides metrics interfaces natively, and the KubeSphere monitoring system features a highly graphic and responsive dashboard to display its native data.
+
+|Indicators|Description|
+|---|---|
+|Service Status | - **Leader exists** indicates whether the member has a Leader. If a member does not have a Leader, it is completely unavailable. If all members in the cluster do not have any Leader, the entire cluster is completely unavailable.<br />- **Leader changes in 1 h** refers to the number of Leader changes seen by members of the cluster in 1 hour. Frequent Leader changes will significantly affect the performance of etcd. It also shows that the Leader is unstable, possibly due to network connection issues or excessive loads hitting the etcd cluster. |
+|DB Size | The size of the underlying database (in MiB) of etcd. The current graph shows the average size of each member database of etcd. |
+|Client Traffic|It includes the total traffic sent to the gRPC client and the total traffic received from the gRPC client. For more information about the indicator, see [etcd Network](https://github.com/etcd-io/etcd/blob/v3.2.17/Documentation/metrics.md#network). |
+|gRPC Stream Message|The gRPC streaming message receiving rate and sending rate on the server side, which reflects whether large-scale data read and write operations are happening in the cluster. For more information about the indicator, see [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus#counters).|
+|WAL Fsync|The latency of WAL calling fsync. A `wal_fsync` is called when etcd persists its log entries to disk before applying them. For more information about the indicator, see [etcd Disk](https://etcd.io/docs/v3.3.12/metrics/#grpc-requests). |
+|DB Fsync|The submission delay distribution of the backend calls. When etcd submits its most recent incremental snapshot to disk, a `backend_commit` will be called. Note that high latency of disk operations (long WAL log synchronization time or library synchronization time) usually indicates disk problems, which may cause high request latency or make the cluster unstable. For more information about the indicator, see [etcd Disk](https://etcd.io/docs/v3.3.12/metrics/#grpc-requests). |
+|Raft Proposal|- **Proposal Commit Rate** records the rate of consensus proposals committed. If the cluster is healthy, this indicator should increase over time. Several healthy members of an etcd cluster may have different general proposals at the same time. A continuous large lag between a single member and its leader indicates that the member is slow or unhealthy.<br />- **Proposal Apply Rate** records the total rate of consensus proposals applied. The etcd server applies each committed proposal asynchronously. The difference between the **Proposal Commit Rate** and the **Proposal Apply Rate** should usually be small (only a few thousands even under high loads). If the difference between them continues to rise, it indicates that the etcd server is overloaded. This can happen when using large-scale queries such as heavy range queries or large txn operations.<br />- **Proposal Failure Rate** records the total rate of failed proposals, usually related to two issues: temporary failures related to leader election or longer downtime due to a loss of quorum in the cluster.<br />- **Proposal Pending Total** records the current number of pending proposals. An increase in pending proposals indicates high client loads or members unable to submit proposals.<br />Currently, the data displayed on the dashboard is averaged across etcd members. For more information about these indicators, see [etcd Server](https://etcd.io/docs/v3.3.12/metrics/#server). |
+
+## API Server Monitoring
+
+[API Server](https://kubernetes.io/docs/concepts/overview/kubernetes-api/) is the hub for the interaction of all components in a Kubernetes cluster. The following table lists the main indicators monitored for the API Server.
+
+|Indicators|Description|
+|---|---|
+|Request Latency|Classified by HTTP request methods, the latency of resource request response in milliseconds.|
+|Request per Second|The number of requests accepted by kube-apiserver per second.|
+
+## Scheduler Monitoring
+
+[Scheduler](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/) monitors the Kubernetes API of newly created pods and determines which nodes these new pods run on. It makes this decision based on available data, including the availability of collected resources and the resource requirements of the Pod. Monitoring data for scheduling delays ensures that you can see any delays facing the scheduler.
+
+|Indicators|Description|
+|---|---|
+|Attempt Frequency|Includes the number of scheduling successes, errors, and failures.|
+|Attempt Rate|Includes the scheduling rate of successes, errors, and failures.|
+|Scheduling latency|End-to-end scheduling delay, which is the sum of scheduling algorithm delay and binding delay.|
+
+## Resource Usage Ranking
+
+You can sort nodes in ascending and descending order by indicators such as CPU usage, average CPU load, memory usage, disk usage, inode usage, and Pod usage. This enables administrators to quickly find potential problems or identify a node's insufficient resources.
diff --git a/content/en/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/_index.md b/content/en/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/_index.md
new file mode 100644
index 000000000..2c511afda
--- /dev/null
+++ b/content/en/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/_index.md
@@ -0,0 +1,7 @@
+---
+linkTitle: "Cluster-wide Alerting and Notification"
+weight: 8500
+
+_build:
+  render: false
+---
diff --git a/content/en/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/alerting-message.md b/content/en/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/alerting-message.md
new file mode 100644
index 000000000..fe59103b8
--- /dev/null
+++ b/content/en/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/alerting-message.md
@@ -0,0 +1,28 @@
+---
+title: "Alerting Messages (Node Level)"
+keywords: 'KubeSphere, Kubernetes, node, Alerting, messages'
+description: 'Learn how to view alerting messages for nodes.'
+linkTitle: "Alerting Messages (Node Level)"
+weight: 8540
+---
+
+Alerting messages record detailed information of alerts triggered based on the alerting policy defined. This tutorial demonstrates how to view alerting messages at the node level.
+
+## Prerequisites
+
+- You have enabled [KubeSphere Alerting](../../../pluggable-components/alerting/).
+- You need to create a user (`cluster-admin`) and grant it the `clusters-admin` role. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/#step-4-create-a-role).
+- You have created a node-level alerting policy and an alert has been triggered.
For more information, refer to [Alerting Policies (Node Level)](../alerting-policy/). + +## View Alerting Messages + +1. Log in to the KubeSphere console as `cluster-admin` and go to **Alerting Messages** under **Monitoring & Alerting**. + +2. On the **Alerting Messages** page, you can see all alerting messages in the list. The first column displays the summary and details you have defined for the alert. To view details of an alerting message, click the name of the alerting policy and then click the **Alerting History** tab on the alerting policy details page. + +3. On the **Alerting History** tab, you can see alert severity, monitoring target, and activation time. + +## View Notifications + +If you also want to receive alert notifications (for example, email and Slack messages), you need to configure [a notification channel](../../../cluster-administration/platform-settings/notification-management/configure-email/) first. + diff --git a/content/en/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy.md b/content/en/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy.md new file mode 100644 index 000000000..33d379370 --- /dev/null +++ b/content/en/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy.md @@ -0,0 +1,69 @@ +--- +title: "Alerting Policies (Node Level)" +keywords: 'KubeSphere, Kubernetes, Node, Alerting, Policy, Notification' +description: 'Learn how to set alerting policies for nodes.' +linkTitle: "Alerting Policies (Node Level)" +weight: 8530 +--- + +KubeSphere provides alerting policies for nodes and workloads. This tutorial demonstrates how to create alerting policies for nodes in a cluster. See [Alerting Policy (Workload Level)](../../../project-user-guide/alerting/alerting-policy/) to learn how to configure alerting policies for workloads. + +KubeSphere also has built-in policies which will trigger alerts if conditions defined for these policies are met. On the **Built-in Policies** tab, you can click a policy to see its details. Note that they cannot be directly deleted or edited on the console. + +## Prerequisites + +- You have enabled [KubeSphere Alerting](../../../pluggable-components/alerting/). +- To receive alert notifications, you must configure a [notification channel](../../../cluster-administration/platform-settings/notification-management/configure-email/) beforehand. +- You need to create a user (`cluster-admin`) and grant it the `clusters-admin` role. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/#step-4-create-a-role). +- You have workloads in your cluster. If they are not ready, see [Deploy and Access Bookinfo](../../../quick-start/deploy-bookinfo-to-k8s/) to create a sample app. + +## Create an Alerting Policy + +1. Log in to the console as `cluster-admin`. Click **Platform** in the upper-left corner, and then click **Cluster Management**. + +2. Go to **Alerting Policies** under **Monitoring & Alerting**, and then click **Create**. + +3. In the displayed dialog box, provide the basic information as follows. Click **Next** to continue. + + - **Name**. A concise and clear name as its unique identifier, such as `node-alert`. + - **Alias**. Help you distinguish alerting policies better. + - **Threshold Duration (min)**. The status of the alerting policy becomes Firing when the duration of the condition configured in the alerting rule reaches the threshold. + - **Severity**. 
Allowed values include **Warning**, **Error** and **Critical**, providing an indication of how serious an alert is.
+   - **Description**. A brief introduction to the alerting policy.
+
+4. On the **Rule Settings** tab, you can use the rule template or create a custom rule. To use the template, set the following parameters and click **Next** to continue.
+
+   - **Monitoring Targets**. Select at least one node in your cluster for monitoring.
+   - **Alerting Rule**. Define a rule for the alerting policy. The rules provided in the drop-down list are based on Prometheus expressions and an alert will be triggered when conditions are met. You can monitor objects such as CPU and memory.
+
+   {{< notice note >}}
+
+   You can create a custom rule with PromQL by entering an expression in the **Monitoring Metrics** field (autocompletion supported). For more information, see [Querying Prometheus](https://prometheus.io/docs/prometheus/latest/querying/basics/).
+
+   {{</ notice >}}
+
+5. On the **Message Settings** tab, enter the summary and details of the alerting message, then click **Create**.
+
+6. An alerting policy is **Inactive** when just created. If the conditions in the rule expression are met, it first changes to **Pending**, and then to **Firing** if the conditions continue to be met within the given time range.
+
+## Edit an Alerting Policy
+
+To edit an alerting policy after it is created, on the **Alerting Policies** page, click the icon on the right of the alerting policy.
+
+1. Click **Edit** from the drop-down list and edit the alerting policy following the same steps as you create it. Click **OK** on the **Message Settings** page to save it.
+
+2. Click **Delete** from the drop-down list to delete an alerting policy.
+
+## View an Alerting Policy
+
+Click the name of an alerting policy on the **Alerting Policies** page to see its detailed information, including the alerting rule and alerting history. You can also see the rule expression, which is based on the template you used when creating the alerting policy.
+
+Under **Monitoring**, the **Alert Monitoring** chart shows the actual usage or amount of resources over time. **Alerting Message** displays the customized message you set in notifications.
+
+{{< notice note >}}
+
+You can click the icon in the upper-right corner to select or customize a time range for the alert monitoring chart.
+
+You can also click the icon in the upper-right corner to manually refresh the alert monitoring chart.
+
+{{</ notice >}}
diff --git a/content/en/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/alertmanager.md b/content/en/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/alertmanager.md
new file mode 100644
index 000000000..58e624f3c
--- /dev/null
+++ b/content/en/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/alertmanager.md
@@ -0,0 +1,37 @@
+---
+title: "Manage Alerts with Alertmanager in KubeSphere"
+keywords: 'Kubernetes, Prometheus, Alertmanager, alerting'
+description: 'Learn how to manage alerts with Alertmanager in KubeSphere.'
+linkTitle: "Alertmanager in KubeSphere"
+weight: 8510
+---
+
+Alertmanager handles alerts sent by client applications such as the Prometheus server. It takes care of deduplicating, grouping, and routing them to the correct receiver integration such as email, PagerDuty, or OpsGenie. It also takes care of silencing and inhibition of alerts. For more details, refer to the [Alertmanager guide](https://prometheus.io/docs/alerting/latest/alertmanager/).
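+
+For background, the following is roughly what a hand-maintained Alertmanager configuration looks like when routing alerts to an email receiver. All values are illustrative placeholders; as explained below, KubeSphere users normally do not edit this file directly and rely on Notification Manager instead:
+
+```yaml
+# Illustrative alertmanager.yml snippet (placeholder values only)
+route:
+  group_by: ['alertname', 'namespace']
+  group_wait: 30s
+  group_interval: 5m
+  repeat_interval: 12h
+  receiver: 'ops-email'
+receivers:
+  - name: 'ops-email'
+    email_configs:
+      - to: 'ops@example.com'
+        from: 'alertmanager@example.com'
+        smarthost: 'smtp.example.com:587'
+        auth_username: 'alertmanager@example.com'
+        auth_password: 'use-a-real-secret-here'
+```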
+ +KubeSphere has been using Prometheus as its monitoring service's backend from the first release. Starting from v3.0, KubeSphere adds Alertmanager to its monitoring stack to manage alerts sent from Prometheus as well as other components such as [kube-events](https://github.com/kubesphere/kube-events) and kube-auditing. + +![alertmanager-kubesphere](/images/docs/v3.3/cluster-administration/cluster-wide-alerting-and-notification/alertmanager-in-kubesphere/alertmanager@kubesphere.png) + +## Use Alertmanager to Manage Prometheus Alerts + +Alerting with Prometheus is separated into two parts. Alerting rules in Prometheus servers send alerts to an Alertmanager. The Alertmanager then manages those alerts, including silencing, inhibition, aggregation and sending out notifications via methods such as emails, on-call notification systems, and chat platforms. + +Starting from v3.0, KubeSphere adds popular alert rules in the open source community to its Prometheus offering as built-in alert rules. And by default Prometheus in KubeSphere v3.0 evaluates these built-in alert rules continuously and then sends alerts to Alertmanager. + +## Use Alertmanager to Manage Kubernetes Event Alerts + +Alertmanager can be used to manage alerts sent from sources other than Prometheus. In KubeSphere v3.0 and above, users can use it to manage alerts triggered by Kubernetes events. For more details, refer to [kube-events](https://github.com/kubesphere/kube-events). + +## Use Alertmanager to Manage KubeSphere Auditing Alerts + +In KubeSphere v3.0 and above, users can also use Alertmanager to manage alerts triggered by Kubernetes/KubeSphere auditing events. + +## Receive Notifications for Alertmanager Alerts + +Generally, to receive notifications for Alertmanager alerts, users have to edit Alertmanager's configuration files manually to configure receiver settings such as Email and Slack. + +This is not convenient for Kubernetes users and it breaks the multi-tenant principle/architecture of KubeSphere. More specifically, alerts triggered by workloads in different namespaces, which should have been sent to different tenants, might be sent to the same tenant. + +To use Alertmanager to manage alerts on the platform, KubeSphere offers [Notification Manager](https://github.com/kubesphere/notification-manager), a Kubernetes native notification management tool, which is completely open source. It complies with the multi-tenancy principle, providing user-friendly experiences of Kubernetes notifications. It's installed by default in KubeSphere v3.0 and above. + +For more details about using Notification Manager to receive Alertmanager notifications, refer to [Notification Manager](https://github.com/kubesphere/notification-manager). \ No newline at end of file diff --git a/content/en/docs/v3.4/cluster-administration/nodes.md b/content/en/docs/v3.4/cluster-administration/nodes.md new file mode 100644 index 000000000..19437c55e --- /dev/null +++ b/content/en/docs/v3.4/cluster-administration/nodes.md @@ -0,0 +1,62 @@ +--- +title: "Node Management" +keywords: "Kubernetes, KubeSphere, taints, nodes, labels, requests, limits" +description: "Monitor node status and learn how to add node labels or taints." + +linkTitle: "Node Management" +weight: 8100 +--- + +Kubernetes runs your workloads by placing containers into Pods to run on nodes. A node may be a virtual or physical machine, depending on the cluster. Each node contains the services necessary to run Pods, managed by the control plane. 
For more information about nodes, see the [official documentation of Kubernetes](https://kubernetes.io/docs/concepts/architecture/nodes/). + +This tutorial demonstrates what a cluster administrator can view and do for nodes within a cluster. + +## Prerequisites + +You need a user granted a role that includes the **Cluster Management** permission. For example, you can log in to the console as `admin` directly or create a new role with the permission and assign it to a user. + +## Node Status + +Cluster nodes are only accessible to cluster administrators. Some node metrics are very important to clusters. Therefore, it is the administrator's responsibility to watch over these numbers and make sure nodes are available. Follow the steps below to view node status. + +1. Click **Platform** in the upper-left corner and select **Cluster Management**. + +2. If you have enabled the [multi-cluster feature](../../multicluster-management/) with member clusters imported, you can select a specific cluster to view its nodes. If you have not enabled the feature, refer to the next step directly. + +3. Choose **Cluster Nodes** under **Nodes**, where you can see detailed information about node status. + + - **Name**: The node name and subnet IP address. + - **Status**: The current status of a node, indicating whether a node is available or not. + - **Role**: The role of a node, indicating whether a node is a worker or the control plane. + - **CPU Usage**: The real-time CPU usage of a node. + - **Memory Usage**: The real-time memory usage of a node. + - **Pods**: The real-time usage of Pods on a node. + - **Allocated CPU**: This metric is calculated based on the total CPU requests of Pods on a node. It represents the amount of CPU reserved for workloads on this node, even if workloads are using fewer CPU resources. This figure is vital to the Kubernetes scheduler (kube-scheduler), which favors nodes with lower allocated CPU resources when scheduling a Pod in most cases. For more details, refer to [Managing Resources for Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). + - **Allocated Memory**: This metric is calculated based on the total memory requests of Pods on a node. It represents the amount of memory reserved for workloads on this node, even if workloads are using fewer memory resources. + + {{< notice note >}} +**CPU** and **Allocated CPU** are usually different, as are **Memory** and **Allocated Memory**, which is normal. As a cluster administrator, you need to focus on both metrics instead of just one. It's always a good practice to set resource requests and limits for each node to match their real usage. Over-allocating resources can lead to low cluster utilization, while under-allocating may result in high pressure on a cluster, leaving the cluster unhealthy. + {{}} + +## Node Management +On the **Cluster Nodes** page, you can perform the following operations: + +- **Cordon/Uncordon**: Click the icon on the right of the cluster node, and then click **Cordon** or **Uncordon**. Marking a node as unschedulable is very useful during a node reboot or other maintenance. The Kubernetes scheduler will not schedule new Pods to this node if it's been marked unschedulable. Besides, this does not affect existing workloads already on the node. + +- **Open Terminal**: Click the icon on the right of the cluster node, and then click **Open Terminal**. This makes it convenient for you to manage nodes, such as modifying node configurations and downloading images.
+ +- **Edit Taints**: Taints allow a node to repel a set of Pods. To edit a taint, select the check box before the target node. In the **Edit Taints** dialog box that is displayed, you can add, delete, or modify taints. + +To view node details, click the node. On the details page, you can perform the following operations: + +- **Edit Labels**: Node labels can be very useful when you want to assign Pods to specific nodes. Label a node first (for example, label GPU nodes with `node-role.kubernetes.io/gpu-node`), and then add the label in **Advanced Settings** [when you create a workload](../../project-user-guide/application-workloads/deployments/#step-5-configure-advanced-settings) so that you can allow Pods to run on GPU nodes explicitly. To add node labels, select **More** > **Edit Labels**. + +- View the running status of nodes, pods, metadata, monitoring data, and events. + + {{< notice note >}} +Be careful when you add taints as they may cause unexpected behavior and leave services unavailable. For more information, see [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/). + {{}} + +## Add and Remove Nodes + +Currently, you cannot add or remove nodes directly from the KubeSphere console, but you can do it by using [KubeKey](https://github.com/kubesphere/kubekey). For more information, see [Add New Nodes](../../installing-on-linux/cluster-operation/add-new-nodes/) and [Remove Nodes](../../installing-on-linux/cluster-operation/remove-nodes/). diff --git a/content/en/docs/v3.4/cluster-administration/platform-settings/notification-management/_index.md b/content/en/docs/v3.4/cluster-administration/platform-settings/notification-management/_index.md new file mode 100644 index 000000000..4d4e25b09 --- /dev/null +++ b/content/en/docs/v3.4/cluster-administration/platform-settings/notification-management/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Notification Management" +weight: 8720 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-dingtalk.md b/content/en/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-dingtalk.md new file mode 100644 index 000000000..b63db2076 --- /dev/null +++ b/content/en/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-dingtalk.md @@ -0,0 +1,38 @@ +--- +title: "Configure DingTalk Notifications" +keywords: 'KubeSphere, Kubernetes, DingTalk, Alerting, Notification' +description: 'Learn how to configure a DingTalk conversation or chatbot to receive platform notifications sent by KubeSphere.' +linkTitle: "Configure DingTalk Notifications" +weight: 8723 +--- + +[DingTalk](https://www.dingtalk.com/en) is an enterprise-grade communication and collaboration platform. It integrates messaging, conference calling, task management, and other features into a single application. + +This document describes how to configure a DingTalk conversation or chatbot to receive platform notifications sent by KubeSphere. + +## Prerequisites + +- You need to have a user with the `platform-admin` role, for example, the `admin` user. For more information, see [Create Workspaces, Projects, Users and Roles](../../../../quick-start/create-workspace-and-project/). +- You need to have a DingTalk account.
+- You need to create an applet on [DingTalk Admin Panel](https://oa.dingtalk.com/index.htm#/microApp/microAppList) and make necessary configurations according to [DingTalk API documentation](https://developers.dingtalk.com/document/app/create-group-session). + +## Configure DingTalk Conversation or Chatbot + +1. Log in to the KubeSphere console as `admin`. +2. Click **Platform** in the upper-left corner and select **Platform Settings**. +3. In the left navigation pane, click **Notification Configuration** under **Notification Management**. +4. On the **DingTalk** page, select the **Conversation Settings** tab and configure the following parameters: + - **AppKey**: The AppKey of the applet created on DingTalk. + - **AppSecret**: The AppSecret of the applet created on DingTalk. + - **Conversation ID**: The conversation ID obtained on DingTalk. To add a conversation ID, enter your conversation ID and click **Add** to add it. +5. (Optional) On the **DingTalk** page, select the **DingTalk Chatbot** tab and configure the following parameters: + - **Webhook URL**: The webhook URL of your DingTalk robot. + - **Secret**: The secret of your DingTalk robot. + - **Keywords**: The keywords you added to your DingTalk robot. To add a keyword, enter your keyword and click **Add** to add it. +6. To specify notification conditions, select the **Notification Conditions** checkbox. Specify a label, an operator, and values and click **Add** to add it. You will receive only notifications that meet the conditions. +7. After the configurations are complete, click **Send Test Message** to send a test message. +8. If you successfully receive the test message, click **OK** to save the configurations. +9. To enable DingTalk notifications, turn the toggle in the upper-right corner to **Enabled**. + + + diff --git a/content/en/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-email.md b/content/en/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-email.md new file mode 100644 index 000000000..400629d3a --- /dev/null +++ b/content/en/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-email.md @@ -0,0 +1,79 @@ +--- +title: "Configure Email Notifications" +keywords: 'KubeSphere, Kubernetes, custom, platform' +description: 'Configure an email server and add recipients to receive email notifications.' +linkTitle: "Configure Email Notifications" +weight: 8722 +--- + +This tutorial demonstrates how to configure an email server and add recipients to receive email notifications of alerting policies. + +## Configure the Email Server + +1. Log in to the web console with a user granted the role `platform-admin`. + +2. Click **Platform** in the upper-left corner and select **Platform Settings**. + +3. Navigate to **Notification Configuration** under **Notification Management**, and then choose **Email**. + +4. Under **Server Settings**, configure your email server by filling in the following fields. + + - **SMTP Server Address**: The SMTP server address that provides email services. The port is usually `25`. + - **Use SSL Secure Connection**: SSL can be used to encrypt emails, thereby improving the security of information transmitted by email. Usually you have to configure the certificate for the email server. + - **SMTP Username**: The SMTP account. + - **SMTP Password**: The SMTP account password. + - **Sender Email Address**: The sender's email address. + +5. Click **OK**. + +## Recipient Settings + +### Add recipients + +1.
Under **Recipient Settings**, enter a recipient's email address and click **Add**. + +2. After it is added, the email address of a recipient will be listed under **Recipient Settings**. You can add up to 50 recipients and all of them will be able to receive email notifications. + +3. To remove a recipient, hover over the email address you want to remove, then click the icon. + +### Set notification conditions + +1. Select the checkbox on the left of **Notification Conditions** to set notification conditions. + + - **Label**: Name, severity, or monitoring target of an alerting policy. You can select a label or customize a label. + - **Operator**: Mapping between the label and the values. The operator includes **Includes values**, **Does not include values**, **Exists**, and **Does not exist**. + - **Values**: Values associated with the label. + {{< notice note >}} + + - Operators **Includes values** and **Does not include values** require one or more label values. Use a carriage return to separate values. + - Operators **Exists** and **Does not exist** determine whether a label exists, and do not require a label value. + + {{}} + +2. You can click **Add** to add notification conditions. + +3. You can click the icon on the right of a notification condition to delete the condition. + +4. After the configurations are complete, you can click **Send Test Message** for verification. + +5. In the upper-right corner, you can turn on the **Disabled** toggle to enable notifications, or turn off the **Enabled** toggle to disable them. + + {{< notice note >}} + + - After the notification conditions are set, the recipients will receive only notifications that meet the conditions. + - If you change the existing configuration, you must click **OK** to apply it. + + {{}} + +## Receive Email Notifications + +After you configure the email server and add recipients, you need to enable [KubeSphere Alerting](../../../../pluggable-components/alerting/) and create an alerting policy for workloads or nodes. Once it is triggered, all the recipients can receive email notifications. + +{{< notice note >}} + +- If you update your email server configuration, KubeSphere will send email notifications based on the latest configuration. +- By default, KubeSphere sends notifications for the same alert about every 12 hours. The notification repeat interval is mainly controlled by `repeat_interval` in the Secret `alertmanager-main` in the project `kubesphere-monitoring-system`. You can customize the interval as needed. +- As KubeSphere has built-in alerting policies, if you do not set any customized alerting policies, your recipient can still receive email notifications once a built-in alerting policy is triggered. + +{{}} + diff --git a/content/en/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-slack.md b/content/en/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-slack.md new file mode 100644 index 000000000..3094f65da --- /dev/null +++ b/content/en/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-slack.md @@ -0,0 +1,93 @@ +--- +title: "Configure Slack Notifications" +keywords: 'KubeSphere, Kubernetes, Slack, notifications' +description: 'Configure Slack notifications and add channels to receive notifications from alerting policies, kube-events, and kube-auditing.'
+linkTitle: "Configure Slack Notifications" +weight: 8725 +--- + +This tutorial demonstrates how to configure Slack notifications and add channels, which can receive notifications for alerting policies. + +## Prerequisites + +You have an available [Slack](https://slack.com/) workspace. + +## Obtain a Slack OAuth Token + +You need to create a Slack app first so that it can help you send notifications to Slack channels. To authenticate your app, you must create an OAuth token. + +1. Log in to Slack to [create an app](https://api.slack.com/apps). + +2. On the **Your Apps** page, click **Create New App**. + +3. In the dialog that appears, enter your app name and select a Slack workspace for it. Click **Create App** to continue. + +4. From the left navigation bar, select **OAuth & Permissions** under **Features**. On the **Auth & Permissions** page, scroll down to **Scopes** and click **Add an OAuth Scope** under **Bot Token Scopes** and **User Token Scopes** respectively. Select the **chart:write** permission for both scopes. + +5. Scroll up to **OAuth Tokens & Redirect URLs** and click **Install to Workspace**. Grant the permission to access your workspace for the app and you can find created tokens under **OAuth Tokens for Your Team**. + +## Configure Slack Notifications on the KubeSphere Console + +You must provide the Slack token on the console for authentication so that KubeSphere can send notifications to your channel. + +1. Log in to the web console with a user granted the role `platform-admin`. + +2. Click **Platform** in the top-left corner and select **Platform Settings**. + +3. Navigate to **Slack** under **Notification Management**. + +4. For **Slack Token** under **Server Settings**, you can enter either a User OAuth Token or a Bot User OAuth Token for authentication. If you use the User OAuth Token, it is the app owner that will send notifications to your Slack channel. If you use the Bot User OAuth Token, it is the app that will send notifications. + +5. Under **Channel Settings**, enter a Slack channel where you want to receive notifications and click **Add**. + +6. After it is added, the channel will be listed under **Channel List**. You can add up to 20 channels and all of them will be able to receive notifications of alerts. + + {{< notice note >}} + + To remove a channel from the list, click the cross icon next to the channel. + + {{}} + +7. Click **Save**. + +8. Select the checkbox on the left of **Notification Conditions** to set notification conditions. + + - **Label**: Name, severity, or monitoring target of an alerting policy. You can select a label or customize a label. + - **Operator**: Mapping between the label and the values. The operator includes **Includes values**, **Does not include values**, **Exists**, and **Does not exist**. + - **Values**: Values associated with the label. + {{< notice note >}} + + - Operators **Includes values** and **Does not include values** require one or more label values. Use a carriage return to separate values. + - Operators **Exists** and **Does not exist** determine whether a label exists, and do not require a label value. + + {{}} + +9. You can click **Add** to add notification conditions, or click icon on the right of a notification condition to delete the condition. + +10. After the configurations are complete, you can click **Send Test Message** for verification. + +11. To make sure notifications will be sent to a Slack channel, turn on **Receive Notifications** and click **Update**. 
+ + {{< notice note >}} + + - After the notification conditions are set, the recipients will receive only notifications that meet the conditions. + - If you change the existing configuration, you must click **OK** to apply it. + + {{}} + +12. If you want the app to be the notification sender, make sure it is in the channel. To add it to a Slack channel, enter `/invite @` followed by the app name in your channel. + +## Receive Slack Notifications + +After you configure Slack notifications and add channels, you need to enable [KubeSphere Alerting](../../../../pluggable-components/alerting/) and create an alerting policy for workloads or nodes. Once it is triggered, all the channels in the list can receive notifications. + +The image below is a Slack notification example: + +{{< notice note >}} + +- If you update your Slack notification configuration, KubeSphere will send notifications based on the latest configuration. +- By default, KubeSphere sends notifications for the same alert about every 12 hours. The notification repeat interval is mainly controlled by `repeat_interval` in the Secret `alertmanager-main` in the project `kubesphere-monitoring-system`. You can customize the interval as needed. +- As KubeSphere has built-in alerting policies, if you do not set any customized alerting policies, your Slack channel can still receive notifications once a built-in alerting policy is triggered. + +{{}} + diff --git a/content/en/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-webhook.md b/content/en/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-webhook.md new file mode 100644 index 000000000..a5e44a226 --- /dev/null +++ b/content/en/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-webhook.md @@ -0,0 +1,63 @@ +--- +title: "Configure Webhook Notifications" +keywords: 'KubeSphere, Kubernetes, custom, platform, webhook' +description: 'Configure a webhook server to receive platform notifications through the webhook.' +linkTitle: "Configure Webhook Notifications" +weight: 8726 +--- + +A webhook is a way for an app to send notifications triggered by specific events. It delivers information to other applications in real time, allowing users to receive notifications immediately. + +This tutorial describes how to configure a webhook server to receive platform notifications. + +## Prerequisites + +You need to prepare a user granted the `platform-admin` role. For more information, see [Create Workspaces, Projects, Users and Roles](../../../../quick-start/create-workspace-and-project/). + +## Configure the Webhook Server + +1. Log in to the KubeSphere web console as the `platform-admin` user. + +2. Click **Platform** in the upper-left corner and select **Platform Settings**. + +3. In the left navigation pane, click **Notification Configuration** under **Notification Management**, and select **Webhook**. + +4. On the **Webhook** tab page, set the following parameters: + + - **Webhook URL**: URL of the webhook server. + + - **Verification Type**: Webhook authentication method. + - **No authentication**: Skips authentication. All notifications can be sent to the URL. + - **Bearer token**: Uses a token for authentication. + - **Basic authentication**: Uses a username and password for authentication. + + {{< notice note>}}Currently, KubeSphere does not support TLS connections (HTTPS). You need to select **Skip TLS verification (insecure)** if you use an HTTPS URL. + + {{}} + +5.
Select the checkbox on the left of **Notification Conditions** to set notification conditions. + + - **Label**: Name, severity, or monitoring target of an alerting policy. You can select a label or customize a label. + - **Operator**: Mapping between the label and the values. The operator includes **Includes values**, **Does not include values**, **Exists**, and **Does not exist**. + - **Values**: Values associated with the label. + {{< notice note >}} + + - Operators **Includes values** and **Does not include values** require one or more label values. Use a carriage return to separate values. + - Operators **Exists** and **Does not exist** determine whether a label exists, and do not require a label value. + + {{}} + +6. You can click **Add** to add notification conditions, or click the icon on the right of a notification condition to delete the condition. + +7. After the configurations are complete, you can click **Send Test Message** for verification. + +8. In the upper-right corner, you can turn on the **Disabled** toggle to enable notifications, or turn off the **Enabled** toggle to disable them. + +9. Click **OK** after you finish. + + {{< notice note >}} + + - After the notification conditions are set, the recipients will receive only notifications that meet the conditions. + - If you change the existing configuration, you must click **OK** to apply it. + + {{}} diff --git a/content/en/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-wecom.md b/content/en/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-wecom.md new file mode 100644 index 000000000..0788a1b0b --- /dev/null +++ b/content/en/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-wecom.md @@ -0,0 +1,33 @@ +--- +title: "Configure WeCom Notifications" +keywords: 'KubeSphere, Kubernetes, WeCom, Alerting, Notification' +description: 'Learn how to configure a WeCom server to receive platform notifications sent by KubeSphere.' +linkTitle: "Configure WeCom Notifications" +weight: 8724 +--- + +[WeCom](https://work.weixin.qq.com/) is a communication platform for enterprises that includes convenient communication and office automation tools. + +This document describes how to configure a WeCom server to receive platform notifications sent by KubeSphere. + +## Prerequisites + +- You need to have a user with the `platform-admin` role, for example, the `admin` user. For more information, see [Create Workspaces, Projects, Users and Roles](../../../../quick-start/create-workspace-and-project/). +- You need to have a [WeCom account](https://work.weixin.qq.com/wework_admin/register_wx?from=myhome). +- You need to create a self-built application on the [WeCom Admin Console](https://work.weixin.qq.com/wework_admin/loginpage_wx) and obtain its AgentId and Secret. + +## Configure WeCom Server + +1. Log in to the KubeSphere console as `admin`. +2. Click **Platform** in the upper-left corner and select **Platform Settings**. +3. In the left navigation pane, click **Notification Configuration** under **Notification Management**. +4. On the **WeCom** page, set the following fields under **Server Settings**: + - **Corporation ID**: The Corporation ID of your WeCom account. + - **App AgentId**: The AgentId of the self-built application. + - **App Secret**: The Secret of the self-built application. +5.
To add notification recipients, select **User ID**, **Department ID**, or **Tag ID** under **Recipient Settings**, enter a corresponding ID obtained from your WeCom account, and click **Add** to add it. +6. To specify notification conditions, select the **Notification Conditions** checkbox. Specify a label, an operator, and values and click **Add** to add it. You will receive only notifications that meet the conditions. +7. After the configurations are complete, click **Send Test Message** to send a test message. +8. If you successfully receive the test message, click **OK** to save the configurations. +9. To enable WeCom notifications, turn the toggle in the upper-right corner to **Enabled**. + diff --git a/content/en/docs/v3.4/cluster-administration/platform-settings/notification-management/customize-cluster-name.md b/content/en/docs/v3.4/cluster-administration/platform-settings/notification-management/customize-cluster-name.md new file mode 100644 index 000000000..04c1d60c6 --- /dev/null +++ b/content/en/docs/v3.4/cluster-administration/platform-settings/notification-management/customize-cluster-name.md @@ -0,0 +1,40 @@ +--- +title: "Customize Cluster Name in Notification Messages" +keywords: 'KubeSphere, Kubernetes, Platform, Notification' +description: 'Learn how to customize cluster name in notification messages sent by KubeSphere.' +linkTitle: "Customize Cluster Name in Notification Messages" +weight: 8721 +--- + +This document describes how to customize your cluster name in notification messages sent by KubeSphere. + +## Prerequisites + +You need to have a user with the `platform-admin` role, for example, the `admin` user. For more information, see [Create Workspaces, Projects, Users and Roles](../../../../quick-start/create-workspace-and-project/). + +## Customize Cluster Name in Notification Messages + +1. Log in to the KubeSphere console as `admin`. + +2. Click icon in the lower-right corner and select **Kubectl**. + +3. In the displayed dialog box, run the following command: + + ```bash + kubectl edit nm notification-manager + ``` + +4. Add a field `cluster` under `.spec.receiver.options.global` to customize your cluster name: + + ```yaml + spec: + receivers: + options: + global: + cluster: + ``` + +5. When you finish, save the changes. + + + diff --git a/content/en/docs/v3.4/cluster-administration/shut-down-and-restart-cluster-gracefully.md b/content/en/docs/v3.4/cluster-administration/shut-down-and-restart-cluster-gracefully.md new file mode 100644 index 000000000..78aacb3be --- /dev/null +++ b/content/en/docs/v3.4/cluster-administration/shut-down-and-restart-cluster-gracefully.md @@ -0,0 +1,74 @@ +--- +title: "Cluster Shutdown and Restart" +description: "Learn how to gracefully shut down your cluster and restart it." +layout: "single" + +linkTitle: "Cluster Shutdown and Restart" +weight: 8800 + +icon: "/images/docs/v3.3/docs.svg" +--- +This document describes the process of gracefully shutting down your Kubernetes cluster and how to restart it. You might need to temporarily shut down your cluster for maintenance reasons. + +{{< notice warning >}} +Shutting down a cluster is very dangerous. You must fully understand the operation and its consequences. Please make an etcd backup before you proceed. +Usually, it is recommended to maintain your nodes one by one instead of restarting the whole cluster. +{{}} + +## Prerequisites +- Take an [etcd backup](https://etcd.io/docs/current/op-guide/recovery/#snapshotting-the-keyspace) prior to shutting down a cluster. 
+- SSH [passwordless login](https://man.openbsd.org/ssh.1#AUTHENTICATION) is set up between hosts. + +## Shut Down a Kubernetes Cluster +{{< notice tip >}} + +- You must back up your etcd data before you shut down the cluster as your cluster can be restored if you encounter any issues when restarting the cluster. +- Using the method in this tutorial can shut down a cluster gracefully, while the possibility of data corruption still exists. + +{{}} + +### Step 1: Get the node list +```bash +nodes=$(kubectl get nodes -o name) +``` +### Step 2: Shut down all nodes +```bash +for node in ${nodes[@]} +do + echo "==== Shut down $node ====" + ssh $node sudo shutdown -h 1 +done +``` +Then you can shut down other cluster dependencies, such as external storage. + +## Restart a Cluster Gracefully +You can restart a cluster gracefully after shutting down the cluster gracefully. + +### Prerequisites +You have shut down your cluster gracefully. + +{{< notice tip >}} +Usually, a cluster can be used after restarting, but the cluster may be unavailable due to unexpected conditions. For example: + +- etcd data corruption during the shutdown. +- Node failures. +- Unexpected network errors. + +{{}} + +### Step 1: Check all cluster dependencies' status +Ensure all cluster dependencies are ready, such as external storage. +### Step 2: Power on cluster machines +Wait for the cluster to be up and running, which may take about 10 minutes. +### Step 3: Check the status of all control plane components +Check the status of core components, such as etcd services, and make sure everything is ready. +```bash +kubectl get nodes -l node-role.kubernetes.io/master +``` + +### Step 4: Check all worker nodes' status +```bash +kubectl get nodes -l node-role.kubernetes.io/worker +``` + +If your cluster fails to restart, please try to [restore the etcd cluster](https://etcd.io/docs/current/op-guide/recovery/#restoring-a-cluster). diff --git a/content/en/docs/v3.4/cluster-administration/snapshotclass.md b/content/en/docs/v3.4/cluster-administration/snapshotclass.md new file mode 100644 index 000000000..3b74948fd --- /dev/null +++ b/content/en/docs/v3.4/cluster-administration/snapshotclass.md @@ -0,0 +1,31 @@ +--- +title: "Volume Snapshot Classes" +keywords: 'KubeSphere, Kubernetes, PVC, PV, Snapshot, Snapshot Classes' +description: 'Learn how to manage snapshot classes on KubeSphere.' +linkTitle: "Volume Snapshot Classes" +weight: 8900 +--- + +Volume snapshot classes provide a way for administrators to define storage types used for volume snapshots. This tutorial describes how to create and use snapshot classes. + +## Prerequisites + +- You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +- You need to install Kubernetes 1.17 or higher. + +- Your underlying storage plugin supports snapshots. + +## Procedures + +1. Log in to the web console of KubeSphere as `project-regular`. On the navigation pane on the left, click **Storage > Volume Snapshot Classes**. + +2. On the **Volume Snapshot Classes** page, click **Create**. + +3. In the displayed **Create Volume Snapshot Class** dialog box, set the name of the volume snapshot, and click **Next**. Also, you can set an alias and add description. + +4. 
On the **Volume Snapshot Class Settings** tab, select a provisioner and deletion policy, which supports the following types: + + - Delete: The snapshot of the underlying storage will be deleted along with the VolumeSnapshotContent. + - Retain: Both the snapshot of the underlying storage and the VolumeSnapshotContent will be retained. + diff --git a/content/en/docs/v3.4/cluster-administration/storageclass.md b/content/en/docs/v3.4/cluster-administration/storageclass.md new file mode 100644 index 000000000..5ccc1ac37 --- /dev/null +++ b/content/en/docs/v3.4/cluster-administration/storageclass.md @@ -0,0 +1,184 @@ +--- +title: "Storage Classes" +keywords: "Storage, Volume, PV, PVC, storage class, csi, Ceph RBD, GlusterFS, QingCloud" +description: "Learn basic concepts of PVs, PVCs,and storage classes, and demonstrate how to manage storage classes on KubeSphere." +linkTitle: "Storage Classes" +weight: 8800 +--- + +This tutorial demonstrates how a cluster administrator can manage storage classes and persistent volumes in KubeSphere. + +## Introduction + +A Persistent Volume (PV) is a piece of storage in the cluster that has been provisioned by an administrator or dynamically provisioned using storage classes. PVs are volume plugins like volumes, but have a lifecycle independent of any individual Pod that uses the PV. PVs can be provisioned either [statically](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#static) or [dynamically](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#dynamic). + +A Persistent Volume Claim (PVC) is a request for storage by a user. It is similar to a Pod. Pods consume node resources and PVCs consume PV resources. + +KubeSphere supports [dynamic volume provisioning](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/) based on storage classes to create PVs. + +A [storage class](https://kubernetes.io/docs/concepts/storage/storage-classes) provides a way for administrators to describe the classes of storage they offer. Different classes might map to quality-of-service levels, or to backup policies, or to arbitrary policies determined by the cluster administrators. Each storage class has a provisioner that determines what volume plugin is used for provisioning PVs. This field must be specified. For which value to use, please read [the official Kubernetes documentation](https://kubernetes.io/docs/concepts/storage/storage-classes/#provisioner) or check with your storage administrator. + +The table below summarizes common volume plugins for various provisioners (storage systems). + +| Type | Description | +| -------------------- | ------------------------------------------------------------ | +| In-tree | Built-in and run as part of Kubernetes, such as [RBD](https://kubernetes.io/docs/concepts/storage/storage-classes/#ceph-rbd) and [GlusterFS](https://kubernetes.io/docs/concepts/storage/storage-classes/#glusterfs). For more plugins of this kind, see [Provisioner](https://kubernetes.io/docs/concepts/storage/storage-classes/#provisioner). | +| External-provisioner | Deployed independently from Kubernetes, but works like an in-tree plugin, such as [nfs-client](https://github.com/kubernetes-retired/external-storage/tree/master/nfs-client). For more plugins of this kind, see [External Storage](https://github.com/kubernetes-retired/external-storage). 
| +| CSI | Container Storage Interface, a standard for exposing storage resources to workloads on COs (for example, Kubernetes), such as [QingCloud-csi](https://github.com/yunify/qingcloud-csi) and [Ceph-CSI](https://github.com/ceph/ceph-csi). For more plugins of this kind, see [Drivers](https://kubernetes-csi.github.io/docs/drivers.html). | + +## Prerequisites + +You need a user granted a role including the permission of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the permission and assign it to a user. + +## Create Storage Classes + +1. Click **Platform** in the upper-left corner and select **Cluster Management**. + +2. If you have enabled the [multi-cluster feature](../../multicluster-management/) with member clusters imported, you can select a specific cluster. If you have not enabled the feature, refer to the next step directly. + +3. On the **Cluster Management** page, go to **Storage Classes** under **Storage**, where you can create, update, and delete a storage class. + +4. To create a storage class, click **Create** and enter the basic information in the displayed dialog box. When you finish, click **Next**. + +5. In KubeSphere, you can create storage classes for `QingCloud-CSI`, `GlusterFS`, and `Ceph RBD`. Alternatively, you can also create customized storage classes for other storage systems based on your needs. Select a type and click **Next**. + +### Common Settings + +Some settings are commonly used and shared among storage classes. You can find them as dashboard parameters on the console, which are also indicated by fields or annotations in the StorageClass manifest. You can see the manifest file in YAML format by clicking **Edit YAML** in the upper-right corner. + +Here are parameter descriptions of some commonly used fields in KubeSphere. + +| Parameter | Description | +| :---- | :---- | +| Volume Expansion | Specified by `allowVolumeExpansion` in the manifest. When it is set to `true`, PVs can be configured to be expandable. For more information, see [Allow Volume Expansion](https://kubernetes.io/docs/concepts/storage/storage-classes/#allow-volume-expansion). | +| Reclaim Policy | Specified by `reclaimPolicy` in the manifest. For more information, see [Reclaim Policy](https://kubernetes.io/docs/concepts/storage/storage-classes/#reclaim-policy). | +| Storage System | Specified by `provisioner` in the manifest. It determines what volume plugin is used for provisioning PVs. For more information, see [Provisioner](https://kubernetes.io/docs/concepts/storage/storage-classes/#provisioner). | +| Access Mode | Specified by `metadata.annotations[storageclass.kubesphere.io/supported-access-modes]` in the manifest. It tells KubeSphere which [access mode](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) is supported. | +| Volume Binding Mode | Specified by `volumeBindingMode` in the manifest. It determines what binding mode is used. **Delayed binding** means that a volume, after it is created, is bound to a volume instance when a Pod using this volume is created. **Immediate binding** means that a volume, after it is created, is immediately bound to a volume instance. | + +For other settings, you need to provide different information for different storage plugins, which, in the manifest, are always indicated under the field `parameters`. They will be described in detail in the sections below. 
You can also refer to [Parameters](https://kubernetes.io/docs/concepts/storage/storage-classes/#parameters) in the official documentation of Kubernetes. + +### QingCloud CSI + +QingCloud CSI is a CSI plugin on Kubernetes for the storage service of QingCloud. Storage classes of QingCloud CSI can be created on the KubeSphere console. + +#### Prerequisites + +- QingCloud CSI can be used on both public cloud and private cloud of QingCloud. Therefore, make sure KubeSphere has been installed on either of them so that you can use cloud storage services. +- QingCloud CSI Plugin has been installed on your KubeSphere cluster. See [QingCloud-CSI Installation](https://github.com/yunify/qingcloud-csi#installation) for more information. + +#### Settings + +| Parameter | Description | +| :---- | :---- | +| Type | On QingCloud Public Cloud Platform, 0 means high performance volume; 2 high capacity volume; 3 ultra-high performance volume; 5 enterprise server SAN (NeonSAN); 100 standard volume; 200 enterprise SSD. | +| Maximum Size | Maximum size of the volume. | +| Step Size | Step size of the volume. | +| Minimum Size | Minimum size of the volume. | +| File System Type | Supports ext3, ext4, and XFS. The default type is ext4. | +| Tag | Add tags to the storage volume. Use commas to separate multiple tags. | + +For more information about storage class parameters, see [QingCloud-CSI user guide](https://github.com/yunify/qingcloud-csi/blob/master/docs/user-guide.md#set-storage-class). + +### GlusterFS + +GlusterFS is an in-tree storage plugin on Kubernetes, which means you don't need to install a volume plugin additionally. + +#### Prerequisites + +The GlusterFS storage system has already been installed. See [GlusterFS Installation Documentation](https://www.gluster.org/install/) for more information. + +#### Settings + +| Parameter | Description | +| :---- | :---- | +| REST URL | Heketi REST URL that provisions volumes, for example, <Heketi Service cluster IP Address>:<Heketi Service port number>. | +| Cluster ID | Gluster cluster ID. | +| REST Authentication | Gluster enables authentication to the REST server. | +| REST User | Username of Gluster REST service or Heketi service. | +| Secret Namespace/Secret Name | Namespace of the Heketi user secret. | +| Secret Name | Name of the Heketi user secret. | +| Minimum GID | Minimum GID of the volume. | +| Maximum GID | Maximum GID of the volume. | +| Volume Type | Type of volume. The value can be none, replicate:<Replicate count>, or disperse:<Data>:<Redundancy count>. If the volume type is not set, the default volume type is replicate:3. | + +For more information about storage class parameters, see [GlusterFS in Kubernetes Documentation](https://kubernetes.io/docs/concepts/storage/storage-classes/#glusterfs). + +### Ceph RBD + +Ceph RBD is also an in-tree storage plugin on Kubernetes. The volume plugin is already in Kubernetes, +but the storage server must be installed before you create the storage class of Ceph RBD. + +As **hyperkube** images were [deprecated since 1.17](https://github.com/kubernetes/kubernetes/pull/85094), in-tree Ceph RBD may not work without **hyperkube**. +Nevertheless, you can use [rbd provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/ceph/rbd) as a substitute, whose format is the same as in-tree Ceph RBD. The only different parameter is `provisioner` (i.e **Storage System** on the KubeSphere console). 
If you want to use rbd-provisioner, the value of `provisioner` must be `ceph.com/rbd` (Enter this value in **Storage System** in the image below). If you use in-tree Ceph RBD, the value must be `kubernetes.io/rbd`. + +#### Prerequisites + +- The Ceph server has already been installed. See [Ceph Installation Documentation](https://docs.ceph.com/en/latest/install/) for more information. +- Install the plugin if you choose to use rbd-provisioner. Community developers provide [charts for rbd provisioner](https://github.com/kubesphere/helm-charts/tree/master/src/test/rbd-provisioner) that you can use to install rbd-provisioner by helm. + +#### Settings + +| Parameter | Description | +| :---- | :---- | +| MONITORS| IP address of Ceph monitors. | +| ADMINID| Ceph client ID that is capable of creating images in the pool. | +| ADMINSECRETNAME| Secret name of `adminId`. | +| ADMINSECRETNAMESPACE| Namespace of `adminSecretName`. | +| POOL | Name of the Ceph RBD pool. | +| USERID | The Ceph client ID that is used to map the RBD image. | +| USERSECRETNAME | The name of Ceph Secret for `userId` to map RBD image. | +| USERSECRETNAMESPACE | The namespace for `userSecretName`. | +| File System Type | File system type of the storage volume. | +| IMAGEFORMAT | Option of the Ceph volume. The value can be `1` or `2`. `imageFeatures` needs to be filled when you set imageFormat to `2`. | +| IMAGEFEATURES| Additional function of the Ceph cluster. The value should only be set when you set imageFormat to `2`. | + +For more information about StorageClass parameters, see [Ceph RBD in Kubernetes Documentation](https://kubernetes.io/docs/concepts/storage/storage-classes/#ceph-rbd). + +### Custom Storage Classes + +You can create custom storage classes for your storage systems if they are not directly supported by KubeSphere. The following example shows you how to create a storage class for NFS on the KubeSphere console. + +#### NFS Introduction + +NFS (Net File System) is widely used on Kubernetes with the external-provisioner volume plugin +[nfs-client](https://github.com/kubernetes-retired/external-storage/tree/master/nfs-client). You can create the storage class of nfs-client by clicking **Custom**. + +{{< notice note >}} + +NFS is incompatible with some applications, for example, Prometheus, which may result in pod creation failures. If you need to use NFS in the production environment, ensure that you have understood the risks. For more information, contact support@kubesphere.cloud. + +{{}} + +#### Prerequisites + +- An available NFS server. +- The volume plugin nfs-client has already been installed. Community developers provide [charts for nfs-client](https://github.com/kubesphere/helm-charts/tree/master/src/main/nfs-client-provisioner) that you can use to install nfs-client by helm. + +#### Common Settings + +| Parameter | Description | +| :---- | :---- | +| Volume Expansion | Specified by `allowVolumeExpansion` in the manifest. Select `No`. | +| Reclaim Policy | Specified by `reclaimPolicy` in the manifest. The value is `Delete` by default. | +| Storage System | Specified by `provisioner` in the manifest. If you install the storage class by [charts for nfs-client](https://github.com/kubesphere/helm-charts/tree/master/src/main/nfs-client-provisioner), it can be `cluster.local/nfs-client-nfs-client-provisioner`. | +| Access Mode | Specified by `.metadata.annotations.storageclass.kubesphere.io/supported-access-modes` in the manifest. `ReadWriteOnce`, `ReadOnlyMany` and `ReadWriteMany` are all selected by default. 
| +| Volume Binding Mode | Specified by `volumeBindingMode` in the manifest. It determines what binding mode is used. **Delayed binding** means that a volume, after it is created, is bound to a volume instance when a Pod using this volume is created. **Immediate binding** means that a volume, after it is created, is immediately bound to a volume instance. | + +#### Parameters + +| Key| Description | Value | +| :---- | :---- | :----| +| archiveOnDelete | Archive PVCs during deletion | `true` | + +## Manage Storage Classes + +After you create a storage class, click the name of the storage class to go to its details page. On the details page, click **Edit YAML** to edit the manifest file of the storage class, or click **More** to select an operation from the drop-down menu: + +- **Set as Default Storage Class**: Set the storage class as the default storage class in the cluster. Only one default storage class is allowed in a KubeSphere cluster. +- **Set Authorization Rule**: Set authorization rules so that the storage class can be accessed only in specific projects and workspaces. +- **Set Volume Operations**: Manage volume features, including: **Volume Cloning**, **Volume Snapshot Creation**, and **Volume Expansion**. Before enabling any features, you should contact your system administrator to confirm that the features are supported by the storage system. +- **Set Auto Expansion**: Set the system to automatically expand volumes when the remaining volume space is lower than a threshold. You can also enable **Restart workload automatically**. +- **Delete**: Delete the storage class. + +On the **Persistent Volume Claims** tab, you can view the PVCs associated to the storage class. \ No newline at end of file diff --git a/content/en/docs/v3.4/devops-user-guide/_index.md b/content/en/docs/v3.4/devops-user-guide/_index.md new file mode 100644 index 000000000..f28745e91 --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/_index.md @@ -0,0 +1,14 @@ +--- +title: "DevOps User Guide" +description: "Getting started with KubeSphere DevOps project" +layout: "second" + +linkTitle: "DevOps User Guide" +weight: 11000 + +icon: "/images/docs/v3.3/docs.svg" +--- + +To deploy and manage your CI/CD tasks and related workloads on your Kubernetes clusters, you use the KubeSphere DevOps system. This chapter demonstrates how to manage and work in DevOps projects, including running pipelines, creating credentials, and integrating tools. + +As you install the DevOps component, Jenkins is automatically deployed. KubeSphere provides you with consistent user experience as you can build a pipeline through the Jenkinsfile just as you did before. Besides, KubeSphere features graphical editing panels that visualize the whole process, presenting you with a straightforward view of how your pipeline is running at what stage. 
diff --git a/content/en/docs/v3.4/devops-user-guide/devops-overview/_index.md b/content/en/docs/v3.4/devops-user-guide/devops-overview/_index.md new file mode 100644 index 000000000..78520f832 --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/devops-overview/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Understand and Manage DevOps Projects" +weight: 11100 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/devops-user-guide/devops-overview/devops-project-management.md b/content/en/docs/v3.4/devops-user-guide/devops-overview/devops-project-management.md new file mode 100644 index 000000000..c1662fd10 --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/devops-overview/devops-project-management.md @@ -0,0 +1,49 @@ +--- +title: "DevOps Project Management" +keywords: 'Kubernetes, KubeSphere, DevOps, Jenkins' +description: 'Create and manage DevOps projects, and understand basic elements in DevOps projects.' +linkTitle: "DevOps Project Management" +weight: 11120 +--- + +This tutorial demonstrates how to create and manage DevOps projects. + +## Prerequisites + +- You need to create a workspace and a user (`project-admin`). The user must be invited to the workspace with the role of `workspace-self-provisioner`. For more information, refer to [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). +- You need to enable the [KubeSphere DevOps system](../../../pluggable-components/devops/). + +## Create a DevOps Project + +1. Log in to the console of KubeSphere as `project-admin`. Go to **DevOps Projects** and click **Create**. + +2. Provide the basic information for the DevOps project and click **OK**. + + - **Name**: A concise and clear name for this DevOps project, which is convenient for users to identify, such as `demo-devops`. + - **Alias**: The alias name of the DevOps project. + - **Description**: A brief introduction to the DevOps project. + - **Cluster Settings**: In the current version, a DevOps project cannot run across multiple clusters at the same time. If you have enabled [the multi-cluster feature](../../../multicluster-management/), you must select the cluster where your DevOps project runs. + +3. A DevOps project is displayed in the list below after created. + +## View a DevOps Project + +Click the DevOps project just created to go to its details page. Tenants with different permissions can perform various tasks in a DevOps project, including creating CI/CD pipelines and credentials, and managing accounts and roles. + +### Pipelines + +A pipeline entails a collection of plugins that allow you to constantly and consistently test and build your code. It combines continuous integration (CI) and continuous delivery (CD) to provide streamlined workflows so that your code can be automatically delivered to any target. + +### Credentials + +A DevOps project user with required permissions can configure credentials for pipelines for the interaction with external environments. Once the user adds these credentials in a DevOps project, the credentials can be used by the DevOps project to interact with third-party applications, such as GitHub, GitLab and Docker Hub. For more information, see [Credential Management](../../../devops-user-guide/how-to-use/devops-settings/credential-management/). + +### Members and Roles + +Similar to a project, a DevOps project also requires users to be granted different roles before they can work in the DevOps project. 
Project administrators (for example, `project-admin`) are responsible for inviting tenants and granting them different roles. For more information, see [Role and Member Management](../../../devops-user-guide/how-to-use/devops-settings/role-and-member-management/). + +## Edit or Delete a DevOps Project + +1. Click **Basic Information** under **DevOps Project Settings**, and you can see an overview of the current DevOps project, including the number of project roles and members, project name and project creator. + +2. Click **Manage** on the right, and you can edit the basic information of the DevOps project or delete it. diff --git a/content/en/docs/v3.4/devops-user-guide/devops-overview/overview.md b/content/en/docs/v3.4/devops-user-guide/devops-overview/overview.md new file mode 100644 index 000000000..8759abda6 --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/devops-overview/overview.md @@ -0,0 +1,46 @@ +--- +title: "DevOps — Overview" +keywords: 'Kubernetes, KubeSphere, DevOps, overview' +description: 'Develop a basic understanding of DevOps.' +linkTitle: "Overview" +weight: 11110 +--- + +DevOps is a set of practices and tools that automate the processes between IT and software development teams. Among other things, as agile software development sees increasing popularity, continuous integration (CI) and continuous delivery (CD) have become an ideal solution in this connection. In a CI/CD workflow, every integration is tested through automatic building, including coding, releasing and testing. This helps developers to identify any integration errors beforehand and teams can deliver internal software to a production environment with speed, security, and reliability. + +Nevertheless, the traditional controller-agent architecture of Jenkins (i.e. multiple agents work for a controller) has the following shortcomings. + +- The entire CI/CD pipeline will crash once the controller goes down. +- Resources are not allocated equally as some agents see pipeline jobs waiting in queue while others remain idle. +- Different agents may be configured in different environments and require different coding languages. The disparity can cause inconvenience in management and maintenance. + +## Understand KubeSphere DevOps + +KubeSphere DevOps projects support source code management tools such as GitHub, Git, and SVN. Users can build CI/CD pipelines through graphical editing panels (Jenkinsfile out of SCM) or create a Jenkinsfile-based pipeline from the code repository (Jenkinsfile in SCM). + +### Features + +The KubeSphere DevOps system provides you with the following features: + +- Independent DevOps projects for CI/CD pipelines with access control. +- Out-of-the-box DevOps functions with no complex Jenkins configurations. +- [Source-to-image (S2I)](../../../project-user-guide/image-builder/source-to-image/) and [Binary-to-image (B2I)](../../../project-user-guide/image-builder/binary-to-image/) for rapid delivery of images. +- [Jenkinsfile-based pipelines](../../../devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-jenkinsfile/) for consistent user experience which support multiple code repositories. +- [Graphical editing panels](../../../devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-graphical-editing-panel/) to create pipelines with a low learning curve. +- A powerful tool integration mechanism such as [SonarQube](../../../devops-user-guide/how-to-integrate/sonarqube/) for code quality check. 
+ +### KubeSphere CI/CD pipeline workflows + +A KubeSphere CI/CD pipeline runs on the back of the underlying Kubernetes Jenkins agents. These Jenkins agents can be dynamically scaled as they are dynamically provisioned or released based on the job status. The Jenkins controller and agents run as Pods on KubeSphere nodes. The controller runs on one of the nodes with its configuration data stored in a volume. Agents run across nodes while they may not be active all the time because they are created dynamically and deleted automatically as needed. + +When the Jenkins controller receives a building request, it dynamically creates Jenkins agents that run in Pods according to labels. At the same time, Jenkins agents will be registered in the controller. After agents finish their jobs, they will be released and related Pods will be deleted as well. + +### Dynamically provision Jenkins agents + +The advantages of dynamically provisioning Jenkins agents are: + +**Reasonable resource allocation**. KubeSphere dynamically assigns agents created to idle nodes, so that jobs will not be queuing on a single node whose resource utilization is already high. + +**High scalability**. When a KubeSphere cluster has insufficient resources which lead to long waiting time of jobs in the queue, you can add new nodes to the cluster. + +**High availability**. When a Jenkins controller fails, KubeSphere automatically creates a new Jenkins controller container with the volume mounted to the new container. In this way, the data are secured with high availability achieved for the cluster. diff --git a/content/en/docs/v3.4/devops-user-guide/examples/_index.md b/content/en/docs/v3.4/devops-user-guide/examples/_index.md new file mode 100644 index 000000000..25de3b8fc --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/examples/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Examples" +weight: 11400 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/devops-user-guide/examples/a-maven-project.md b/content/en/docs/v3.4/devops-user-guide/examples/a-maven-project.md new file mode 100644 index 000000000..9eb01ab77 --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/examples/a-maven-project.md @@ -0,0 +1,161 @@ +--- +title: "Build and Deploy a Maven Project" +keywords: 'Kubernetes, Docker, DevOps, Jenkins, Maven' +description: 'Learn how to build and deploy a Maven project using a KubeSphere pipeline.' +linkTitle: "Build and Deploy a Maven Project" +weight: 11430 +--- + +## Prerequisites + +- You need to [enable the KubeSphere DevOps System](../../../pluggable-components/devops/). +- You need to have a [Docker Hub](https://www.dockerhub.com/) account. +- You need to create a workspace, a DevOps project, and a user account, and this user needs to be invited into the DevOps project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Workflow for a Maven Project + +As is shown in the graph below, there is the workflow for a Maven project in KubeSphere DevOps, which uses a Jenkins pipeline to build and deploy the Maven project. All steps are defined in the pipeline. + +![maven-project-jenkins](/images/docs/v3.3/devops-user-guide/examples/build-and-deploy-a-maven-project/maven-project-jenkins.png) + +At first, the Jenkins Master creates a Pod to run the pipeline. Kubernetes creates the Pod as the agent of Jenkins Master, and the Pod will be destroyed after the pipeline finished. 
The main process includes cloning code, building and pushing an image, and deploying the workload. + +## Default Configurations in Jenkins + +### Maven version + +Execute the following command in the Maven builder container to get version information. + +```bash +mvn --version + +Apache Maven 3.5.3 (3383c37e1f9e9b3bc3df5050c29c8aff9f295297; 2018-02-24T19:49:05Z) +Maven home: /opt/apache-maven-3.5.3 +Java version: 1.8.0_232, vendor: Oracle Corporation +Java home: /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.232.b09-0.el7_7.i386/jre +Default locale: en_US, platform encoding: UTF-8 +``` + +### Maven cache + +The Jenkins Agent mounts the directories by Docker Volume on the node. The pipeline can cache some special directories such as `/root/.m2`, which are used for Maven building and the default cache directory for Maven tools in KubeSphere DevOps, so that dependency packages are downloaded and cached on the node. + +### Global Maven settings in the Jenkins Agent + +The default file path of Maven settings is `maven` and the configuration file path is `/opt/apache-maven-3.5.3/conf/settings.xml`. Execute the following command to get the content of Maven settings. + +```bash +kubectl get cm -n kubesphere-devops-worker ks-devops-agent -o yaml +``` + +### Network of Maven Pod + +The Pod labeled `maven` uses the docker-in-docker network to run the pipeline. Namely, `/var/run/docker.sock` in the node is mounted to the Maven container. + +## A Maven Pipeline Example + +### Prepare for the Maven project + +- Ensure you build the Maven project successfully on the development device. +- Add the Dockerfile to the project repository to build the image. For more information, refer to . +- Add the YAML file to the project repository to deploy the workload. For more information, refer to . If there are different environments, you need to prepare multiple deployment files. + +### Create credentials + +| Credential ID | Type | Where to Use | +| --------------- | ------------------- | ---------------------------- | +| dockerhub-id | Username and password | Registry, such as Docker Hub | +| demo-kubeconfig | kubeconfig | Workload deployment | + +For details, refer to the [Credential Management](../../how-to-use/devops-settings/credential-management/). + +### Create a project for workloads + +In this example, all workloads are deployed in `kubesphere-sample-dev`. You must create the project `kubesphere-sample-dev` in advance. + +### Create a pipeline for the Maven project + +1. Go to **Pipelines** of your DevOps project and click **Create** to create a pipeline named `maven`. For more information, see [Create a Pipeline - using Graphical Editing Panel](../../how-to-use/pipelines/create-a-pipeline-using-graphical-editing-panel/). + +2. Go to the details page of the pipeline and click **Edit Jenkinsfile**. + +3. Copy and paste the following content into the displayed dialog box. You must replace the value of `DOCKERHUB_NAMESPACE` with yours. When you finish editing, click **OK** to save the Jenkinsfile. 
+ + ```groovy + pipeline { + agent { + label 'maven' + } + + parameters { + string(name:'TAG_NAME',defaultValue: '',description:'') + } + + environment { + DOCKER_CREDENTIAL_ID = 'dockerhub-id' + KUBECONFIG_CREDENTIAL_ID = 'demo-kubeconfig' + REGISTRY = 'docker.io' + // need to replace by yourself dockerhub namespace + DOCKERHUB_NAMESPACE = 'Docker Hub Namespace' + APP_NAME = 'devops-maven-sample' + BRANCH_NAME = 'dev' + PROJECT_NAME = 'kubesphere-sample-dev' + } + + stages { + stage ('checkout scm') { + steps { + // Please avoid committing your test changes to this repository + git branch: 'master', url: "https://github.com/kubesphere/devops-maven-sample.git" + } + } + + stage ('unit test') { + steps { + container ('maven') { + sh 'mvn clean test' + } + } + } + + stage ('build & push') { + steps { + container ('maven') { + sh 'mvn -Dmaven.test.skip=true clean package' + sh 'docker build -f Dockerfile-online -t $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER .' + withCredentials([usernamePassword(passwordVariable : 'DOCKER_PASSWORD' ,usernameVariable : 'DOCKER_USERNAME' ,credentialsId : "$DOCKER_CREDENTIAL_ID" ,)]) { + sh 'echo "$DOCKER_PASSWORD" | docker login $REGISTRY -u "$DOCKER_USERNAME" --password-stdin' + sh 'docker push $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER' + } + } + } + } + + stage('deploy to dev') { + steps { + container ('maven') { + withCredentials([ + kubeconfigFile( + credentialsId: env.KUBECONFIG_CREDENTIAL_ID, + variable: 'KUBECONFIG') + ]) { + sh 'envsubst < deploy/all-in-one/devops-sample.yaml | kubectl apply -f -' + } + } + } + } + } + } + ``` + +4. You can see stages and steps are automatically created on graphical editing panels. + +### Run and test + +1. Click **Run**, enter `v1` for **TAG_NAME** in the displayed dialog box, and then click **OK** to run the pipeline. + +2. When the pipeline runs successfully, you can go to the **Run Records** tab to view its details. + +3. In the project of `kubesphere-sample-dev`, new workloads were created. + +4. On the **Services** page, view the external access information about the Service created. diff --git a/content/en/docs/v3.4/devops-user-guide/examples/create-multi-cluster-pipeline.md b/content/en/docs/v3.4/devops-user-guide/examples/create-multi-cluster-pipeline.md new file mode 100644 index 000000000..036913ea2 --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/examples/create-multi-cluster-pipeline.md @@ -0,0 +1,253 @@ +--- +title: "Create a Multi-cluster Pipeline" +keywords: 'KubeSphere, Kubernetes, Multi-cluster, Pipeline, DevOps' +description: 'Learn how to create a multi-cluster pipeline on KubeSphere.' +linkTitle: "Create a Multi-cluster Pipeline" +weight: 11440 +--- + +As cloud providers offer different hosted Kubernetes services, DevOps pipelines have to deal with use cases where multiple Kubernetes clusters are involved. + +This tutorial demonstrates how to create a multi-cluster pipeline on KubeSphere. + +## Prerequisites + +- You need to have three Kubernetes clusters with KubeSphere installed. Choose one cluster as your host cluster and the other two as your member clusters. For more information about cluster roles and how to build a multi-cluster environment on KubeSphere, refer to [Multi-cluster Management](../../../multicluster-management/). +- You need to set your member clusters as [public clusters](../../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/#make-a-cluster-public). 
Alternatively, you can [set cluster visibility after a workspace is created](../../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/#set-cluster-visibility-after-a-workspace-is-created). +- You need to [enable the KubeSphere DevOps system](../../../pluggable-components/devops/) on your host cluster. +- You need to integrate SonarQube into your pipeline. For more information, refer to [Integrate SonarQube into Pipelines](../../how-to-integrate/sonarqube/). +- You need to create four accounts on your host cluster: `ws-manager`, `ws-admin`, `project-admin`, and `project-regular`, and grant these accounts different roles. For more information, refer to [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/#step-1-create-an-account). + +## Workflow Overview + +This tutorial uses three clusters to serve as three isolated environments in the workflow. See the diagram as below. + +![use-case-for-multi-cluster](/images/docs/v3.3/devops-user-guide/examples/create-multi-cluster-pipeline/use-case-for-multi-cluster.png) + +The three clusters are used for development, testing, and production respectively. Once codes get submitted to a Git repository, a pipeline will be triggered to run through the following stages—`Unit Test`, `SonarQube Analysis`, `Build & Push`, and `Deploy to Development Cluster`. Developers use the development cluster for self-testing and validation. When developers give approval, the pipeline will proceed to the stage of `Deploy to Testing Cluster` for stricter validation. Finally, the pipeline, with necessary approval ready, will reach the stage of `Deploy to Production Cluster` to provide services externally. + +## Hands-on Lab + +### Step 1: Prepare clusters + +See the table below for the role of each cluster. + +| Cluster Name | Cluster Role | Usage | +| ------------ | -------------- | ----------- | +| host | Host cluster | Testing | +| shire | Member cluster | Production | +| rohan | Member cluster | Development | + +{{< notice note >}} + +These Kubernetes clusters can be hosted across different cloud providers and their Kubernetes versions can also vary. Recommended Kubernetes versions for KubeSphere 3.3: v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and * v1.24.x. For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatability. Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x. + +{{}} + +### Step 2: Create a workspace + +1. Log in to the web console of the host cluster as `ws-manager`. On the **Workspaces** page, click **Create**. + +2. On the **Basic Information** page, name the workspace `devops-multicluster`, select `ws-admin` for **Administrator**, and click **Next**. + +3. On the **Cluster Settings** page, select all three clusters and click **Create**. + +4. The workspace created is displayed in the list. You need to log out of the console and log back in as `ws-admin` to invite both `project-admin` and `project-regular` to the workspace and grant them the role `workspace-self-provisioner` and `workspace-viewer` respectively. For more information, refer to [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/#step-2-create-a-workspace). + +### Step 3: Create a DevOps project + +1. Log out of the console and log back in as `project-admin`. Go to the **DevOps Projects** page and click **Create**. + +2. 
In the displayed dialog box, enter `multicluster-demo` for **Name**, select **host** for **Cluster Settings**, and then click **OK**. + + {{< notice note >}} + + Only clusters with the DevOps component enabled will be available in the drop-down list. + + {{}} + +3. The DevOps project created is displayed in the list. Make sure you invite the `project-regular` user to this project and assign it the `operator` role. For more information, refer to [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/#step-5-create-a-devops-project-optional). + +### Step 4: Create projects on clusters + +You must create the projects as shown in the table below in advance. Make sure you invite the `project-regular` user to these projects and assign it the `operator` role. For more information about how to create a project, refer to [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/#step-3-create-a-project). + +| Cluster Name | Usage | Project Name | +| ------------ | ----------- | ---------------------- | +| host | Testing | kubesphere-sample-prod | +| shire | Production | kubesphere-sample-prod | +| rohan | Development | kubesphere-sample-dev | + +### Step 5: Create credentials + +1. Log out of the console and log back in as `project-regular`. On the **DevOps Projects** page, click the DevOps project `multicluster-demo`. + +2. On the **Credentials** page, you need to create the credentials as shown in the table below. For more information about how to create credentials, refer to [Credential Management](../../how-to-use/devops-settings/credential-management/#create-credentials) and [Create a Pipeline Using a Jenkinsfile](../../how-to-use/pipelines/create-a-pipeline-using-jenkinsfile/#step-1-create-credentials). + + | Credential ID | Type | Where to Use | + | ------------- | ------------------- | ---------------------------------- | + | host | kubeconfig | The host cluster for testing | + | shire | kubeconfig | The member cluster for production | + | rohan | kubeconfig | The member cluster for development | + | dockerhub-id | Username and password | Docker Hub | + | sonar-token | Access token | SonarQube | + + {{< notice note >}} + + You have to manually enter the kubeconfig of your member clusters when creating the kubeconfig credentials `shire` and `rohan`. Make sure your host cluster can access the API Server addresses of your member clusters. + + {{}} + +3. Five credentials are created in total. + +### Step 6: Create a pipeline + +1. Go to the **Pipelines** page and click **Create**. In the displayed dialog box, enter `build-and-deploy-application` for **Name** and click **Next**. + +2. On the **Advanced Settings** tab, click **Create** to use the default settings. + +3. The pipeline created is displayed in the list. Click its name to go to the details page. + +4. Click **Edit Jenkinsfile** and copy and paste the following contents. Make sure you replace the value of `DOCKERHUB_NAMESPACE` with your own value, and then click **OK**. 
+ + ```groovy + pipeline { + agent { + node { + label 'maven' + } + + } + parameters { + string(name:'BRANCH_NAME',defaultValue: 'master',description:'') + } + environment { + DOCKER_CREDENTIAL_ID = 'dockerhub-id' + PROD_KUBECONFIG_CREDENTIAL_ID = 'shire' + TEST_KUBECONFIG_CREDENTIAL_ID = 'host' + DEV_KUBECONFIG_CREDENTIAL_ID = 'rohan' + + REGISTRY = 'docker.io' + DOCKERHUB_NAMESPACE = 'your Docker Hub account ID' + APP_NAME = 'devops-maven-sample' + SONAR_CREDENTIAL_ID = 'sonar-token' + TAG_NAME = "SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER" + } + stages { + stage('checkout') { + steps { + container('maven') { + git branch: 'master', url: 'https://github.com/kubesphere/devops-maven-sample.git' + } + } + } + stage('unit test') { + steps { + container('maven') { + sh 'mvn clean test' + } + } + } + stage('sonarqube analysis') { + steps { + container('maven') { + withCredentials([string(credentialsId: "$SONAR_CREDENTIAL_ID", variable: 'SONAR_TOKEN')]) { + withSonarQubeEnv('sonar') { + sh "mvn sonar:sonar -Dsonar.login=$SONAR_TOKEN" + } + + } + } + + } + } + stage('build & push') { + steps { + container('maven') { + sh 'mvn -Dmaven.test.skip=true clean package' + sh 'docker build -f Dockerfile-online -t $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER .' + withCredentials([usernamePassword(passwordVariable : 'DOCKER_PASSWORD' ,usernameVariable : 'DOCKER_USERNAME' ,credentialsId : "$DOCKER_CREDENTIAL_ID" ,)]) { + sh 'echo "$DOCKER_PASSWORD" | docker login $REGISTRY -u "$DOCKER_USERNAME" --password-stdin' + sh 'docker push $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER' + } + } + } + } + stage('push latest') { + steps { + container('maven') { + sh 'docker tag $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:latest ' + sh 'docker push $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:latest ' + } + } + } + stage('deploy to dev') { + steps { + container('maven') { + withCredentials([ + kubeconfigFile( + credentialsId: env.DEV_KUBECONFIG_CREDENTIAL_ID, + variable: 'KUBECONFIG') + ]) { + sh 'envsubst < deploy/dev-all-in-one/devops-sample.yaml | kubectl apply -f -' + } + } + } + } + stage('deploy to staging') { + steps { + container('maven') { + input(id: 'deploy-to-staging', message: 'deploy to staging?') + withCredentials([ + kubeconfigFile( + credentialsId: env.TEST_KUBECONFIG_CREDENTIAL_ID, + variable: 'KUBECONFIG') + ]) { + sh 'envsubst < deploy/prod-all-in-one/devops-sample.yaml | kubectl apply -f -' + } + } + } + } + stage('deploy to production') { + steps { + container('maven') { + input(id: 'deploy-to-production', message: 'deploy to production?') + withCredentials([ + kubeconfigFile( + credentialsId: env.PROD_KUBECONFIG_CREDENTIAL_ID, + variable: 'KUBECONFIG') + ]) { + sh 'envsubst < deploy/prod-all-in-one/devops-sample.yaml | kubectl apply -f -' + } + } + } + } + } + } + ``` + + {{< notice note >}} + + The flag `-o` in the `mvn` commands indicates that the offline mode is enabled. If you have relevant maven dependencies and caches ready locally, you can keep the offline mode on to save time. + + {{}} + +5. After the pipeline is created, you can view its stages and steps on the graphical editing panel as well. + +### Step 7: Run the pipeline and check the results + +1. Click **Run** to run the pipeline. The pipeline will pause when it reaches the stage **deploy to staging** as resources have been deployed to the cluster for development. 
You need to manually click **Proceed** twice to deploy resources to the testing cluster `host` and the production cluster `shire`. + +2. After a while, you can see the pipeline status shown as **Successful**. + +3. Check the pipeline running logs by clicking **View Logs** in the upper-right corner. For each stage, you click it to inspect logs, which can be downloaded to your local machine for further analysis. + +4. Once the pipeline runs successfully, click **Code Check** to check the results through SonarQube. + +5. Go to the **Projects** page, and you can view the resources deployed in different projects across the clusters by selecting a specific cluster from the drop-down list. + + + + + + diff --git a/content/en/docs/v3.4/devops-user-guide/examples/go-project-pipeline.md b/content/en/docs/v3.4/devops-user-guide/examples/go-project-pipeline.md new file mode 100644 index 000000000..58d0acacb --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/examples/go-project-pipeline.md @@ -0,0 +1,132 @@ +--- +title: "Build and Deploy a Go Project" +keywords: 'Kubernetes, docker, DevOps, Jenkins, Go, KubeSphere' +description: 'Learn how to build and deploy a Go project using a KubeSphere pipeline.' +linkTitle: "Build and Deploy a Go Project" +weight: 11410 +--- + +## Prerequisites + +- You need to [enable the KubeSphere DevOps System](../../../pluggable-components/devops/). +- You need to have a [Docker Hub](https://hub.docker.com/) account. +- You need to create a workspace, a DevOps project, a project, and a user (`project-regular`). This account needs to be invited to the DevOps project and the project for deploying your workload with the role `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Create a Docker Hub Access Token + +1. Log in to [Docker Hub](https://hub.docker.com/), click your account in the upper-right corner, and select **Account Settings** from the menu. + +2. Click **Security** in the left navigation pane and then click **New Access Token**. + +3. In the displayed dialog box, enter a token name (`go-project-token`) and click **Create**. + +4. Click **Copy and Close** and make sure you save the access token. + +## Create Credentials + +You need to create credentials in KubeSphere for the access token created so that the pipeline can interact with Docker Hub for imaging pushing. Besides, you also create kubeconfig credentials for the access to the Kubernetes cluster. + +1. Log in to the web console of KubeSphere as `project-regular`. In your DevOps project, go to **Credentials** under **DevOps Project Settings** and then click **Create** on the **Credentials** page. + +2. In the displayed dialog box, set a **Name**, which is used later in the Jenkinsfile, and select **Username and password** for **Type**. Enter your Docker Hub account name for **Username** and the access token just created for **Password/Token**. When you finish, click **OK**. + + {{< notice tip >}} + +For more information about how to create credentials, see [Credential Management](../../../devops-user-guide/how-to-use/devops-settings/credential-management/). + + {{}} + +3. Click **Create** again and select **kubeconfig** for **Type**. Note that KubeSphere automatically populates the **Content** field, which is the kubeconfig of the current user account. Set a **Name** and click **OK**. + +## Create a Pipeline + +With the above credentials ready, you can create a pipeline using an example Jenkinsfile as below. 
+ +1. To create a pipeline, click **Create** on the **Pipelines** page. + +2. Set a name in the displayed dialog box and click **Next**. + +3. In this tutorial, you can use default values for all the fields. On the **Advanced Settings** tab, click **Create**. + +## Edit the Jenkinsfile + +1. In the pipeline list, click the pipeline name to go to its details page. Click **Edit Jenkinsfile** to define a Jenkinsfile and your pipeline runs based on it. + +2. Copy and paste all the content below to the displayed dialog box as an example Jenkinsfile for your pipeline. You must replace the value of `DOCKERHUB_USERNAME`, `DOCKERHUB_CREDENTIAL`, `KUBECONFIG_CREDENTIAL_ID`, and `PROJECT_NAME` with yours. When you finish, click **OK**. + + ```groovy + pipeline { + agent { + label 'go' + } + + environment { + // the address of your Docker Hub registry + REGISTRY = 'docker.io' + // your Docker Hub username + DOCKERHUB_USERNAME = 'Docker Hub Username' + // Docker image name + APP_NAME = 'devops-go-sample' + // 'dockerhubid' is the credentials ID you created in KubeSphere with Docker Hub Access Token + DOCKERHUB_CREDENTIAL = credentials('dockerhubid') + // the kubeconfig credentials ID you created in KubeSphere + KUBECONFIG_CREDENTIAL_ID = 'go' + // the name of the project you created in KubeSphere, not the DevOps project name + PROJECT_NAME = 'devops-go' + } + + stages { + stage('docker login') { + steps{ + container ('go') { + sh 'echo $DOCKERHUB_CREDENTIAL_PSW | docker login -u $DOCKERHUB_CREDENTIAL_USR --password-stdin' + } + } + } + + stage('build & push') { + steps { + container ('go') { + sh 'git clone https://github.com/yuswift/devops-go-sample.git' + sh 'cd devops-go-sample && docker build -t $REGISTRY/$DOCKERHUB_USERNAME/$APP_NAME .' + sh 'docker push $REGISTRY/$DOCKERHUB_USERNAME/$APP_NAME' + } + } + } + stage ('deploy app') { + steps { + container ('go') { + withCredentials([ + kubeconfigFile( + credentialsId: env.KUBECONFIG_CREDENTIAL_ID, + variable: 'KUBECONFIG') + ]) { + sh 'envsubst < devops-go-sample/manifest/deploy.yaml | kubectl apply -f -' + } + } + } + } + } + } + ``` + + {{< notice note >}} + +If your pipeline runs successfully, images will be pushed to Docker Hub. If you are using Harbor, you cannot pass the parameter to `docker login -u` via the Jenkins credential with environment variables. This is because every Harbor robot account username contains a `$` character, which will be converted to `$$` by Jenkins when used by environment variables. [Learn more](https://number1.co.za/rancher-cannot-use-harbor-robot-account-imagepullbackoff-pull-access-denied/). + + {{}} + +## Run the Pipeline + +1. After you finish the Jenkinsfile, you can see graphical panels are displayed on the dashboard. Click **Run** to run the pipeline. + +2. In **Run Records**, you can see the status of the pipeline. It may take a while before it successfully runs. + + +## Verify Results + +1. A **Deployment** is created in the project specified in the Jenkinsfile if the pipeline runs successfully. + +2. Check the image that is pushed to Docker Hub. 
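Besides checking on the console, you can confirm the result with `kubectl` from any machine that has access to the cluster. The commands below are only a sketch and assume the project name `devops-go` and the image naming used in the example Jenkinsfile above; adjust them to your own values.

```bash
# List the Deployments in the project (namespace) targeted by the pipeline.
kubectl -n devops-go get deployments

# Show each Pod and the container image it runs; the image should be the tag
# that the 'build & push' stage pushed to Docker Hub.
kubectl -n devops-go get pods \
  -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[*].image}{"\n"}{end}'
```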
+ + \ No newline at end of file diff --git a/content/en/docs/v3.4/devops-user-guide/examples/multi-cluster-project-example.md b/content/en/docs/v3.4/devops-user-guide/examples/multi-cluster-project-example.md new file mode 100644 index 000000000..a99a89217 --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/examples/multi-cluster-project-example.md @@ -0,0 +1,132 @@ +--- +title: "Deploy Apps in a Multi-cluster Project Using a Jenkinsfile" +keywords: 'Kubernetes, KubeSphere, Docker, DevOps, Jenkins, Multi-cluster' +description: 'Learn how to deploy apps in a multi-cluster project using a Jenkinsfile-based pipeline.' +linkTitle: "Deploy Apps in a Multi-cluster Project Using a Jenkinsfile" +weight: 11420 +--- + +## Prerequisites + +- You need to [enable the multi-cluster feature](../../../multicluster-management/) and create a workspace with your multiple clusters. +- You need to have a [Docker Hub](https://hub.docker.com/) account. +- You need to [enable the KubeSphere DevOps System](../../../pluggable-components/devops/) on your host cluster. +- You need to use a user (for example, `project-admin`) with the role of `workspace-self-provisioner` to create a multi-cluster project and a DevOps project on the host cluster. This tutorial creates a multi-cluster project on the host cluster and one member cluster. +- You need to invite a user (for example, `project-regular`) to the DevOps project and grant it the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/), [Multi-cluster Management](../../../multicluster-management/) and [Multi-cluster Projects](../../../project-administration/project-and-multicluster-project/#multi-cluster-projects). + +## Create a Docker Hub Access Token + +1. Log in to [Docker Hub](https://hub.docker.com/), click your account in the upper-right corner, and select **Account Settings** from the menu. + +2. Click **Security** in the left navigation pane and then click **New Access Token**. + +3. In the displayed dialog box, enter a token name (`go-project-token`) and click **Create**. + +4. Click **Copy and Close** and make sure you save the access token. + +## Create Credentials + +You need to create credentials in KubeSphere for the access token created so that the pipeline can interact with Docker Hub for pushing images. Besides, you also need to create kubeconfig credentials for the access to the Kubernetes cluster. + +1. Log in to the web console of KubeSphere as `project-regular`. In your DevOps project, go to **Credentials** under **DevOps Project Settings** and then click **Create** on the **Credentials** page. + +2. In the displayed dialog box, set a **Name**, which is used later in the Jenkinsfile, and select **Username and password** for **Type**. Enter your Docker Hub account name for **Username** and the access token just created for **Password/Token**. When you finish, click **OK**. + + {{< notice tip >}} + + For more information about how to create credentials, see [Credential Management](../../../devops-user-guide/how-to-use/devops-settings/credential-management/). + + {{}} + +3. Log out of the KubeSphere web console and log back in as `project-admin`. Go to your DevOps project and click **Create** in **Credentials**. Select **kubeconfig** for **Type**. Note that KubeSphere automatically populates the **Content** field, which is the kubeconfig of the current account. Set a **Name** and click **OK**. 
+ + {{< notice note >}} + + In future releases, you will be able to invite the account `project-regular` to your multi-cluster project and grant it the necessary role to create the kubeconfig credentials. + + {{}} + +## Create a Pipeline + +With the above credentials ready, you can use the user `project-regular` to create a pipeline with an example Jenkinsfile as below. + +1. To create a pipeline, click **Create** on the **Pipelines** page. + +2. Set a name in the displayed dialog box and click **Next**. + +3. In this tutorial, you can use default values for all the fields. On the **Advanced Settings** tab, click **Create**. + +## Edit the Jenkinsfile + +1. In the pipeline list, click this pipeline to go to its details page. Click **Edit Jenkinsfile** to define a Jenkinsfile and your pipeline runs based on it. + +2. Copy and paste all the content below to the displayed dialog box as an example Jenkinsfile for your pipeline. You must replace the value of `DOCKERHUB_USERNAME`, `DOCKERHUB_CREDENTIAL`, `KUBECONFIG_CREDENTIAL_ID`, `MULTI_CLUSTER_PROJECT_NAME`, and `MEMBER_CLUSTER_NAME` with yours. When you finish, click **OK**. + + ```groovy + pipeline { + agent { + label 'go' + } + + environment { + REGISTRY = 'docker.io' + // Docker Hub username + DOCKERHUB_USERNAME = 'Your Docker Hub username' + APP_NAME = 'devops-go-sample' + // ‘dockerhub’ is the Docker Hub credentials ID you created on the KubeSphere console + DOCKERHUB_CREDENTIAL = credentials('dockerhub') + // the kubeconfig credentials ID you created on the KubeSphere console + KUBECONFIG_CREDENTIAL_ID = 'kubeconfig' + // mutli-cluster project name under your own workspace + MULTI_CLUSTER_PROJECT_NAME = 'demo-multi-cluster' + // the name of the member cluster where you want to deploy your app + // in this tutorial, the apps are deployed on host cluster and only one member cluster + // for more member clusters, please edit manifest/multi-cluster-deploy.yaml + MEMBER_CLUSTER_NAME = 'Your member cluster name' + } + + stages { + stage('docker login') { + steps { + container('go') { + sh 'echo $DOCKERHUB_CREDENTIAL_PSW | docker login -u $DOCKERHUB_CREDENTIAL_USR --password-stdin' + } + } + } + + stage('build & push') { + steps { + container('go') { + sh 'git clone https://github.com/yuswift/devops-go-sample.git' + sh 'cd devops-go-sample && docker build -t $REGISTRY/$DOCKERHUB_USERNAME/$APP_NAME .' + sh 'docker push $REGISTRY/$DOCKERHUB_USERNAME/$APP_NAME' + } + } + } + + stage('deploy app to multi cluster') { + steps { + container('go') { + withCredentials([ + kubeconfigFile( + credentialsId: env.KUBECONFIG_CREDENTIAL_ID, + variable: 'KUBECONFIG') + ]) { + sh 'envsubst < devops-go-sample/manifest/multi-cluster-deploy.yaml | kubectl apply -f -' + } + } + } + } + } + } + ``` + + {{< notice note >}} + + If your pipeline runs successfully, images will be pushed to Docker Hub. If you are using Harbor, you cannot pass the parameter to `docker login -u` via the Jenkins credential with environment variables. This is because every Harbor robot account username contains a `$` character, which will be converted to `$$` by Jenkins when used by environment variables. [Learn more](https://number1.co.za/rancher-cannot-use-harbor-robot-account-imagepullbackoff-pull-access-denied/). + + {{}} + +## Run the Pipeline + +After you save the Jenkinsfile, click **Run**. If everything goes well, you will see the Deployment workload in your multi-cluster project. 
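If you also want to verify the deployment outside the KubeSphere console, you can query each cluster directly with `kubectl`. This is only a sketch: the kubeconfig file paths are placeholders, and `demo-multi-cluster` is the multi-cluster project name from the example Jenkinsfile above.

```bash
# Host cluster: list the resources created in the multi-cluster project namespace.
kubectl --kubeconfig ~/.kube/host.config -n demo-multi-cluster get all

# Member cluster: the workload should appear here as well if
# manifest/multi-cluster-deploy.yaml targets this cluster.
kubectl --kubeconfig ~/.kube/member.config -n demo-multi-cluster get all
```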
diff --git a/content/en/docs/v3.4/devops-user-guide/examples/use-nexus-in-pipelines.md b/content/en/docs/v3.4/devops-user-guide/examples/use-nexus-in-pipelines.md new file mode 100644 index 000000000..aed0e8caa --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/examples/use-nexus-in-pipelines.md @@ -0,0 +1,170 @@ +--- +title: "Use Nexus in Pipelines" +keywords: 'KubeSphere, Kubernetes, Pipeline, Nexus, Jenkins' +description: 'Learn how to use Nexus in pipelines on KubeSphere.' +linkTitle: "Use Nexus in Pipelines" +weight: 11450 +--- + +[Nexus](https://www.sonatype.com/products/repository-oss) is a repository manager that stores, organizes, and distributes artifacts. With Nexus, developers can have better control over the artifacts needed in a development process. + +This tutorial demonstrates how to use Nexus in pipelines on KubeSphere. + +## Prerequisites + +- You need to [enable the KubeSphere DevOps System](../../../pluggable-components/devops/). +- You need to [prepare a Nexus instance](https://help.sonatype.com/repomanager3/installation). +- You need to have a [GitHub](https://github.com/) account. +- You need to create a workspace, a DevOps project (for example, `demo-devops`), and a user (for example, `project-regular`). This account needs to be invited into the DevOps project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Hands-on Lab + +### Step 1: Get a Repository URL on Nexus + +1. Log in to the Nexus console as `admin` and click icon on the top navigation bar. + +2. Go to the **Repositories** page and you can see that Nexus provides three types of repository. + + - `proxy`: the proxy for a remote repository to download and store resources on Nexus as cache. + - `hosted`: the repository storing artifacts on Nexus. + - `group`: a group of configured Nexus repositories. + +3. You can click a repository to view its details. For example, click **maven-public** to go to its details page, and you can see its **URL**. + +### Step 2: Modify `pom.xml` in your GitHub repository + +1. Log in to GitHub. Fork [the example repository](https://github.com/devops-ws/learn-pipeline-java) to your own GitHub account. + +2. In your own GitHub repository of **learn-pipeline-java**, click the file `pom.xml` in the root directory. + +3. Click icon to modify the code segment of `` in the file. Set the `` and use the URLs of your own Nexus repositories. + + ![modify-pom](/images/docs/v3.3/devops-user-guide/examples/use-nexus-in-pipeline/modify-pom.png) + +4. When you finish, click **Commit changes** at the bottom of the page. + +### Step 3: Modify the ConfigMap + +1. Log in to the KubeSphere web console as `admin`, click **Platform** in the upper-left corner, and select **Cluster Management**. + +2. Select **ConfigMaps** under **Configuration**. On the **ConfigMaps** page, select `kubesphere-devops-worker` from the drop-down list and click `ks-devops-agent`. + +3. On the details page, click **Edit YAML** from the **More** drop-down menu. + +4. In the displayed dialog box, scroll down, find the code segment of ``, and enter the following code: + + ```yaml + + + nexus + admin + admin + + + ``` + + ![enter-server-code](/images/docs/v3.3/devops-user-guide/examples/use-nexus-in-pipeline/enter-server-code.png) + + {{< notice note >}} + + `` is the unique identifier you set for your Nexus in step 2. `` is your Nexus username. `` is your Nexus password. 
You can also configure a `NuGet API Key` on Nexus and use it here for better security. + + {{}} + +5. Continue to find the code segment of `` and enter the following code: + + ```yaml + + + nexus + maven-public + http://135.68.37.85:8081/repository/maven-public/ + * + + + ``` + + ![enter-mirror-code](/images/docs/v3.3/devops-user-guide/examples/use-nexus-in-pipeline/enter-mirror-code.png) + + {{< notice note >}} + + `` is the unique identifier you set for your Nexus in step 2. `` is the Nexus repository name. `` is the URL of your Nexus repository. `` is the Maven repository to be mirrored. In this tutorial, enter `*` to mirror all Maven repositories. For more information, refer to [Using Mirrors for Repositories](https://maven.apache.org/guides/mini/guide-mirror-settings.html). + + {{}} + +6. When you finish, click **OK**. + +### Step 4: Create a pipeline + +1. Log out of the KubeSphere web console and log back in as `project-regular`. Go to your DevOps project and click **Create** on the **Pipelines** page. + +2. On the **Basic Information** tab, set a name for the pipeline (for example, `nexus-pipeline`) and click **Next**. + +3. On the **Advanced Settings** tab, click **Create** to use the default settings. + +4. Click the pipeline name to go to its details page and click **Edit Jenkinsfile**. + +5. In the displayed dialog box, enter the Jenkinsfile as follows. When you finish, click **OK**. + + ```groovy + pipeline { + agent { + node { + label 'maven' + } + } + stages { + stage ('clone') { + steps { + git 'https://github.com/Felixnoo/learn-pipeline-java.git' + } + } + + stage ('build') { + steps { + container ('maven') { + sh 'mvn clean package' + } + } + } + + stage ('deploy to Nexus') { + steps { + container ('maven') { + sh 'mvn deploy -DaltDeploymentRepository=nexus::default::http://135.68.37.85:8081/repository/maven-snapshots/' + } + } + } + stage ('upload') { + steps { + archiveArtifacts artifacts: 'target/*.jar', followSymlinks: false + } + } + } + } + ``` + + {{< notice note >}} + + You need to replace the GitHub repository address with your own. In the command from the step in the stage `deploy to Nexus`, `nexus` is the name you set in `` in the ConfigMap and `http://135.68.37.85:8081/repository/maven-snapshots/` is the URL of your Nexus repository. + + {{}} + +### Step 5: Run the pipeline and check results + +1. You can see all the stages and steps shown on the graphical editing panels. Click **Run** to run the pipeline. + +2. After a while, you can see the pipeline status shown as **Successful**. Click the **Successful** record to see its details. + +3. You can click **View Logs** to view the detailed logs. + +4. Log in to Nexus and click **Browse**. Click **maven-public** and you can see all the dependencies have been downloaded. + + ![maven-public](/images/docs/v3.3/devops-user-guide/examples/use-nexus-in-pipeline/maven-public.png) + +5. Go back to the **Browse** page and click **maven-snapshots**. You can see the JAR package has been uploaded to the repository. 
+ + ![maven-snapshots](/images/docs/v3.3/devops-user-guide/examples/use-nexus-in-pipeline/maven-snapshots.png) + + + diff --git a/content/en/docs/v3.4/devops-user-guide/how-to-integrate/_index.md b/content/en/docs/v3.4/devops-user-guide/how-to-integrate/_index.md new file mode 100644 index 000000000..7e4fd97cf --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/how-to-integrate/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Tool Integration" +weight: 11300 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/devops-user-guide/how-to-integrate/harbor.md b/content/en/docs/v3.4/devops-user-guide/how-to-integrate/harbor.md new file mode 100644 index 000000000..150f4d529 --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/how-to-integrate/harbor.md @@ -0,0 +1,144 @@ +--- +title: "Integrate Harbor into Pipelines" +keywords: 'Kubernetes, Docker, DevOps, Jenkins, Harbor' +description: 'Integrate Harbor into your pipeline to push images to your Harbor registry.' +linkTitle: "Integrate Harbor into Pipelines" +weight: 11320 +--- + +This tutorial demonstrates how to integrate Harbor into KubeSphere pipelines. + +## Prerequisites + +- You need to [enable the KubeSphere DevOps System](../../../pluggable-components/devops/). +- You need to create a workspace, a DevOps project, and a user (`project-regular`). This account needs to be invited to the DevOps project with the `operator` role. See [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/) if they are not ready. + +## Install Harbor + +It is highly recommended that you install Harbor through [the App Store of KubeSphere](../../../application-store/built-in-apps/harbor-app/). Alternatively, install Harbor manually through Helm3. + +```bash +helm repo add harbor https://helm.goharbor.io +# For a quick start, you can expose Harbor by nodeport and disable tls. +# Set externalURL to one of your node ip and make sure it can be accessed by jenkins. +helm install harbor-release harbor/harbor --set expose.type=nodePort,externalURL=http://$ip:30002,expose.tls.enabled=false +``` + +## Get Harbor Credentials + +1. After Harbor is installed, visit `:30002` and log in to the console with the default account and password (`admin/Harbor12345`). Click **Projects** in the left navigation pane and click **NEW PROJECT** on the **Projects** page. + +2. In the displayed dialog box, set a name (`ks-devops-harbor`) and click **OK**. + +3. Click the project you just created, and click **NEW ROBOT ACCOUNT** under the **Robot Accounts** tab. + +4. In the displayed dialog box, set a name (`robot-test`) for the robot account and click **SAVE**. Make sure you select the checkbox for pushing artifact in **Permissions**. + +5. In the displayed dialog box, click **EXPORT TO FILE** to save the token. + +## Enable Insecure Registry + +You have to configure Docker to disregard security for your Harbor registry. + +1. Run the `vim /etc/docker/daemon.json` command on your host to edit the `daemon.json` file, enter the following contents, and save the changes. + + ```json + { + "insecure-registries" : ["103.61.38.55:30002"] + } + ``` + + {{< notice note >}} + + Make sure you replace `103.61.38.55:30002` with your Harbor registry address. The default location of the `daemon.json` file is `/etc/docker/daemon.json` on Linux or `C:\ProgramData\docker\config\daemon.json` on Windows. + + {{}} + +2. Run the following commands to restart Docker for the changes to take effect. 
+ + ```bash + sudo systemctl daemon-reload + sudo systemctl restart docker + ``` + + {{< notice note >}} + + It is suggested that you use this solution for isolated testing or in a tightly controlled, air-gapped environment. For more information, refer to [Deploy a plain HTTP registry](https://docs.docker.com/registry/insecure/#deploy-a-plain-http-registry). After you finish the above operations, you can also use the images in your Harbor registry when deploying workloads in your project. You need to create an image Secret for your Harbor registry, and then select your Harbor registry and enter the absolute path of your images in **Container Settings** under the **Container Image** tab to search for your images. + + {{}} + +## Create Credentials + +1. Log in to KubeSphere as `project-regular`, go to your DevOps project and create credentials for Harbor in **Credentials** under **DevOps Project Settings**. + +2. On the **Create Credentials** page, set a credential ID (`robot-test`) and select **Username and password** for **Type**. The **Username** field must be the same as the value of `name` in the JSON file you just downloaded and enter the value of `token` in the file for **Password/Token**. + +3. Click **OK** to save it. + +## Create a Pipeline + +1. Go to the **Pipelines** page and click **Create**. In the **Basic Information** tab, enter a name (`demo-pipeline`) for the pipeline and click **Next**. + +2. Use default values in **Advanced Settings** and click **Create**. + +## Edit the Jenkinsfile + +1. Click the pipeline to go to its details page and click **Edit Jenkinsfile**. + +2. Copy and paste the following contents into the Jenkinsfile. Note that you must replace the values of `REGISTRY`, `HARBOR_NAMESPACE`, `APP_NAME`, and `HARBOR_CREDENTIAL` with your own values. + + ```groovy + pipeline { + agent { + node { + label 'maven' + } + } + + environment { + // the address of your harbor registry + REGISTRY = '103.61.38.55:30002' + // the project name + // make sure your robot account have enough access to the project + HARBOR_NAMESPACE = 'ks-devops-harbor' + // docker image name + APP_NAME = 'docker-example' + // ‘robot-test’ is the credential ID you created on the KubeSphere console + HARBOR_CREDENTIAL = credentials('robot-test') + } + + stages { + stage('docker login') { + steps{ + container ('maven') { + // replace the Docker Hub username behind -u and do not forget ''. You can also use a Docker Hub token. + sh '''echo $HARBOR_CREDENTIAL_PSW | docker login $REGISTRY -u 'robot$robot-test' --password-stdin''' + } + } + } + + stage('build & push') { + steps { + container ('maven') { + sh 'git clone https://github.com/kstaken/dockerfile-examples.git' + sh 'cd dockerfile-examples/rethinkdb && docker build -t $REGISTRY/$HARBOR_NAMESPACE/$APP_NAME:devops-test .' + sh 'docker push $REGISTRY/$HARBOR_NAMESPACE/$APP_NAME:devops-test' + } + } + } + } + } + + + ``` + + {{< notice note >}} + + You can pass the parameter to `docker login -u ` via Jenkins credentials with environment variables. However, every Harbor robot account's username contains a "\$" character, which will be converted into "\$$" by Jenkins when used by environment variables. [Learn more](https://number1.co.za/rancher-cannot-use-harbor-robot-account-imagepullbackoff-pull-access-denied/). + + {{}} + +## Run the Pipeline + +Save the Jenkinsfile and KubeSphere automatically creates all stages and steps on the graphical editing panel. Click **Run** to run the pipeline. 
If everything goes well, the image is pushed to your Harbor registry by Jenkins. + diff --git a/content/en/docs/v3.4/devops-user-guide/how-to-integrate/sonarqube.md b/content/en/docs/v3.4/devops-user-guide/how-to-integrate/sonarqube.md new file mode 100644 index 000000000..267c82c17 --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/how-to-integrate/sonarqube.md @@ -0,0 +1,273 @@ +--- +title: "Integrate SonarQube into Pipelines" +keywords: 'Kubernetes, KubeSphere, DevOps, Jenkins, SonarQube, Pipeline' +description: 'Integrate SonarQube into your pipeline for code quality analysis.' +linkTitle: "Integrate SonarQube into Pipelines" +weight: 11310 +--- + +[SonarQube](https://www.sonarqube.org/) is a popular continuous inspection tool for code quality. You can use it for static and dynamic analysis of a codebase. After it is integrated into pipelines in KubeSphere [Container Platform](https://kubesphere.io/), you can view common code issues such as bugs and vulnerabilities directly on the dashboard as SonarQube detects issues in a running pipeline. + +This tutorial demonstrates how you can integrate SonarQube into pipelines. Refer to the following steps first before you [create a pipeline using a Jenkinsfile](../../../devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-jenkinsfile/). + +## Prerequisites + +You need to [enable the KubeSphere DevOps System](../../../pluggable-components/devops/). + +## Install the SonarQube Server + +To integrate SonarQube into your pipeline, you must install SonarQube Server first. + +1. Install Helm first so that you can install SonarQube using the tool. For example, run the following command to install Helm 3: + + ```bash + curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash + ``` + + View the Helm version. + + ```bash + helm version + + version.BuildInfo{Version:"v3.4.1", GitCommit:"c4e74854886b2efe3321e185578e6db9be0a6e29", GitTreeState:"clean", GoVersion:"go1.14.11"} + ``` + + {{< notice note >}} + + For more information, see [the Helm documentation](https://helm.sh/docs/intro/install/). + + {{}} + +2. Execute the following command to install SonarQube Server. + + ```bash + helm upgrade --install sonarqube sonarqube --repo https://charts.kubesphere.io/main -n kubesphere-devops-system --create-namespace --set service.type=NodePort + ``` + + {{< notice note >}} + + Make sure you use Helm 3 to install SonarQube Server. + + {{}} + +3. You will get this prompt: + + ![sonarqube-install](/images/docs/v3.3/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/sonarqube-install.png) + +## Get the SonarQube Console Address + +1. Execute the following command to get SonarQube NodePort. + + ```bash + export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services sonarqube-sonarqube) + export NODE_IP=$(kubectl get nodes --namespace kubesphere-devops-system -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT + ``` + +2. You can get the output as below (`31434` is the port number in this example, which may be different from yours): + + ```bash + http://192.168.0.4:31434 + ``` + +## Configure the SonarQube Server + +### Step 1: Access the SonarQube console + +1. Execute the following command to view the status of SonarQube. Note that the SonarQube console is not accessible until SonarQube is up and running. 
+ + ```bash + $ kubectl get pod -n kubesphere-devops-system + NAME READY STATUS RESTARTS AGE + devops-jenkins-68b8949bb-7zwg4 1/1 Running 0 84m + s2ioperator-0 1/1 Running 1 84m + sonarqube-postgresql-0 1/1 Running 0 5m31s + sonarqube-sonarqube-bb595d88b-97594 1/1 Running 2 5m31s + ``` + +2. Access the SonarQube console `http://:` in your browser. + +3. Click **Log in** in the upper-right corner and log in as the default account `admin/admin`. + + {{< notice note >}} + + You may need to set up necessary port forwarding rules and open the port to access SonarQube in your security groups depending on where your instances are deployed. + + {{}} + +### Step 2: Create a SonarQube admin token + +1. Click the letter **A** and select **My Account** from the menu to go to the **Profile** page. + + ![sonarqube-config-1](/images/docs/v3.3/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/sonarqube-config-1.jpg) + +2. Click **Security** and enter a token name, such as `kubesphere`. + + ![sonarqube-config-2](/images/docs/v3.3/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/sonarqube-config-2.jpg) + +3. Click **Generate** and copy the token. + + ![sonarqube-config-3](/images/docs/v3.3/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/sonarqube-config-3.jpg) + + {{< notice warning >}} + + Make sure you do copy the token because you won't be able to see it again as shown in the prompt. + + {{}} + +### Step 3: Create a webhook server + +1. Execute the following command to get the address of SonarQube Webhook. + + ```bash + export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services devops-jenkins) + export NODE_IP=$(kubectl get nodes --namespace kubesphere-devops-system -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT/sonarqube-webhook/ + ``` + +2. Expected output: + + ```bash + http://192.168.0.4:30180/sonarqube-webhook/ + ``` + +3. Click **Administration**, **Configuration** and **Webhooks** in turn to create a webhook. + + ![sonarqube-webhook-1](/images/docs/v3.3/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/sonarqube-webhook-1.jpg) + +4. Click **Create**. + + ![sonarqube-webhook-3](/images/docs/v3.3/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/sonarqube-webhook-3.jpg) + +5. Enter **Name** and **Jenkins Console URL** (for example, the SonarQube Webhook address) in the displayed dialog box. Click **Create** to finish. + + ![webhook-page-info](/images/docs/v3.3/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/webhook-page-info.jpg) + +### Step 4: Add the SonarQube configuration to ks-installer + +1. Execute the following command to edit `ks-installer`. + + ```bash + kubectl edit cc -n kubesphere-system ks-installer + ``` + +2. Navigate to `devops`. Add the field `sonarqube` and specify `externalSonarUrl` and `externalSonarToken` under it. + + ```yaml + devops: + enabled: true + jenkinsJavaOpts_MaxRAM: 2g + jenkinsJavaOpts_Xms: 512m + jenkinsJavaOpts_Xmx: 512m + jenkinsMemoryLim: 2Gi + jenkinsMemoryReq: 1500Mi + jenkinsVolumeSize: 8Gi + sonarqube: # Add this field manually. + externalSonarUrl: http://192.168.0.4:31434 # The SonarQube IP address. + externalSonarToken: f75dc3be11fd3d58debfd4e445e3de844683ad93 # The SonarQube admin token created above. + ``` + +3. Save the file after you finish. + +### Step 5: Add the SonarQube server to Jenkins + +1. 
Execute the following command to get the address of Jenkins. + + ```bash + export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services devops-jenkins) + export NODE_IP=$(kubectl get nodes --namespace kubesphere-devops-system -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT + ``` + +2. You can get the output as below, which tells you the port number of Jenkins. + + ```bash + http://192.168.0.4:30180 + ``` + +3. Access Jenkins with the address `http://:30180`. When KubeSphere is installed, the Jenkins dashboard is also installed by default. Besides, Jenkins is configured with KubeSphere LDAP, which means you can log in to Jenkins with KubeSphere accounts (for example, `admin/P@88w0rd`) directly. For more information about configuring Jenkins, see [Jenkins System Settings](../../../devops-user-guide/how-to-use/pipelines/jenkins-setting/). + + {{< notice note >}} + + You may need to set up necessary port forwarding rules and open port `30180` to access Jenkins in your security groups depending on where your instances are deployed. + + {{}} + +4. Click **Manage Jenkins** on the left navigation pane. + +5. Scroll down to **Configure System** and click it. + +6. Navigate to **SonarQube servers** and click **Add SonarQube**. + +7. Enter **Name** and **Server URL** (`http://:`). Click **Add**, select **Jenkins**, and then create the credentials with the SonarQube admin token in the displayed dialog box as shown in the second image below. After adding the credentials, select it from the drop-down list for **Server authentication token** and then click **Apply** to finish. + + ![sonarqube-jenkins-settings](/images/docs/v3.3/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/sonarqube-jenkins-settings.png) + + ![add-credentials](/images/docs/v3.3/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/add-credentials.png) + + {{< notice note >}} + + If the **Add** button is not working, which is a known bug from Jenkins, you can navigate to **Manage Credentials** under **Manage Jenkins**, click **Jenkins** under **Stores scoped to Jenkins**, click **Global credentials (unrestricted)**, and then click **Add Credentials** from the left navigation bar to add the credentials with the SonarQube admin token by referencing the second image above. After you add the credentials, you can select it from the drop-down list for **Server authentication token**. + + {{}} + +### Step 6: Add sonarqubeURL to the KubeSphere Console + +You need to specify `sonarqubeURL` so that you can access SonarQube directly from the KubeSphere console. + +1. Execute the following command: + + ```bash + kubectl edit cm -n kubesphere-system ks-console-config + ``` + +2. Go to `data.client.enableKubeConfig` and add the field `devops` with `sonarqubeURL` specified under it. + + ```bash + client: + enableKubeConfig: true + devops: # Add this field manually. + sonarqubeURL: http://192.168.0.4:31434 # The SonarQube IP address. + ``` + +3. Save the file. + +### Step 7: Restart Services + +Execute the following commands. + +```bash +kubectl -n kubesphere-devops-system rollout restart deploy devops-apiserver +``` + +```bash +kubectl -n kubesphere-system rollout restart deploy ks-console +``` + +## Create a SonarQube Token for a New Project + +You need a SonarQube token so that your pipeline can communicate with SonarQube as it runs. + +1. On the SonarQube console, click **Create new project**. 
+ + ![sonarqube-create-project](/images/docs/v3.3/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/sonarqube-create-project.jpg) + +2. Enter a project key, such as `java-demo`, and click **Set Up**. + + ![jenkins-projet-key](/images/docs/v3.3/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/jenkins-projet-key.jpg) + +3. Enter a project name, such as `java-sample`, and click **Generate**. + + ![generate-a-token](/images/docs/v3.3/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/generate-a-token.jpg) + +4. After the token is created, click **Continue**. + + ![token-created](/images/docs/v3.3/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/token-created.jpg) + +5. Choose **Java** and **Maven** respectively. Copy the serial number within the green box in the image below, which needs to be added in the [Credentials](../../../devops-user-guide/how-to-use/devops-settings/credential-management/#create-credentials) section if it is to be used in pipelines. + + ![sonarqube-example](/images/docs/v3.3/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/sonarqube-example.jpg) + +## View Results on the KubeSphere Console + +After you [create a pipeline using the graphical editing panel](../../how-to-use/pipelines/create-a-pipeline-using-graphical-editing-panel/) or [create a pipeline using a Jenkinsfile](../../how-to-use/pipelines/create-a-pipeline-using-jenkinsfile/), you can view the result of code quality analysis. diff --git a/content/en/docs/v3.4/devops-user-guide/how-to-use/_index.md b/content/en/docs/v3.4/devops-user-guide/how-to-use/_index.md new file mode 100644 index 000000000..5821b0d3f --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/how-to-use/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Use DevOps" +weight: 11200 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/devops-user-guide/how-to-use/code-repositories/_index.md b/content/en/docs/v3.4/devops-user-guide/how-to-use/code-repositories/_index.md new file mode 100644 index 000000000..c1fc6a027 --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/how-to-use/code-repositories/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Code Repositories" +weight: 11230 + +_build: + render: false +--- \ No newline at end of file diff --git a/content/en/docs/v3.4/devops-user-guide/how-to-use/code-repositories/import-code-repositories.md b/content/en/docs/v3.4/devops-user-guide/how-to-use/code-repositories/import-code-repositories.md new file mode 100755 index 000000000..16a3f3f44 --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/how-to-use/code-repositories/import-code-repositories.md @@ -0,0 +1,98 @@ +--- +title: "Import a Code Repository" +keywords: 'Kubernetes, GitOps, KubeSphere, Code Repository' +description: 'Describe how to import a code repository on KubeSphere.' +linkTitle: "Import a Code Repository" +weight: 11231 +--- + +In KubeSphere 3.3, you can import a GitHub, GitLab, Bitbucket, or Git-based repository. The following describes how to import a GitHub repository. + +## Prerequisites + +- You have a workspace, a DevOps project and a user (`project-regular`) invited to the DevOps project with the `operator` role. If they are not ready yet, please refer to [Create Workspaces, Projects, Users and Roles](../../../../quick-start/create-workspace-and-project/). + +- You need to [enable the KubeSphere DevOps system](../../../../pluggable-components/devops/). + + +## Procedures + +1. 
Log in to the KubeSphere console as `project-regular`. In the navigation pane on the left, click **DevOps Projects**. + +2. On the **DevOps Projects** page, click the DevOps project you created. + +3. In the navigation pane on the left, click **Code Repositories**. + +4. On the **Code Repositories** page, click **Import**. + +5. In the **Import Code Repository** dialog box, enter a name of the code repository, and then select a GitHub repository. Optionally, you can set an alias and add description. + + The following table lists supported code repositories and parameters to set. + + + + + + + + + + + + + + + + + + + + + + + +
    | Code Repository | Parameters |
    | --- | --- |
    | GitHub | **Credential**: Select the credential of the code repository. |
    | GitLab | **GitLab Server Address**: Select the GitLab server address. The default value is `https://gitlab.com`.<br />**Project Group/Owner**: Enter the GitLab username.<br />**Credential**: Select the credential of the code repository.<br />**Code Repository**: Select the code repository. |
    | Bitbucket | **Bitbucket Server Address**: Set the Bitbucket server address.<br />**Credential**: Select the credential of the code repository. |
    | Git | **Code Repository URL**: Enter the URL of the code repository.<br />**Credential**: Select the credential of the code repository. |
+ + {{< notice note >}} + + To use a private GitLab repository, please refer to [Create a Multi-branch Pipeline with GitLab-Step 4](../../../../devops-user-guide/how-to-use/pipelines/gitlab-multibranch-pipeline/). + + {{}} + +6. In **Credential**, click **Create Credential**. In the **Create Credential** dialog box, set the following parameters: + - **Name**: Enter a name of the credential, for example, `github-id`. + - **Type**: Optional values include **Username and password**, **SSH key**, **Access token**, and **kubeconfig**. In DevOps projects, **Username and password** is recommended. + - **Username**: The default username is `admin`. + - **Password/Token**: Enter your GitHub token. + - **Description**: Add description. + + {{< notice note >}} + + For more information about how to create a credential, please refer to [Credential Management](../../../../devops-user-guide/how-to-use/devops-settings/credential-management/). + + {{}} + +7. In the GitHub repositories that are displayed, select a repository, and click **OK**. +8. Click icon on the right of the imported code repository, and you can perform the following operations: + + - **Edit**: Edits the alias and description of the code repository and reselects a code repository. + - **Edit YAML**: Edits the YAML file of the code repository. + - **Delete**: Deletes the code repository. + diff --git a/content/en/docs/v3.4/devops-user-guide/how-to-use/continuous-deployments/_index.md b/content/en/docs/v3.4/devops-user-guide/how-to-use/continuous-deployments/_index.md new file mode 100644 index 000000000..659544b43 --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/how-to-use/continuous-deployments/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Continuous Deployments" +weight: 11220 + +_build: + render: false +--- \ No newline at end of file diff --git a/content/en/docs/v3.4/devops-user-guide/how-to-use/continuous-deployments/use-gitops-for-continous-deployment.md b/content/en/docs/v3.4/devops-user-guide/how-to-use/continuous-deployments/use-gitops-for-continous-deployment.md new file mode 100755 index 000000000..affd5a948 --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/how-to-use/continuous-deployments/use-gitops-for-continous-deployment.md @@ -0,0 +1,404 @@ +--- +title: "Use GitOps to Achieve Continuous Deployment of Applications" +keywords: 'Kubernetes, GitOps, KubeSphere, CI, CD' +description: 'Describe how to use GitOps for continuous deployment on KubeSphere.' +linkTitle: "Use GitOps to Achieve Continuous Deployment of Applications" +weight: 11221 +--- + +In KubeSphere 3.3, we introduce the GitOps concept, which is a way of implementing continuous deployment for cloud-native applications. The core component of GitOps is a Git repository that always stores applications and declarative description of the infrastructure for version control. With GitOps and Kubernetes, you can enable CI/CD pipelines to apply changes to any cluster, which ensures consistency in cross-cloud deployment scenarios. + +This section walks you through the process of deploying an application using a continuous deployment. +## Prerequisites + +- You have a workspace, a DevOps project and a user (**project-regular**) invited to the DevOps project with the **operator** role. If they are not ready yet, please refer to [Create Workspaces, Projects, Users and Roles](../../../../quick-start/create-workspace-and-project/). + +- You need to [enable the KubeSphere DevOps system](../../../../pluggable-components/devops/). 
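To make the GitOps workflow described above more concrete, the following sketch shows, in plain `git` and `kubectl` terms, what a continuous deployment does on every sync: it compares the manifests stored in Git with the live state of the cluster and then applies any differences. This is an illustration only; the repository URL and the `config/default` path are assumptions taken from the examples used later in this guide, and the loop itself is automated for you by the DevOps system.

```bash
# Conceptual sketch only -- the Continuous Deployments feature automates this loop.
# The repository and the manifest path are assumptions based on the examples in this guide.
git clone https://github.com/kubesphere-sigs/open-podcasts.git
cd open-podcasts

# Compare the desired state declared in Git with the live state in the cluster ...
kubectl diff -R -f config/default || true

# ... then reconcile the cluster toward the state declared in Git.
kubectl apply -R -f config/default
```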
+ + +## Import a Code Repository + +1. Log in to the KubeSphere console as **project-regular**. In the left-side navigation pane, click **DevOps Projects**. + +2. On the **DevOps Projects** page, click the DevOps project you created. + +3. In the left-side navigation pane, click **Code Repositories**. + +4. On the **Code Repositories** page on the left, click **Import**. + +5. In the **Import Code Repository** dialog box, enter the name of code repository, for example, **open-podcasts**, and select a code repository. Optionally, you can set an alias and add description. + +6. In the **Select Code Repository** dialog box, click **Git**. In **Code Repository URL**, enter the URL of the code repository, for example, **https://github.com/kubesphere-sigs/open-podcasts**, and click **OK**. + + {{< notice note >}} + + As the imported code repository is a public repository, it is not necessary to create a credential. However, if you add a private repository, a credential is required. For more information about how to create a credential, please refer to [Credential Management](../../../../devops-user-guide/how-to-use/devops-settings/credential-management/). + + {{}} + +## Create a Continuous Deployment + +1. In the left-side navigation pane, click **Continuous Deployments**. + +2. On the **Continuous Deployments** page, click **Create**. + +3. On the **Basic Information** tab, enter a name of the continuous deployment, for example, **open-podcasts**, and choose a code repository. Then, click **Next**. Optionally, you can set an alias and add description. + +4. In the **Deployment Location** section of the **Deployment Settings** tab, configure the cluster and project for which the continuous deployment will be deployed. + +5. In the **Code Repository Settings** section, specify a branch or tag of the repository and the manifest file path. + + + + + + + + + + + + + + + + + + + + + + +
    | Parameter | Description |
    | --- | --- |
    | Revision | The commit ID, branch, or tag of the repository. For example, `master`, `v1.2.0`, `0a1b2c3`, or `HEAD`. |
    | Manifest File Path | The manifest file path. For example, `config/default`. |
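    If you are not sure which values to use, you can list the branches and tags of a repository without cloning it. The URL below is the example repository used in this guide; replace it with your own.

    ```bash
    # List remote branches and tags that can be used as the Revision value.
    git ls-remote --heads https://github.com/kubesphere-sigs/open-podcasts.git
    git ls-remote --tags https://github.com/kubesphere-sigs/open-podcasts.git
    ```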
+ +6. In the **Sync Strategy** section, select **Auto Sync** or **Manual Sync** as needed. + + - **Auto Sync**: automatically syncs an application when it detects differences between the desired manifests in Git, and the live state in the cluster. The following table describes the parameters. + + + + + + + + + + + + + + + + + + + + + + +
      | Parameter | Description |
      | --- | --- |
      | Prune resources | If checked, it will delete resources that are no longer defined in Git. By default, and as a safety mechanism, auto sync will not delete resources. |
      | Self-heal | If checked, it will force the state defined in Git into the cluster when a deviation in the cluster is detected. By default, changes that are made to the live cluster will not trigger auto sync. |
+ + - **Manual Sync**: manually triggers application synchronization according to the synchronization options set. The following table describes the parameters. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      | Parameter | Description |
      | --- | --- |
      | Prune resources | If checked, it will delete resources that are no longer defined in Git. By default, and as a safety mechanism, manual sync will not delete such resources but only mark them as out of sync. |
      | Dry run | Previews the apply without affecting the cluster. |
      | Apply only | If checked, it will skip pre/post sync hooks and just run `kubectl apply` for application resources. |
      | Force | If checked, it will use `kubectl apply --force` to sync resources. |
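      The last three options map closely to plain `kubectl` behavior. The commands below are a rough illustration only; `manifest.yaml` is a hypothetical file standing in for your application resources, and the actual commands are run for you during the sync.

      ```bash
      # Rough kubectl equivalents of the manual sync options above (illustration only).
      kubectl apply --dry-run=server -f manifest.yaml   # Dry run: preview the changes without affecting the cluster
      kubectl apply -f manifest.yaml                    # Apply only: plain apply, skipping pre/post sync hooks
      kubectl apply --force -f manifest.yaml            # Force: force the sync when a normal apply cannot update a resource
      ```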
+ + {{< notice note >}} + + To configure the preceding parameters, go to the list or details page of continous deployment, select **Sync** from the drop-down list, and then specify parameter values in the **Sync Resource** dialog box. + + {{}} + +7. In the **Sync Settings** section, configure parameters as needed. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    | Parameter | Description |
    | --- | --- |
    | Skip schema validation | Disables kubectl schema validation. `--validate=false` is added when `kubectl apply` runs. |
    | Auto create project | Automatically creates projects for application resources if the projects do not exist. |
    | Prune last | Resource pruning happens as a final, implicit wave of the sync operation, after other resources have been deployed and become healthy. |
    | Selective sync | Syncs only out-of-sync resources. |
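    For reference, selecting **Skip schema validation** only changes how `kubectl apply` is invoked during the sync. A minimal sketch, where `manifest.yaml` is a placeholder for your application resources:

    ```bash
    # With "Skip schema validation" selected, the sync effectively runs:
    kubectl apply --validate=false -f manifest.yaml
    ```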
+ +8. In the **Prune Propagation Policy** section, select a policy as needed. + + + + + + + + + + + + + + + + + + + + + + + + + + +
    | Parameter | Description |
    | --- | --- |
    | foreground | Deletes the dependent resources first, and then deletes the owner resource. |
    | background | Deletes the owner resource immediately, and then deletes the dependent resources in the background. |
    | orphan | Deletes the owner resource and leaves its dependent resources orphaned (the dependents are not deleted). |
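    These are the same propagation policies that Kubernetes exposes for cascading deletion. The commands below are an illustration only, using a hypothetical Deployment named `demo`.

    ```bash
    # The three policies correspond to kubectl's cascading deletion modes (illustration only).
    kubectl delete deployment demo --cascade=foreground   # delete dependent resources first, then the owner
    kubectl delete deployment demo --cascade=background   # delete the owner immediately, dependents in the background
    kubectl delete deployment demo --cascade=orphan       # delete the owner and leave dependent resources running
    ```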
+ +9. In the **Replace Resource** section, specify whether to replace the resources that already exist. + + + + If checked, the resources will be synced by **kubectl replace/create**. By default, the resources will be synced by **kubectl apply**. + + + +10. Click **Create**. The resource you create will appear in the list of continuous deployments. + +## View the Created Continuous Deployment + +1. On the **Continuous Deployments** page, view the created continuous deployment. The following table describes the parameters. + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    | Item | Description |
    | --- | --- |
    | Name | Name of the continuous deployment. |
    | Health Status | Health status of the continuous deployment:<br />• **Healthy**: Resources are healthy.<br />• **Degraded**: Resources are degraded.<br />• **Progressing**: Resources are being synchronized. This is the default state.<br />• **Suspended**: Resources have been suspended and are waiting to be resumed.<br />• **Unknown**: The resource state is unknown.<br />• **Missing**: Resources are missing. |
    | Sync Status | Synchronization status of the continuous deployment:<br />• **Synced**: Resources have been synchronized.<br />• **Out of sync**: The actual running state of resources is not as desired.<br />• **Unknown**: The resource synchronization state is unknown. |
    | Deployment Location | Cluster and project where resources are deployed. |
    | Update Time | Time when resources are updated. |
+ +2. Click icon on the right of the continuous deployment, and you can perform the following: + - **Edit Information**: edits the alias and description. + - **Edit YAML**: edits the YAML file. + - **Sync**: triggers resources synchronization. + - **Delete**: deletes the continuous deployment. + + {{< notice warning >}} + + Deleting a continuous deployment also deletes resources associated with the continuous deployment. Therefore, exert caution when deleting the continuous deployment. + + {{}} + + +3. Click the created continuous deployment to go to its details page, where you can view the synchronization status and result. + +## Access the Created Application + +1. Go to the project where the continuous deployment resides, in the left-side navigation pane, click **Services**. + +2. On the **Services** page on the left, click icon on the right of the deployed application, and click **Edit External Access**. + +3. In **Access Mode**, select **NodePort**, and click **OK**. + +4. In the **External Access** column, check the exposed port, and access the application by **nodeIP: nodePort**. + + {{< notice note >}} + Before accessing the service, open the exposed port in security groups. + {{}} \ No newline at end of file diff --git a/content/en/docs/v3.4/devops-user-guide/how-to-use/devops-settings/_index.md b/content/en/docs/v3.4/devops-user-guide/how-to-use/devops-settings/_index.md new file mode 100644 index 000000000..334bea7ee --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/how-to-use/devops-settings/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "DevOps Settings" +weight: 11240 + +_build: + render: false +--- \ No newline at end of file diff --git a/content/en/docs/v3.4/devops-user-guide/how-to-use/devops-settings/add-cd-allowlist.md b/content/en/docs/v3.4/devops-user-guide/how-to-use/devops-settings/add-cd-allowlist.md new file mode 100644 index 000000000..0fdc74a62 --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/how-to-use/devops-settings/add-cd-allowlist.md @@ -0,0 +1,28 @@ +--- +title: "Add a Continuous Deployment Allowlist" +keywords: 'Kubernetes, GitOps, KubeSphere, CI/CD, Allowlist' +description: 'Describe how to add a continuous deployment allowlist on KubeSphere.' +linkTitle: "Add a Continuous Deployment Allowlist" +weight: 11243 +--- +In KubeSphere 3.3, you can set an allowlist so that only specific code repositories and deployment locations can be used for continuous deployment. + +## Prerequisites + +- You have a workspace, a DevOps project and a user (`project-regular`) invited to the DevOps project with the `operator` role. If they are not ready yet, please refer to [Create Workspaces, Projects, Users and Roles](../../../../quick-start/create-workspace-and-project/). + +- You need to [enable the KubeSphere DevOps system](../../../../pluggable-components/devops/). + +- You need to [import an code repository](../../../../devops-user-guide/how-to-use/code-repositories/import-code-repositories/). + +## Procedures + +1. Log in to the KubeSphere console as `project-regular`. In the navigation pane on the left, click **DevOps Projects**. + +2. On the **DevOps Projects** page, click the DevOps project you created. + +3. In the navigation pane on the left, choose **DevOps Project Settings > Basic Information**. + +4. In **Continuous Deployment Allowlist** on the right, click **Enable Allowlist**. + +5. In the **Edit Allowlist** dialog box, choose a code repository and the cluster and project where the code deployment will be deployed, and then click **OK**. 
You can click **Add** to add multiple code repositories and deployment locations. \ No newline at end of file diff --git a/content/en/docs/v3.4/devops-user-guide/how-to-use/devops-settings/credential-management.md b/content/en/docs/v3.4/devops-user-guide/how-to-use/devops-settings/credential-management.md new file mode 100644 index 000000000..48a8f955c --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/how-to-use/devops-settings/credential-management.md @@ -0,0 +1,93 @@ +--- +title: "Credential Management" +keywords: 'Kubernetes, Docker, Credential, KubeSphere, DevOps' +description: 'Create credentials so that your pipelines can communicate with third-party applications or websites.' +linkTitle: "Credential Management" +weight: 11241 +--- + +Credentials are objects containing sensitive information, such as usernames and passwords, SSH keys, and tokens. When a KubeSphere DevOps pipeline is running, it interacts with objects in external environments to perform a series of tasks, including pulling code, pushing and pulling images, and running scripts. During this process, credentials need to be provided accordingly while they do not appear explicitly in the pipeline. + +A DevOps project user with necessary permissions can configure credentials for Jenkins pipelines. Once the user adds or configures these credentials in a DevOps project, they can be used in the DevOps project to interact with third-party applications. + +Currently, you can create the following 4 types of credentials in a DevOps project: + +- **Username and password**: Username and password which can be handled as separate components or as a colon-separated string in the format `username:password`, such as accounts of GitHub and GitLab. +- **SSH key**: Username with a private key, an SSH public/private key pair. +- **Access token**: a token with certain access. +- **kubeconfig**: It is used to configure cross-cluster authentication. + +This tutorial demonstrates how to create and manage credentials in a DevOps project. For more information about how credentials are used, see [Create a Pipeline Using a Jenkinsfile](../../../../devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-jenkinsfile/) and [Create a Pipeline Using Graphical Editing Panels](../../../../devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-graphical-editing-panel/). + +## Prerequisites + +- You have enabled [KubeSphere DevOps System](../../../../pluggable-components/devops/). +- You have a workspace, a DevOps project and a user (`project-regular`) invited to the DevOps project with the `operator` role. If they are not ready yet, see [Create Workspaces, Projects, Users and Roles](../../../../quick-start/create-workspace-and-project/). + +## Create a Credential + +Log in to the console of KubeSphere as `project-regular`. Navigate to your DevOps project, select **Credentials** and click **Create**. +1. Log in to the console of KubeSphere as `project-regular`. + +2. Navigate to your DevOps project. On the navigation pane on the left, choose **DevOps Project Settings > Credentials**. + +3. In the **Credentials** area on the right, click **Create**. + +4. On the **Create Credential** dialog box, enter the credential name and choose the credetial type. Parameters vary depneding on the type you select. For more information, refer to the following. +### Create a username and password credential + +To set a GitHub credential, you need to set the following parameters: + + - **Name**: Set a credential name, for example, `github-id`. 
+ - **Type**: Select **Username and password**. + - **Username**: Enter your GitHub username. + - **Password/Token**: Enter your GitHub token. + - **Description**: Decribe the credential. + +{{< notice note >}} + +- Since August, 2021, GitHub has announced that it would require two factor authentication for users who contribute code on its service. Therefore, if you want to create a GitHub credential, you need to enter your GitHub token, instead of the password. For details about how to create a token, please refer to [Creating a personal access token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token). + +- If there are any special characters such as `@` and `$` in your account or password, they can cause errors as a pipeline runs because they may not be recognized. In this case, you need to encode your account or password on some third-party websites first, such as [urlencoder](https://www.urlencoder.org/). After that, copy and paste the output for your credential information. + +{{}} +### Create an SSH key credential + +You need to set the following parameters: + +- **Name**: Set a credential name. +- **Type**: Select **SSH key**. +- **Username**: Enter your username. +- **Private Key**: Enter the SSH key. +- **Passphrase**: Enter the passphrase. For enhanced security, you are avised to set this parameter. +- **Description**: Decribe the credential. + +### Create an access token credential + +You need to set the following parameters: + +- **Name**: Set a credential name. +- **Type**: Select **Access token**. +- **Access token**: Enter the access token. +- **Description**: Decribe the credential. + +### Create a kubeconfig credential + +You need to set the following parameters: + +- **Name**: Set a credential name, for example, `demo-kubeconfig`. +- **Type**: Select **kubeconfig**. +- **Content**: The system automatically fills in the content when you access the current Kubernetes cluster. and you do not need to change it. However, if you are accessing other clusters, you may need change the content of kubeconfig. +- **Description**: Decribe the credential. + +{{< notice info >}} + +A file that is used to configure access to clusters is called a kubeconfig file. This is a generic way of referring to configuration files. For more information, see [the official documentation of Kubernetes](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/). + +{{}} + +## View and Manage Credentials + +1. Click any of them to go to its details page, where you can see account details and all the events related to the credentials. + +2. You can also edit or delete credentials on this page. Note that when you edit credentials, KubeSphere does not display the existing username or password information. The previous one will be overwritten if you enter a new username and password. \ No newline at end of file diff --git a/content/en/docs/v3.4/devops-user-guide/how-to-use/devops-settings/role-and-member-management.md b/content/en/docs/v3.4/devops-user-guide/how-to-use/devops-settings/role-and-member-management.md new file mode 100644 index 000000000..4f63d7185 --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/how-to-use/devops-settings/role-and-member-management.md @@ -0,0 +1,76 @@ +--- +title: "Role and Member Management In Your DevOps project" +keywords: 'Kubernetes, KubeSphere, DevOps, role, member' +description: 'Create and manage roles and members in DevOps projects.' 
+linkTitle: "Role and Member Management" +weight: 11242 +--- + +This guide demonstrates how to manage roles and members in your DevOps project. + +In DevOps project scope, you can grant the following resources' permissions to a role: + +- Pipelines +- Credentials +- DevOps Settings +- Access Control + +## Prerequisites + +At least one DevOps project has been created, such as `demo-devops`. Besides, you need a user of the `admin` role (for example, `devops-admin`) at the DevOps project level. + +## Built-in Roles + +In **DevOps Project Roles**, there are three available built-in roles as shown below. Built-in roles are created automatically by KubeSphere when a DevOps project is created and they cannot be edited or deleted. + +| Built-in Roles | Description | +| ------------------ | ------------------------------------------------------------ | +| viewer | The viewer who can view all resources in the DevOps project. | +| operator | The normal member in a DevOps project who can create pipelines and credentials in the DevOps project. | +| admin | The administrator in the DevOps project who can perform any action on any resource. It gives full control over all resources in the DevOps project. | + +## Create a DevOps Project Role + +1. Log in to the console as `devops-admin` and select a DevOps project (for example, `demo-devops`) on the **DevOps Projects** page. + + {{< notice note >}} + + The account `devops-admin` is used as an example. As long as the account you are using is granted a role including the permissions of **Member Viewing**, **Role Management** and **Role Viewing** in **Access Control** at DevOps project level, it can create a DevOps project role. + + {{}} + +2. Go to **DevOps Project Roles** in **DevOps Project Settings**, click **Create** and set a **Name**. In this example, a role named `pipeline-creator` will be created. Click **Edit Permissions** to continue. + +3. In **Pipeline Management**, select the permissions that you want this role to contain. For example, **Pipeline Management** and **Pipeline Viewing** are selected for this role. Click **OK** to finish. + + {{< notice note >}} + + **Depends on** means the major permission (the one listed after **Depends on**) needs to be selected first so that the affiliated permission can be assigned. + + {{}} + +4. Newly created roles will be listed in **DevOps Project Roles**. You can click icon on the right to edit it. + + {{< notice note >}} + + The role of `pipeline-creator` is only granted **Pipeline Management** and **Pipeline Viewing**, which may not satisfy your need. This example is only for demonstration purpose. You can create customized roles based on your needs. + + {{}} + +## Invite a New Member + +1. In **DevOps Project Settings**, select **DevOps Project Members** and click **Invite**. + +2. Click icon to invite a user to the DevOps project. Grant the role of `pipeline-creator` to the account. + + {{< notice note >}} + + The user must be invited to the DevOps project's workspace first. + + {{}} + +3. After you add a user to the DevOps project, click **OK**. In **DevOps Project Members**, you can see the newly invited member listed. + +4. You can also change the role of an existing member by editing it or remove it from the DevOps project. 
+ + diff --git a/content/en/docs/v3.4/devops-user-guide/how-to-use/devops-settings/set-ci-node.md b/content/en/docs/v3.4/devops-user-guide/how-to-use/devops-settings/set-ci-node.md new file mode 100644 index 000000000..68b4d2791 --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/how-to-use/devops-settings/set-ci-node.md @@ -0,0 +1,49 @@ +--- +title: "Set a CI Node for Dependency Caching" +keywords: 'Kubernetes, docker, KubeSphere, Jenkins, cicd, pipeline, dependency cache' +description: 'Configure a node or a group of nodes specifically for continuous integration (CI) to speed up the building process in a pipeline.' +linkTitle: "Set a CI Node for Dependency Caching" +weight: 11245 +--- + +Generally, different dependencies need to be pulled as applications are being built. This may cause some issues such as long pulling time and network instability, further resulting in build failures. To provide your pipeline with a more enabling and stable environment, you can configure a node or a group of nodes specifically for continuous integration (CI). These CI nodes can speed up the building process by using caches. + +This tutorial demonstrates how to set CI nodes so that KubeSphere schedules tasks of pipelines and S2I/B2I builds to these nodes. + +## Prerequisites + +You need a user granted a role including the permission of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the permission and assign it to a user. + +## Label a CI Node + +1. Click **Platform** in the upper-left corner and select **Cluster Management**. + +2. If you have enabled the [multi-cluster feature](../../../../multicluster-management/) with Member clusters imported, you can select a specific cluster to view its nodes. If you have not enabled the feature, refer to the next step directly. + +3. Navigate to **Cluster Nodes** under **Nodes**, where you can see existing nodes in the current cluster. + +4. Select a node from the list to run CI tasks. Click the node name to go to its details page. Click **More** and select **Edit Labels**. + +5. In the displayed dialog box, you can see a label with the key `node-role.kubernetes.io/worker`. Enter `ci` for its value and click **Save**. + + {{< notice note >}} + + You can also click **Add** to add new labels based on your needs. + + {{}} + +## Add a Taint to a CI Node + +Basically, pipelines and S2I/B2I workflows will be scheduled to this node according to [node affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity). If you want to make the node a dedicated one for CI tasks, which means other workloads are not allowed to be scheduled to it, you can add a [taint](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) to it. + +1. Click **More** and select **Edit Taints**. + +2. Click **Add Taint** and enter a key `node.kubernetes.io/ci` without specifying a value. You can choose `Prevent scheduling`, `Prevent scheduling if possible`, or `Prevent scheduling and evict existing Pods` based on your needs. + +3. Click **Save**. KubeSphere will schedule tasks according to the taint you set. You can go back to work on your DevOps pipeline now. + + {{< notice tip >}} + + This tutorial also covers the operation related to node management. For detailed information, see [Node Management](../../../../cluster-administration/nodes/). 
+ + {{}} diff --git a/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/_index.md b/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/_index.md new file mode 100644 index 000000000..953661d62 --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Pipelines" +weight: 11210 + +_build: + render: false +--- \ No newline at end of file diff --git a/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/choose-jenkins-agent.md b/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/choose-jenkins-agent.md new file mode 100644 index 000000000..b9f1f58bd --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/choose-jenkins-agent.md @@ -0,0 +1,134 @@ +--- +title: "Choose Jenkins Agent" +keywords: 'Kubernetes, KubeSphere, Docker, DevOps, Jenkins, Agent' +description: 'Specify the Jenkins agent and use the built-in podTemplate for your pipeline.' +linkTitle: "Choose Jenkins Agent" +weight: 112190 +--- + +The `agent` section specifies where the entire Pipeline, or a specific stage, will execute in the Jenkins environment depending on where the `agent` section is placed. The section must be defined at the upper-level inside the `pipeline` block, but stage-level usage is optional. For more information, see [the official documentation of Jenkins](https://www.jenkins.io/doc/book/pipeline/syntax/#agent). + +## Built-in podTemplate + +A podTemplate is a template of a Pod that is used to create agents. Users can define a podTemplate to use in the Kubernetes plugin. + +As a pipeline runs, every Jenkins agent Pod must have a container named `jnlp` for communications between the Jenkins controller and Jenkins agent. In addition, users can add containers in the podTemplate to meet their own needs. They can choose to use their own Pod YAML to flexibly control the runtime, and the container can be switched by the `container` command. Here is an example. + +```groovy +pipeline { + agent { + kubernetes { + //cloud 'kubernetes' + label 'mypod' + yaml """ +apiVersion: v1 +kind: Pod +spec: + containers: + - name: maven + image: maven:3.3.9-jdk-8-alpine + command: ['cat'] + tty: true +""" + } + } + stages { + stage('Run maven') { + steps { + container('maven') { + sh 'mvn -version' + } + } + } + } +} +``` + +At the same time, KubeSphere has some built-in podTemplates, so that users can avoid writing YAML files, greatly reducing learning costs. + +In the current version, there are 4 types of built-in podTemplates, i.e. `base`, `nodejs`, `maven` and `go`. KubeSphere also provides an isolated Docker environment in Pods. + +You can use the built-in podTemplate by specifying the label for an agent. For example, to use the nodejs podTemplate, you can set the label to `nodejs` when creating the Pipeline, as shown in the example below. 
+ +![jenkins-agent](/images/docs/v3.3/devops-user-guide/using-devops/jenkins-agent/jenkins-agent.jpg) + +```groovy +pipeline { + agent { + node { + label 'nodejs' + } + } + + stages { + stage('nodejs hello') { + steps { + container('nodejs') { + sh 'yarn -v' + sh 'node -v' + sh 'docker version' + sh 'docker images' + } + } + } + } +} +``` + +### podTemplate base + +| Name | Type / Version | +| --- | --- | +|Jenkins Agent Label | base | +|Container Name | base | +| OS| centos-7 | +|Docker| 18.06.0| +|Helm | 2.11.0 | +|Kubectl| Stable release| +|Built-in tools | unzip, which, make, wget, zip, bzip2, git | + + +### podTemplate nodejs + +| Name | Type / Version | +| --- | --- | +|Jenkins Agent Label | nodejs | +|Container Name | nodejs | +| OS| centos-7 | +|Node | 9.11.2 | +|Yarn | 1.3.2 | +| Docker | 18.06.0 | +| Helm | 2.11.0 | +|Kubectl | Stable release| +|Built-in tools| unzip, which, make, wget, zip, bzip2, git | + + +### podTemplate maven + +| Name | Type / Version | +| --- | --- | +| Jenkins Agent Label | maven | +| Container Name | maven | +| OS| centos-7 | +| Jdk | openjdk-1.8.0 | +| Maven | 3.5.3| +| Docker| 18.06.0 | +| Helm | 2.11.0 | +| Kubectl| Stable release | +| Built-in tools | unzip, which, make, wget, zip, bzip2, git | + + +### podTemplate go + +| Name | Type / Version | +| --- | --- | +| Jenkins Agent Label | go | +| Container Name | go | +| OS| centos-7 | +| Go | 1.11 | +| GOPATH | /home/jenkins/go | +| GOROOT | /usr/local/go | +| Docker | 18.06.0 | +| Helm | 2.11.0 | +| Kubectl | Stable release | +| Built-in tools | unzip, which, make, wget, zip, bzip2, git | diff --git a/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-graphical-editing-panel.md b/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-graphical-editing-panel.md new file mode 100644 index 000000000..5640d4f8a --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-graphical-editing-panel.md @@ -0,0 +1,389 @@ +--- +title: "Create a Pipeline Using Graphical Editing Panels" +keywords: 'KubeSphere, Kubernetes, jenkins, cicd, graphical pipelines' +description: 'Learn how to create and run a pipeline by using the graphical editing panel of KubeSphere.' +linkTitle: 'Create a Pipeline Using Graphical Editing Panels' +weight: 11211 +--- + +A graphical editing panel in KubeSphere contains all the necessary operations used in Jenkins [stages](https://www.jenkins.io/doc/book/pipeline/#stage) and [steps](https://www.jenkins.io/doc/book/pipeline/#step). You can directly define these stages and steps on the interactive panel without creating any Jenkinsfile. + +This tutorial demonstrates how to create a pipeline through graphical editing panels in KubeSphere. During the whole process, you do not need to create any Jenkinsfile manually as KubeSphere will automatically generate one based on your settings on the editing panels. When the pipeline runs successfully, it creates a Deployment and a Service accordingly in your development environment and pushes an image to Docker Hub. + +## Prerequisites + +- You need to [enable the KubeSphere DevOps System](../../../../pluggable-components/devops/). +- You need to have a [Docker Hub](https://www.dockerhub.com/) account. +- You need to create a workspace, a DevOps project, and a user (`project-regular`). This user must be invited to the DevOps project with the `operator` role. 
See [Create Workspaces, Projects, Users and Roles](../../../../quick-start/create-workspace-and-project/) if they are not ready. +- Set CI dedicated nodes to run the pipeline. For more information, see [Set CI Node for Dependency Cache](../../../../devops-user-guide/how-to-use/devops-settings/set-ci-node/). +- Configure your email server for pipeline notifications (optional). For more information, see [Set Email Server for KubeSphere Pipelines](../../../../devops-user-guide/how-to-use/pipelines/jenkins-email/). +- Configure SonarQube to include code analysis as part of the pipeline (optional). For more information, see [Integrate SonarQube into Pipelines](../../../../devops-user-guide/how-to-integrate/sonarqube/). + +## Pipeline Overview + +This example pipeline includes the following six stages. + +![Pipeline](https://pek3b.qingstor.com/kubesphere-docs/png/20190516091714.png#align=left&display=inline&height=1278&originHeight=1278&originWidth=2190&search=&status=done&width=2190) + +{{< notice note >}} + +- **Stage 1. Checkout SCM**: Pull source code from a GitHub repository. +- **Stage 2. Unit test**: It will not proceed with the next stage until the test is passed. +- **Stage 3. Code analysis**: Configure SonarQube for static code analysis. +- **Stage 4. Build and push**: Build an image and push it to Docker Hub with the tag `snapshot-$BUILD_NUMBER`, the `$BUILD_NUMBER` of which is the record serial number in the pipeline’s activity list. +- **Stage 5. Artifacts**: Generate an artifact (JAR package) and save it. +- **Stage 6. Deploy to DEV**: Create a Deployment and a Service in the development environment. It requires review in this stage. An email notification will be sent after the Deployment is successful. + +{{}} + +## Hands-on Lab + +### Step 1: Create credentials + +1. Log in to the KubeSphere console as `project-regular`. Go to your DevOps project and create the following credentials in **Credentials** under **DevOps Project Settings**. For more information about how to create credentials, see [Credential Management](../../../../devops-user-guide/how-to-use/devops-settings/credential-management/). + + {{< notice note >}} + + If there are any special characters such as `@` and `$` in your account or password, they can cause errors as a pipeline runs because they may not be recognized. In this case, you need to encode your account or password on some third-party websites first, such as [urlencoder](https://www.urlencoder.org/). After that, copy and paste the output for your credential information. + + {{}} + + | Credential ID | Type | Where to use | + | --------------- | --------------------- | ------------ | + | dockerhub-id | Username and password | Docker Hub | + | demo-kubeconfig | kubeconfig | Kubernetes | + +2. You need to create an additional credential ID (`sonar-token`) for SonarQube, which is used in stage 3 (Code analysis) mentioned above. Refer to [Create SonarQube Token for New Project](../../../../devops-user-guide/how-to-integrate/sonarqube/#create-sonarqube-token-for-new-project) to enter your SonarQube token in the **Token** field for a credential of the **Access token** type. Click **OK** to finish. + +3. In total, you have three credentials in the list. + +### Step 2: Create a project + +In this tutorial, the example pipeline will deploy the [sample](https://github.com/kubesphere/devops-maven-sample/tree/sonarqube) app to a project. Hence, you must create the project (for example, `kubesphere-sample-dev`) in advance. 
The Deployment and Service of the app will be created automatically in the project once the pipeline runs successfully. + +You can use the user `project-admin` to create the project. Besides, this user is also the reviewer of the CI/CD pipeline. Make sure the account `project-regular` is invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../../quick-start/create-workspace-and-project/). + +### Step 3: Create a pipeline + +1. Make sure you have logged in to KubeSphere as `project-regular`, and then go to your DevOps project. Click **Create** on the **Pipelines** page. + +2. In the displayed dialog box, name it `graphical-pipeline` and click **Next**. + +3. On the **Advanced Settings** page, click **Add** to add three string parameters as follows. These parameters will be used in the Docker command of the pipeline. Click **Create** when you finish adding. + + | Parameter Type | Name | Value | Description | + | -------------- | ------------------- | --------------- | ------------------------------------------------------------ | + | String | REGISTRY | `docker.io` | This is the image registry address. This example uses `docker.io`. | + | String | DOCKERHUB_NAMESPACE | Docker ID | You Docker Hub account or the organization name under the account. | + | String | APP_NAME | `devops-sample` | The app name. | + + {{< notice note >}} + + For other fields, use the default values directly or refer to [Pipeline Settings](../pipeline-settings/) to customize the configuration. + + {{}} + +4. The pipeline created is displayed in the list. + +### Step 4: Edit the pipeline + +Click the pipeline to go to its details page. To use graphical editing panels, click **Edit Pipeline** under the tab **Task Status**. In the displayed dialog box, click **Custom Pipeline**. This pipeline consists of six stages. Follow the steps below to set each stage. + +{{< notice note >}} + +- The pipeline details page shows **Sync Status**. It reflects the synchronization result between KubeSphere and Jenkins, and you can see the **Successful** icon if the synchronization is successful. You can also click **Edit Jenkinsfile** to create a Jenkinsfile manually for your pipeline. + +- You can also [use the built-in pipeline templates](../use-pipeline-templates/) provided by KubeSphere. + +{{}} + +#### Stage 1: Pull source code (Checkout SCM) + +A graphical editing panel includes two areas - **canvas** on the left and **content** on the right. It automatically generates a Jenkinsfile based on how you configure different stages and steps, which is much more user-friendly for developers. + +{{< notice note >}} + +Pipelines include [declarative pipelines](https://www.jenkins.io/doc/book/pipeline/syntax/#declarative-pipeline) and [scripted pipelines](https://www.jenkins.io/doc/book/pipeline/syntax/#scripted-pipeline). Currently, you can create declarative pipelines through the panel. For more information about pipeline syntax, see [Jenkins Documentation](https://jenkins.io/doc/book/pipeline/syntax/). + +{{}} + +1. On the graphical editing panel, select **node** from the **Type** drop-down list and select **maven** from the **Label** drop-down list. + + {{< notice note >}} + + `agent` is used to define the execution environment. The `agent` directive tells Jenkins where and how to execute the pipeline. For more information, see [Choose Jenkins Agent](../choose-jenkins-agent/). 
+ + {{}} + + ![graphical-panel](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/graphical-panel.png) + +2. To add a stage, click the plus icon on the left. Click the box above the **Add Step** area and set a name (for example, `Checkout SCM`) for the stage in the field **Name** on the right. + + ![edit-panel](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/edit-panel.png) + +3. Click **Add Step**. Select **git** from the list as the example code is pulled from GitHub. In the displayed dialog box, fill in the required field. Click **OK** to finish. + + - **URL**. Enter the GitHub repository address `https://github.com/kubesphere/devops-maven-sample.git`. Note that this is an example and you need to use your own repository address. + - **Name**. You do not need to enter the Credential ID for this tutorial. + - **Branch**. It defaults to the master branch if you leave it blank. Enter `sonarqube` or leave it blank if you do not need the code analysis stage. + + ![enter-repo-url](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/enter-repo-url.png) + +4. The first stage is now set. + + ![first-stage-set](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/first-stage-set.png) + +#### Stage 2: Unit test + +1. Click the plus icon on the right of stage 1 to add a new stage to perform a unit test in the container. Name it `Unit Test`. + + ![unit-test](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/unit-test.png) + +2. Click **Add Step** and select **container** from the list. Name it `maven` and then click **OK**. + + ![container](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/container.png) + +3. Click **Add Nesting Steps** to add a nested step under the `maven` container. Select **shell** from the list and enter the following command in the command line. Click **OK** to save it. + + ```shell + mvn clean -gs `pwd`/configuration/settings.xml test + ``` + + {{< notice note >}} + + You can specify a series of [steps](https://www.jenkins.io/doc/book/pipeline/syntax/#steps) to be executed in a given stage directive on the graphical editing panel. + + {{}} + + +#### Stage 3: Code analysis (optional) + +This stage uses SonarQube to test your code. You can skip this stage if you do not need the analysis. + +1. Click the plus icon on the right of the `Unit Test` stage to add a stage for SonarQube code analysis in the container. Name it `Code Analysis`. + + ![code-analysis-stage](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/code-analysis-stage.png) + +2. Click **Add Step** under **Task** in **Code Analysis** and select **container**. Name it `maven` and click **OK**. + + ![maven-container](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/maven-container.png) + +3. Click **Add Nesting Steps** under the `maven` container to add a nested step. Click **withCredentials** and select the SonarQube token (`sonar-token`) from the **Name** list. Enter `SONAR_TOKEN` for **Text Variable**, then click **OK**. + + ![sonarqube-credentials](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/sonarqube-credentials.png) + +4. 
Under the **withCredentials** step, click **Add Nesting Steps** to add a nested step for it. + + ![nested-step](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/nested-step.png) + +5. Click **withSonarQubeEnv**. In the displayed dialog box, do not change the default name `sonar` and click **OK** to save it. + + ![sonar](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/sonar.png) + +6. Under the **withSonarQubeEnv** step, click **Add Nesting Steps** to add a nested step for it. + + ![add-nested-step](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/add-nested-step.png) + +7. Click **shell** and enter the following command in the command line for the sonarqube branch and authentication. Click **OK** to finish. + + ```shell + mvn sonar:sonar -Dsonar.login=$SONAR_TOKEN + ``` + + ![sonarqube-shell-new](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/sonarqube-shell-new.png) + +8. Click **Add Nesting Steps** (the third one) for the **container** step directly and select **timeout**. Enter `1` for time and select **Hours** for unit. Click **OK** to finish. + + ![add-nested-step-2](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/add-nested-step-2.png) + + ![timeout](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/timeout.png) + +9. Click **Add Nesting Steps** for the **timeout** step and select **waitForQualityGate**. Select **Start the follow-up task after the inspection** in the displayed dialog box. Click **OK** to save it. + + ![waitforqualitygate](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/waitforqualitygate.png) + + ![sonar-ready](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/sonar-ready.png) + +#### Stage 4: Build and push the image + +1. Click the plus icon on the right of the previous stage to add a new stage to build and push images to Docker Hub. Name it `Build and Push`. + + ![build-and-push-image](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/build-and-push-image.png) + +2. Click **Add Step** under **Task** and select **container**. Name it `maven`, and then click **OK**. + + ![maven-set](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/maven-set.png) + +3. Click **Add Nesting Steps** under the `maven` container to add a nested step. Select **shell** from the list, and enter the following command in the displayed dialog box. Click **OK** to finish. + + ```shell + mvn -Dmaven.test.skip=true clean package + ``` + + ![nested-step-maven](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/nested-step-maven.png) + +4. Click **Add Nesting Steps** again and select **shell**. Enter the following command in the command line to build a Docker image based on the [Dockerfile](https://github.com/kubesphere/devops-maven-sample/blob/sonarqube/Dockerfile-online). Click **OK** to confirm. + + {{< notice note >}} + + DO NOT omit the dot `.` at the end of the command. + + {{}} + + ```shell + docker build -f Dockerfile-online -t $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BUILD_NUMBER . 
+ ``` + + ![shell-command](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/shell-command.png) + +5. Click **Add Nesting Steps** again and select **withCredentials**. Fill in the following fields in the displayed dialog box. Click **OK** to confirm. + + - **Credential Name**: Select the Docker Hub credentials you created, such as `dockerhub-id`. + - **Password Variable**: Enter `DOCKER_PASSWORD`. + - **Username Variable**: Enter `DOCKER_USERNAME`. + + {{< notice note >}} + + For security reasons, the account information displays as variables in the script. + + {{}} + + ![docker-credential](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/docker-credential.png) + +6. Click **Add Nesting Steps** (the first one) in the **withCredentials** step created above. Select **shell** and enter the following command in the displayed dialog box, which is used to log in to Docker Hub. Click **OK** to confirm. + + ```shell + echo "$DOCKER_PASSWORD" | docker login $REGISTRY -u "$DOCKER_USERNAME" --password-stdin + ``` + + ![login-docker-command](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/login-docker-command.png) + +7. Click **Add nesting steps** in the **withCredentials** step. Select **shell** and enter the following command to push the SNAPSHOT image to Docker Hub. Click **OK** to finish. + + ```shell + docker push $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BUILD_NUMBER + ``` + + ![push-snapshot-to-docker](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/push-snapshot-to-docker.png) + +#### Stage 5: Generate the artifact + +1. Click the plus icon on the right of the **Build and Push** stage to add a new stage to save artifacts and name it `Artifacts`. This example uses a JAR package. + + ![add-artifact-stage](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/add-artifact-stage.png) + +2. With the **Artifacts** stage selected, click **Add Step** under **Task** and select **archiveArtifacts**. Enter `target/*.jar` in the displayed dialog box, which is used to set the archive path of artifacts in Jenkins. Click **OK** to finish. + + ![artifact-info](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/artifact-info.png) + +#### Stage 6: Deploy to development + +1. Click the plus icon on the right of the stage **Artifacts** to add the last stage. Name it `Deploy to Dev`. This stage is used to deploy resources to your development environment (namely, the project of `kubesphere-sample-dev`). + + ![develop-to-dev](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/develop-to-dev.png) + +2. Click **Add Step** under the **Deploy to Dev** stage. Select **input** from the list and enter `@project-admin` in the **Message** field, which means the account `project-admin` will review this pipeline when it runs to this stage. Click **OK** to save it. + + ![input-message](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/input-message.png) + + {{< notice note >}} + + In KubeSphere 3.3, the account that can run a pipeline will be able to continue or terminate the pipeline if there is no reviewer specified. Pipeline creators and the account you specify will be able to continue or terminate a pipeline. + + {{}} + +3. 
Click **Add Step** under the **Deploy to Dev** stage again. Select **container** from the list, name it `maven`, and click **OK**. + +4. Click **Add Nesting Steps** in the `maven` container step. Select **withCredentials** from the list, fill in the following fields in the displayed dialog box, and click **OK**. + + - **Credential Name**: Select the kubeconfig credential you created, such as `demo-kubeconfig`. + - **Kubeconfig Variable**: Enter `KUBECONFIG_CONTENT`. + +5. Click **Add Nesting Steps** in the **withCredentials** step. Select **shell** from the list, enter the following commands in the displayed dialog box, and click **OK**. + + ```shell + mkdir ~/.kube + echo "$KUBECONFIG_CONTENT" > ~/.kube/config + envsubst < deploy/dev-ol/devops-sample-svc.yaml | kubectl apply -f - + envsubst < deploy/dev-ol/devops-sample.yaml | kubectl apply -f - + ``` + +6. If you want to receive email notifications when the pipeline runs successfully, click **Add Step** and select **mail** to add email information. Note that configuring the email server is optional, which means you can still run your pipeline if you skip this step. + + {{< notice note >}} + + For more information on configuring your email server, see [Set Email Server for KubeSphere Pipelines](../jenkins-email/). + + {{}} + +7. When you finish the steps above, click **Save** in the lower-right corner. You can see the pipeline now has a complete workflow with each stage clearly listed on the pipeline. When you define a pipeline using the graphical editing panel, KubeSphere automatically creates its corresponding Jenkinsfile. Click **Edit Jenkinsfile** to view the Jenkinsfile. + + {{< notice note >}} + + On the **Pipelines** page, you can click icon on the right side of the pipeline and then select **Copy** to create a copy of it. If you need to concurrently run multiple pipelines that don't contain multiple branches, you can select all of these pipelines and then click **Run** to run them in a batch. + + {{}} + +### Step 5: Run a pipeline + +1. You need to manually run the pipeline that is created through the graphical editing panel. Click **Run**, and you can see three string parameters defined in Step 3. Click **OK** to run the pipeline. + + ![run-pipeline](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/run-pipeline.png) + +2. To see the status of a pipeline, go to the **Run Records** tab and click the record you want to view. + +3. Wait for a while and the pipeline stops at the stage **Deploy to Dev** if it runs successfully. As the reviewer of the pipeline, `project-admin` needs to approve it before resources are deployed to the development environment. + + ![pipeline-successful](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/pipeline-successful.jpg) + +4. Log out of KubeSphere and log back in to the console as `project-admin`. Go to your DevOps project and click the pipeline `graphical-pipeline`. Under the **Run Records** tab, click the record to be reviewed. To approve the pipeline, click **Proceed**. + +### Step 6: View pipeline details + +1. Log in to the console as `project-regular`. Go to your DevOps project and click the pipeline `graphical-pipeline`. Under the **Run Records** tab, click the record marked with **Successful** under **Status**. + +2. If everything runs successfully, you can see that all stages are completed. + +3. Click **View Logs** in the upper-right corner to inspect all the logs. 
Click each stage to see detailed logs of it. You can debug any problems based on the logs which also can be downloaded locally for further analysis. + +### Step 7: Download the artifact + +Click the **Artifacts** tab and then click the icon on the right to download the artifact. + +![download-artifact](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/download-artifact.png) + +### Step 8: View code analysis results + +On the **Code Check** page, view the code analysis result of this example pipeline, which is provided by SonarQube. If you do not configure SonarQube in advance, this section is not available. For more information, see [Integrate SonarQube into Pipelines](../../../how-to-integrate/sonarqube/). + +![sonarqube-result-detail](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/sonarqube-result-detail.png) + +### Step 9: Verify Kubernetes resources + +1. If every stage of the pipeline runs successfully, a Docker image will be automatically built and pushed to your Docker Hub repository. Ultimately, the pipeline automatically creates a Deployment and a Service in the project you set beforehand. + +2. Go to the project (for example, `kubesphere-sample-dev` in this tutorial), click **Workloads** under **Application Workloads**, and you can see the Deployment appears in the list. + +3. In **Services**, you can find the port number of the example Service is exposed through a NodePort. To access the Service, visit `:`. + + {{< notice note >}} + + You may need to configure port forwarding rules and open the port in your security group before you access the Service. + + {{}} + +4. Now that the pipeline has run successfully, an image will be pushed to Docker Hub. Log in to Docker Hub and check the result. + + ![dockerhub-image](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/dockerhub-image.png) + +5. The app is named `devops-sample` as it is the value of `APP_NAME` and the tag is the value of `SNAPSHOT-$BUILD_NUMBER`. `$BUILD_NUMBER` is the serial number of a record under the **Run Records** tab. + +6. If you set the email server and add the email notification step in the final stage, you can also receive the email message. + +## See Also + +[Create a Pipeline Using a Jenkinsfile](../create-a-pipeline-using-jenkinsfile/) + +[Choose Jenkins Agent](../choose-jenkins-agent/) + +[Set Email Server for KubeSphere Pipelines](../jenkins-email/) \ No newline at end of file diff --git a/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-jenkinsfile.md b/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-jenkinsfile.md new file mode 100644 index 000000000..1c401cfa2 --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-jenkinsfile.md @@ -0,0 +1,281 @@ +--- +title: "Create a Pipeline Using a Jenkinsfile" +keywords: 'KubeSphere, Kubernetes, Docker, Spring Boot, Jenkins, DevOps, CI/CD, Pipeline' +description: "Learn how to create and run a pipeline by using an example Jenkinsfile." +linkTitle: "Create a Pipeline Using a Jenkinsfile" +weight: 11212 +--- + +A Jenkinsfile is a text file that contains the definition of a Jenkins pipeline and is checked into source control. As it stores the entire workflow as code, it underpins the code review and iteration process of a pipeline. 
For more information, see [the official documentation of Jenkins](https://www.jenkins.io/doc/book/pipeline/jenkinsfile/). + +This tutorial demonstrates how to create a pipeline based on a Jenkinsfile from a GitHub repository. Using the Jenkins pipeline, you deploy an example application to a development environment and a production environment respectively, both of which are accessible externally. + +{{< notice note >}} + +Two types of pipelines can be created in KubeSphere: pipelines created based on a Jenkinsfile in SCM, which is introduced in this tutorial, and [pipelines created through the graphical editing panel](../create-a-pipeline-using-graphical-editing-panel/). The Jenkinsfile in SCM requires an internal Jenkinsfile in Source Control Management (SCM). In other words, the Jenkinsfile serves as part of SCM. The KubeSphere DevOps system automatically builds a CI/CD Jenkins pipeline based on the existing Jenkinsfile of the code repository. You can define workflows such as `stage` and `step`. + +{{}} + +## Prerequisites + +- You need to have a [Docker Hub](https://hub.docker.com/) account and a [GitHub](https://github.com/) account. +- You need to [enable the KubeSphere DevOps system](../../../../pluggable-components/devops/). +- You need to create a workspace, a DevOps project, and a user (`project-regular`). This account needs to be invited to the DevOps project with the `operator` role. See [Create Workspaces, Projects, Users and Roles](../../../../quick-start/create-workspace-and-project/) if they are not ready. +- You need to set a CI dedicated node for running pipelines. Refer to [Set a CI Node for Dependency Caching](../../../../devops-user-guide/how-to-use/devops-settings/set-ci-node/). +- You need to install and configure SonarQube. Refer to [Integrate SonarQube into Pipeline](../../../../devops-user-guide/how-to-integrate/sonarqube/). If you skip this part, there is no **SonarQube Analysis** stage below. + +## Jenkins Pipeline Overview + +There are eight stages in this example pipeline, as shown below. + +![Pipeline Overview](https://pek3b.qingstor.com/kubesphere-docs/png/20190512155453.png#align=left&display=inline&height=1302&originHeight=1302&originWidth=2180&search=&status=done&width=2180) + +{{< notice note >}} + +- **Stage 1. Checkout SCM**: Check out source code from the GitHub repository. +- **Stage 2. Unit test**: The pipeline will not proceed to the next stage until the unit test is passed. +- **Stage 3. SonarQube analysis**: Run SonarQube code quality analysis. +- **Stage 4. Build & push snapshot image**: Build the image based on selected branches in **Strategy Settings**. Push the image with the tag `SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER` to Docker Hub, where `$BUILD_NUMBER` is the operation serial number in the pipeline's activity list. +- **Stage 5. Push the latest image**: Tag the sonarqube branch as `latest` and push it to Docker Hub. +- **Stage 6. Deploy to dev**: Deploy the sonarqube branch to the development environment. Review is required for this stage. +- **Stage 7. Push with tag**: Generate the tag and release it to GitHub. The tag is pushed to Docker Hub. +- **Stage 8. Deploy to production**: Deploy the released tag to the production environment. + +{{}} + +## Hands-on Lab + +### Step 1: Create credentials + +1. Log in to the KubeSphere console as `project-regular`. Go to your DevOps project and create the following credentials in **Credentials** under **DevOps Project Settings**.
For more information about how to create credentials, see [Credential Management](../../../../devops-user-guide/how-to-use/devops-settings/credential-management/). + + {{< notice note >}} + + If there are any special characters such as `@` and `$` in your account or password, they can cause errors as a pipeline runs because they may not be recognized. In this case, you need to encode your account or password on some third-party websites first, such as [urlencoder](https://www.urlencoder.org/). After that, copy and paste the output for your credential information. + + {{}} + + | Credential ID | Type | Where to use | + | --------------- | ------------------- | ------------ | + | dockerhub-id | Account Credentials | Docker Hub | + | github-id | Account Credentials | GitHub | + | demo-kubeconfig | kubeconfig | Kubernetes | + +2. You need to create an additional credential (`sonar-token`) for SonarQube, which is used in stage 3 (SonarQube analysis) mentioned above. Refer to [Create SonarQube Token for New Project](../../../../devops-user-guide/how-to-integrate/sonarqube/#create-a-sonarqube-token-for-a-new-project) to enter your SonarQube token in the **Token** field for a credential of the **Access token** type. Click **OK** to finish. + +3. You also need to create a GitHub personal access token with the permission as shown in the below image, and then use the generated token to create Account Credentials (for example, `github-token`) for GitHub authentication in your DevOps project. + + ![github-token-scope](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/github-token-scope.png) + + {{< notice note >}} + + To create a GitHub personal access token, go to **Settings** of your GitHub account, click **Developer settings**, select **Personal access tokens**, and click **Generate new token**. + + {{}} + +4. In total, you have five credentials in the list. + +### Step 2: Modify the Jenkinsfile in your GitHub repository + +1. Log in to GitHub. Fork [devops-maven-sample](https://github.com/kubesphere/devops-maven-sample) from the GitHub repository to your own GitHub account. + +2. In your own GitHub repository of **devops-maven-sample**, click the file `Jenkinsfile-online` in the root directory. + +3. Click the edit icon on the right to edit environment variables. + + ![jenkins-edit-2](/images/docs/v3.3/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/jenkins-edit-2.jpg) + + | Items | Value | Description | + | :--- | :--- | :--- | + | DOCKER\_CREDENTIAL\_ID | dockerhub-id | The **Name** you set in KubeSphere for your Docker Hub account. | + | GITHUB\_CREDENTIAL\_ID | github-id | The **Name** you set in KubeSphere for your GitHub account. It is used to push tags to your GitHub repository. | + | KUBECONFIG\_CREDENTIAL\_ID | demo-kubeconfig | The **Name** you set in KubeSphere for your kubeconfig. It is used to access a running Kubernetes cluster. | + | REGISTRY | docker.io | It defaults to `docker.io`, serving as the address of pushing images. | + | DOCKERHUB\_NAMESPACE | your-dockerhub-account | Replace it with your Docker Hub's account name. It can be the Organization name under the account. | + | GITHUB\_ACCOUNT | your-github-account | Replace it with your GitHub account name. For example, your GitHub account name is `kubesphere` if your GitHub address is  `https://github.com/kubesphere/`. It can also be the account's Organization name. | + | APP\_NAME | devops-maven-sample | The application name. 
| + | SONAR\_CREDENTIAL\_ID | sonar-token | The **Name** you set in KubeSphere for the SonarQube token. It is used for code quality analysis. | + + {{< notice note >}} + + The command parameter `-o` of Jenkinsfile's `mvn` indicates that the offline mode is enabled. Relevant dependencies have already been downloaded in this tutorial to save time and to adapt to network interference in certain environments. The offline mode is on by default. + + {{}} + +4. After you edit the environment variables, click **Commit changes** at the bottom of the page, which updates the file in the sonarqube branch. + +### Step 3: Create projects + +You need to create two projects, such as `kubesphere-sample-dev` and `kubesphere-sample-prod`, which represent the development environment and the production environment respectively. Related Deployments and Services of the app will be created automatically in these two projects once the pipeline runs successfully. + +{{< notice note >}} + +The account `project-admin` needs to be created in advance since it is the reviewer of the CI/CD pipeline. See [Create Workspaces, Projects, Users and Roles](../../../../quick-start/create-workspace-and-project/) for more information. + +{{}} + +1. Use the account `project-admin` to log in to KubeSphere. In the same workspace where you create the DevOps project, create two projects as below. Make sure you invite `project-regular` to these two projects with the role of `operator`. + + | Project Name | Alias | + | ---------------------- | ----------------------- | + | kubesphere-sample-dev | development environment | + | kubesphere-sample-prod | production environment | + +2. After those projects are created, they will be listed in the project list. + +### Step 4: Create a pipeline + +1. Log out of KubeSphere and log back in as `project-regular`. Go to the DevOps project `demo-devops` and click **Create**. + +2. Provide the basic information in the displayed dialog box. Name it `jenkinsfile-in-scm` and specify a code repository under **Code Repository**. + +3. In the **GitHub** tab, select **github-token** from the drop-down list under **Credential**, and then click **OK** to select your repository. + +4. Choose your GitHub account. All the repositories related to this token will be listed on the right. Select **devops-maven-sample** and click **Select**. Click **Next** to continue. + +5. In **Advanced Settings**, select the checkbox next to **Delete outdated branches**. In this tutorial, you can use the default values of **Branch Retention Period (days)** and **Maximum Branches**. + + **Delete outdated branches** means that the branch records are discarded altogether. A branch record includes console output, archived artifacts, and other relevant metadata of specific branches. Keeping fewer branches saves the disk space that Jenkins is using. KubeSphere provides two options to determine when old branches are discarded: + + - Branch Retention Period (days). Branches that exceed the retention period are deleted. + + - Maximum Branches. The earliest branch is deleted when the number of branches exceeds the maximum number. + + {{< notice note >}} + + **Branch Retention Period (days)** and **Maximum Branches** apply to branches at the same time. As long as a branch meets the condition of either field, it is deleted. For example, if you specify 2 as the retention period and 3 as the maximum number of branches, any branch that exceeds either number is deleted.
KubeSphere prepopulates these two fields with 7 and 5 by default respectively. + + {{}} + +6. In **Strategy Settings**, KubeSphere offers four strategies by default. You can delete **Discover PRs from Forks** as this strategy will not be used in this example. You do not need to change the setting and can use the default value directly. + + As a Jenkins pipeline runs, the Pull Request (PR) submitted by developers will also be regarded as a separate branch. + + **Discover Branches** + + - **Exclude branches filed as PRs**. The source branch is not scanned such as the origin's master branch. These branches need to be merged. + - **Include only branches filed as PRs**. Only scan the PR branch. + - **Include all branches**. Pull all the branches from the repository origin. + + **Discover PRs from Origin** + + - **Pull the code with the PR merged**. A pipeline is created and runs based on the source code after the PR is merged into the target branch. + - **Pull the code at the point of the PR**. A pipeline is created and runs based on the source code of the PR itself. + - **Create two pipelines respectively**. KubeSphere creates two pipelines, one based on the source code after the PR is merged into the target branch, and the other based on the source code of the PR itself. + + {{< notice note >}} + + You have to choose GitHub as your code repository to enable the settings of **Strategy Settings** here. + + {{}} + +7. Scroll down to **Script Path**. The field specifies the Jenkinsfile path in the code repository. It indicates the repository's root directory. If the file location changes, the script path also needs to be changed. Change it to `Jenkinsfile-online`, which is the file name of Jenkinsfile in the example repository located in the root directory. + +8. In **Scan Trigger**, select **Scan periodically** and set the interval to **5 minutes**. Click **Create** to finish. + + {{< notice note >}} + + You can set a specific interval to allow pipelines to scan remote repositories, so that any code updates or new PRs can be detected based on the strategy you set in **Strategy Settings**. + + {{}} + +### Step 5: Run a pipeline + +1. After a pipeline is created, click its name to go to its details page. + + {{< notice note >}} + + - You can click icon on the right side of the pipeline and then select **Copy** to create a copy of it. If you need to concurrently run multiple pipelines that don't contain multiple branches, you can select all of these pipelines and then click **Run** to run them in a batch. + - The pipeline details page shows **Sync Status**. It reflects the synchronization result between KubeSphere and Jenkins, and you can see the **Successful** icon if the synchronization is successful. + + {{}} + +2. Under **Run Records**, three branches are being scanned. Click **Run** on the right and the pipeline runs based on the behavioral strategy you set. Select **sonarqube** from the drop-down list and add a tag number such as `v0.0.2`. Click **OK** to trigger a new activity. + + {{< notice note >}} + + - If you do not see any run records on this page, you need to refresh your browser manually or click **Scan Repository** from the drop-down menu (the **More** button). + - The tag name is used to generate releases and images with the tag in GitHub and Docker Hub. An existing tag name cannot be used again for the field `TAG_NAME`. Otherwise, the pipeline will not be running successfully. + + {{}} + +3. Wait for a while, and you can see some activities stop and some fail. 
Click the first one to view details. + + {{< notice note >}} + + Activity failures may be caused by different factors. In this example, only the Jenkinsfile of the sonarqube branch was changed when you edited the environment variables in the steps above. In contrast, these variables in the dependency and master branches remain unchanged (namely, they still point to a wrong GitHub account and Docker Hub account), resulting in the failures. You can click a failed activity and inspect its logs to see details. Other reasons for failures may be network issues, incorrect code in the Jenkinsfile, and so on. + + {{}} + +4. The pipeline pauses at the stage `deploy to dev`. You need to click **Proceed** manually. Note that the pipeline will be reviewed three times as `deploy to dev`, `push with tag`, and `deploy to production` are defined in the Jenkinsfile respectively. + + In a development or production environment, someone with higher authority (for example, a release manager) is required to review the pipeline, the images, as well as the code analysis result. They have the authority to determine whether the pipeline can go to the next stage. In the Jenkinsfile, you use the section `input` to specify who reviews the pipeline. If you want to specify a user (for example, `project-admin`) to review it, you can add a field in the Jenkinsfile. If there are multiple users, you need to use commas to separate them as follows: + + ```groovy + ··· + input(id: 'release-image-with-tag', message: 'release image with tag?', submitter: 'project-admin,project-admin1') + ··· + ``` + + {{< notice note >}} + + In KubeSphere 3.3, if you do not specify a reviewer, the user that can run a pipeline will be able to continue or terminate the pipeline. If you specify a reviewer, the user who created the pipeline and the user specified will be able to continue or terminate the pipeline. + + {{}} + +### Step 6: Check pipeline status + +1. In **Task Status**, you can see how a pipeline is running. Please note that the pipeline will keep initializing for several minutes after it is just created. There are eight stages in the sample pipeline and they have been defined separately in [Jenkinsfile-online](https://github.com/kubesphere/devops-maven-sample/blob/sonarqube/Jenkinsfile-online). + +2. Check the pipeline running logs by clicking **View Logs** in the upper-right corner. You can see the dynamic log output of the pipeline, including any errors that may stop the pipeline from running. For each stage, you can click it to inspect logs, which can be downloaded to your local machine for further analysis. + +### Step 7: Verify results + +1. Once the pipeline has run successfully, click **Code Check** to check the results through SonarQube as follows. + +2. The Docker image built through the pipeline has also been successfully pushed to Docker Hub, as it is defined in the Jenkinsfile. In Docker Hub, you will find the image with the tag `v0.0.2` that is specified before the pipeline runs. + +3. At the same time, a new tag and a new release have been generated in GitHub. + +4. The sample application will be deployed to `kubesphere-sample-dev` and `kubesphere-sample-prod` with corresponding Deployments and Services created.
Go to these two projects and check the expected results: + + | Environment | URL | Namespace | Deployment | Service | + | :--- | :--- | :--- | :--- | :--- | + | Development | `http://{$NodeIP}:{$30861}` | kubesphere-sample-dev | ks-sample-dev | ks-sample-dev | + | Production | `http://{$NodeIP}:{$30961}` | kubesphere-sample-prod | ks-sample | ks-sample | + + {{< notice note >}} + + You may need to open the port in your security groups so that you can access the app with the URL. + + {{}} + +### Step 8: Access the example Service + +1. To access the Service, log in to KubeSphere as `admin` to use the **kubectl** from **Toolbox**. Go to the project `kubesphere-sample-dev`, and click `ks-sample-dev` in **Services** under **Application Workloads**. Obtain the endpoint displayed on the details page to access the Service. + +2. Use the **kubectl** from **Toolbox** in the lower-right corner by executing the following command: + + ```bash + curl 10.233.120.230:8080 + ``` + +3. Expected output: + + ```bash + Really appreciate your star, that's the power of our life. + ``` + + {{< notice note >}} + + You can run `curl` against the Service endpoints, `{$Virtual IP}:{$Port}`, or `{$Node IP}:{$NodePort}`. + + {{}} + +4. Similarly, you can test the Service in the project `kubesphere-sample-prod` and you will see the same result. + + ```bash + $ curl 10.233.120.236:8080 + Really appreciate your star, that's the power of our life. + ``` + + diff --git a/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/customize-jenkins-agent.md b/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/customize-jenkins-agent.md new file mode 100644 index 000000000..d5804ef58 --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/customize-jenkins-agent.md @@ -0,0 +1,70 @@ +--- +title: "Customize Jenkins Agent" +keywords: "KubeSphere, Kubernetes, DevOps, Jenkins, Agent" +description: "Learn how to customize a Jenkins agent on KubeSphere." +linkTitle: "Customize Jenkins Agent" +Weight: 112191 +--- + +If you need to use a Jenkins agent that runs on a specific environment, for example, JDK 11, you can customize a Jenkins agent on KubeSphere. + +This document describes how to customize a Jenkins agent on KubeSphere. + +## Prerequisites + +- You have enabled [the KubeSphere DevOps System](../../../../pluggable-components/devops/). + +## Customize a Jenkins agent + +1. Log in to the web console of KubeSphere as `admin`. + +2. Click **Platform** in the upper-left corner, select **Cluster Management**, and click **Configmaps** under **Configuration** on the left navigation pane. + +3. On the **Configmaps** page, enter `jenkins-casc-config` in the search box and press **Enter**. + +4. Click `jenkins-casc-config` to go to its details page, click **More**, and select **Edit YAML**. + +5. In the displayed dialog box, enter the following code under the `data.jenkins_user.yaml:jenkins.clouds.kubernetes.templates` section and click **OK**. + + ```yaml + - name: "maven-jdk11" # The name of the customized Jenkins agent. + label: "maven jdk11" # The label of the customized Jenkins agent. To specify multiple labels, use spaces to separate them. + inheritFrom: "maven" # The name of the existing pod template from which this customized Jenkins agent inherits. + containers: + - name: "maven" # The container name specified in the existing pod template from which this customized Jenkins agent inherits. + image: "kubespheredev/builder-maven:v3.2.0jdk11" # This image is used for testing purposes only. You can use your own images.
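+       # The fields below are optional and are not part of the original example. They only
+       # illustrate other container settings commonly supported by the Jenkins Kubernetes
+       # plugin, such as TTY and resource requests/limits. Keep the same indentation as the
+       # "image" field above, or omit these lines entirely.
+       ttyEnabled: true
+       resourceRequestCpu: "200m"
+       resourceRequestMemory: "256Mi"
+       resourceLimitCpu: "2000m"
+       resourceLimitMemory: "4096Mi"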
+ ``` + + {{< notice note >}} + + Make sure you follow the indentation in the YAML file. + + {{}} + +6. Wait for at least 70 seconds until your changes are automatically reloaded. + +7. To use the custom Jenkins agent, refer to the following sample Jenkinsfile to specify the label and container name of the custom Jenkins agent accordingly when creating a pipeline. + + ```groovy + pipeline { + agent { + node { + label 'maven && jdk11' + } + } + stages { + stage('Print Maven and JDK version') { + steps { + container('maven') { + sh ''' + mvn -v + java -version + ''' + } + } + } + } + } + ``` + + diff --git a/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/gitlab-multibranch-pipeline.md b/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/gitlab-multibranch-pipeline.md new file mode 100644 index 000000000..3d4563fe9 --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/gitlab-multibranch-pipeline.md @@ -0,0 +1,129 @@ +--- +title: "Create a Multi-branch Pipeline with GitLab" +keywords: 'KubeSphere, Kubernetes, GitLab, Jenkins, Pipelines' +description: 'Learn how to create a multi-branch pipeline with GitLab on KubeSphere.' +linkTitle: "Create a Multi-branch Pipeline with GitLab" +weight: 11215 +--- + +[GitLab](https://about.gitlab.com/) is an open source code repository platform that provides public and private repositories. It is a complete DevOps platform that enables professionals to perform their tasks in a project. + +In KubeSphere 3.1.x and later, you can create a multi-branch pipeline with GitLab in your DevOps project. This tutorial demonstrates how to create a multi-branch pipeline with GitLab. + +## Prerequisites + +- You need to have a [GitLab](https://gitlab.com/users/sign_in) account and a [Docker Hub](https://hub.docker.com/) account. +- You need to [enable the KubeSphere DevOps system](../../../../pluggable-components/devops/). +- You need to create a workspace, a DevOps project and a user (`project-regular`). This user must be invited to the DevOps project with the `operator` role. For more information, refer to [Create Workspaces, Projects, Users and Roles](../../../../quick-start/create-workspace-and-project/). + +## Hands-on Lab + +### Step 1: Create credentials + +1. Log in to the KubeSphere console as `project-regular`. Go to your DevOps project and create the following credentials in **Credentials** under **DevOps Project Settings**. For more information about how to create credentials, see [Credential Management](../../../../devops-user-guide/how-to-use/devops-settings/credential-management/). + + {{< notice note >}} + + If there are any special characters such as `@` and `$` in your account or password, they can cause errors as a pipeline runs because they may not be recognized. In this case, you need to encode your account or password on some third-party websites first, such as [urlencoder](https://www.urlencoder.org/). After that, copy and paste the output for your credential information. + + {{}} + + | Credential ID | Type | Where to use | + | --------------- | ------------------- | ------------ | + | dockerhub-id | Account Credentials | Docker Hub | + | gitlab-id | Account Credentials | GitLab | + | demo-kubeconfig | kubeconfig | Kubernetes | + +2. After creation, you can see the credentials in the list. + +### Step 2: Modify the Jenkinsfile in your GitLab repository + +1. Log in to GitLab and create a public project. 
Click **Import project/repository**, select **Repo by URL** to enter the URL of [devops-maven-sample](https://github.com/kubesphere/devops-maven-sample), select **Public** for **Visibility Level**, and then click **Create project**. + +2. In the project just created, create a new branch from the master branch and name it `gitlab-demo`. + +3. In the `gitlab-demo` branch, click the file `Jenkinsfile-online` in the root directory. + +4. Click **Edit**, change `GITHUB_CREDENTIAL_ID`, `GITHUB_ACCOUNT`, and `@github.com` to `GITLAB_CREDENTIAL_ID`, `GITLAB_ACCOUNT`, and `@gitlab.com` respectively, and then edit the following items. You also need to change the value of `branch` in the `push latest` and `deploy to dev` stages to `gitlab-demo`. + + | Item | Value | Description | + | -------------------- | --------- | ------------------------------------------------------------ | + | GITLAB_CREDENTIAL_ID | gitlab-id | The **Name** you set in KubeSphere for your GitLab account. It is used to push tags to your GitLab repository. | + | DOCKERHUB_NAMESPACE | felixnoo | Replace it with your Docker Hub’s account name. It can be the Organization name under the account. | + | GITLAB_ACCOUNT | felixnoo | Replace it with your GitLab account name. It can also be the account’s Group name. | + + {{< notice note >}} + + For more information about the environment variables in the Jenkinsfile, refer to [Create a Pipeline Using a Jenkinsfile](../create-a-pipeline-using-jenkinsfile/#step-2-modify-the-jenkinsfile-in-your-github-repository). + + {{}} + +5. Click **Commit changes** to update this file. + +### Step 3: Create projects + +You need to create two projects, such as `kubesphere-sample-dev` and `kubesphere-sample-prod`, which represent the development environment and the production environment respectively. For more information, refer to [Create a Pipeline Using a Jenkinsfile](../create-a-pipeline-using-jenkinsfile/#step-3-create-projects). + +### Step 4: Create a pipeline + +1. Log in to the KubeSphere web console as `project-regular`. Go to your DevOps project and click **Create** to create a new pipeline. + +2. Provide the basic information in the displayed dialog box. Name it `gitlab-multi-branch` and select a code repository. + +3. On the **GitLab** tab, select the default option `https://gitlab.com` for **GitLab Server Address**, enter the username of the GitLab project owner for **Project Group/Owner**, and then select the `devops-maven-sample` repository from the drop-down list for **Code Repository**. Click **√** in the lower-right corner and then click **Next**. + + {{< notice note >}} + + If you want to use a private repository from GitLab, refer to the following steps: + + - Go to **User Settings > Access Tokens** on GitLab to create an access token with API and read_repository permissions. + - [Log in to the Jenkins dashboard](../../../how-to-integrate/sonarqube/#step-5-add-the-sonarqube-server-to-jenkins), go to **Manage Jenkins > Manage Credentials** to use your GitLab token to create a Jenkins credential for accessing GitLab, and go to **Manage Jenkins > Configure System** to add the credential in **GitLab Server**. + - In your DevOps project, select **DevOps Project Settings > Credentials** to use your GitLab token to create a credential. You have to specify the credential for **Credential** on the **GitLab** tab when creating a pipeline so that the pipeline can pull code from your private GitLab repository. + + {{}} + +4. On the **Advanced Settings** tab, scroll down to **Script Path**. 
Change it to `Jenkinsfile-online` and then click **Create**. + + {{< notice note >}} + + The field specifies the Jenkinsfile path in the code repository. It indicates the repository’s root directory. If the file location changes, the script path also needs to be changed. + + {{}} + +### Step 5: Run a pipeline + +1. After a pipeline is created, it is displayed in the list. Click its name to go to its details page. + +2. Click **Run** on the right. In the displayed dialog box, select **gitlab-demo** from the drop-down list and add a tag number such as `v0.0.2`. Click **OK** to trigger a new run. + + {{< notice note >}} + + The pipeline pauses at the stage `deploy to dev`. You need to click **Proceed** manually. Note that the pipeline will be reviewed three times as `deploy to dev`, `push with tag`, and `deploy to production` are defined in the Jenkinsfile respectively. + + {{}} + +### Step 6: Check the pipeline status + +1. In the **Task Status** tab, you can see how a pipeline is running. Check the pipeline running logs by clicking **View Logs** in the upper-right corner. + +2. You can see the dynamic log output of the pipeline, including any errors that may stop the pipeline from running. For each stage, you can click it to inspect logs, which can also be downloaded to your local machine for further analysis. + +### Step 7: Verify results + +1. The Docker image built through the pipeline has been successfully pushed to Docker Hub, as it is defined in the Jenkinsfile. In Docker Hub, you will find the image with the tag `v0.0.2` that is specified before the pipeline runs. + +2. At the same time, a new tag is generated in GitLab. + +3. The sample application will be deployed to `kubesphere-sample-dev` and `kubesphere-sample-prod` with corresponding Deployments and Services created. + + | Environment | URL | Namespace | Deployment | Service | + | ----------- | --------------------------- | ---------------------- | ------------- | ------------- | + | Development | `http://{$NodeIP}:{$30861}` | kubesphere-sample-dev | ks-sample-dev | ks-sample-dev | + | Production | `http://{$NodeIP}:{$30961}` | kubesphere-sample-prod | ks-sample | ks-sample | + + {{< notice note >}} + + You may need to open the port in your security groups so that you can access the app with the URL. For more information, refer to [Access the example Service](../create-a-pipeline-using-jenkinsfile/#step-8-access-the-example-service). + + {{}} + diff --git a/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/jenkins-email.md b/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/jenkins-email.md new file mode 100644 index 000000000..a94c5ff48 --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/jenkins-email.md @@ -0,0 +1,42 @@ +--- +title: "Set the Email Server for KubeSphere Pipelines" +keywords: 'KubeSphere, Kubernetes, notification, jenkins, devops, ci/cd, pipeline, email server' +description: 'Set the email server to receive notifications of your Jenkins pipelines.' +linkTitle: "Set Email Server for KubeSphere Pipelines" +Weight: 11218 +--- + + +The built-in Jenkins cannot share the same email configuration with the platform notification system. Thus, you need to configure email server settings for KubeSphere DevOps pipelines separately. + +## Prerequisites + +- You need to enable the [KubeSphere DevOps System](../../../../pluggable-components/devops/). +- You need a user granted a role including the **Cluster Management** permission. 
For example, you can log in to the console as `admin` directly or create a new role with the permission and assign it to a user. + +## Set the Email Server + +1. Click **Platform** in the upper-left corner and select **Cluster Management**. + +2. If you have enabled the [multi-cluster feature](../../../../multicluster-management/) with member clusters imported, you can select a specific cluster to view its nodes. If you have not enabled the feature, refer to the next step directly. + +3. Go to **Workloads** under **Application Workloads**, and select the project **kubesphere-devops-system** from the drop-down list. Click icon on the right of `devops-jenkins` and select **Edit YAML** to edit its YAML. + +4. Scroll down to the fields in the image below which you need to specify. Click **OK** when you finish to save changes. + + {{< notice warning >}} + + Once you modify the Email server in the `devops-jenkins` Deployment, it will restart itself. Consequently, the DevOps system will be unavailable for a few minutes. Please make such modification at an appropriate time. + + {{}} + + ![set-jenkins-email](/images/docs/v3.3/devops-user-guide/using-devops/jenkins-email/set-jenkins-email.png) + + | Environment Variable Name | Description | + | ------------------------- | -------------------------------- | + | EMAIL\_SMTP\_HOST | SMTP server address | + | EMAIL\_SMTP\_PORT | SMTP server port (for example, 25) | + | EMAIL\_FROM\_ADDR | Email sender address | + | EMAIL\_FROM\_NAME | Email sender name | + | EMAIL\_FROM\_PASS | Email sender password | + | EMAIL\_USE\_SSL | SSL configuration enabled or not | diff --git a/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/jenkins-setting.md b/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/jenkins-setting.md new file mode 100644 index 000000000..daa858cad --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/jenkins-setting.md @@ -0,0 +1,50 @@ +--- +title: "Jenkins System Settings" +keywords: 'Kubernetes, KubeSphere, Jenkins, CasC' +description: 'Learn how to customize your Jenkins settings.' +linkTitle: 'Jenkins System Settings' +Weight: 11216 +--- + +Jenkins is powerful and flexible and it has become the de facto standard for CI/CD workflows. Nevertheless, many plugins require users to set system-level configurations before they can be put to use. + +The KubeSphere DevOps System offers containerized CI/CD functions based on Jenkins. To provide users with a schedulable Jenkins environment, KubeSphere uses **Configuration as Code** for Jenkins system settings, which requires users to log in to the Jenkins dashboard and reload the configuration after it is modified. In the current release, Jenkins system settings are not available on the KubeSphere console, which will be supported in upcoming releases. + +This tutorial demonstrates how to set up Jenkins and reload configurations on the Jenkins dashboard. + +## Prerequisites + +You have enabled [the KubeSphere DevOps System](../../../../pluggable-components/devops/). + +## Jenkins Configuration as Code + +KubeSphere has the Jenkins Configuration as Code plugin installed by default to allow you to define the desired state of your Jenkins configuration through YAML files and makes it easy to reproduce your Jenkins configurations including plugin configurations. You can find descriptions about specific Jenkins configurations and example YAML files [in this directory](https://github.com/jenkinsci/configuration-as-code-plugin/tree/master/demos). 
+ +Besides, you can find the `formula.yaml` file in the repository [ks-jenkins](https://github.com/kubesphere/ks-jenkins), where you can view plugin versions and customize these versions based on your needs. + +![plugin-version](/images/docs/v3.3/devops-user-guide/using-devops/jenkins-system-settings/plugin-version.png) + +## Modify the ConfigMap + +It is recommended that you configure Jenkins in KubeSphere through Configuration as Code (CasC). The built-in Jenkins CasC file is stored as a [ConfigMap](../../../../project-user-guide/configuration/configmaps/). + +1. Log in to KubeSphere as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. + +2. If you have enabled the [multi-cluster feature](../../../../multicluster-management/) with member clusters imported, you can select a specific cluster to edit the ConfigMap. If you have not enabled the feature, refer to the next step directly. + +3. On the left navigation pane, select **ConfigMaps** under **Configuration**. On the **ConfigMaps** page, select `kubesphere-devops-system` from the drop-down list and click `jenkins-casc-config`. + +4. On the details page, click **Edit YAML** from the **More** drop-down list. + +5. The configuration template for `jenkins-casc-config` is a YAML file under the `data.jenkins_user.yaml:` section. You can modify the container image, label, resource requests and limits, etc. in the broker (Kubernetes Jenkins agent) in the ConfigMap or add a container in the podTemplate. When you finish, click **OK**. + +6. Wait for at least 70 seconds until your changes are automatically reloaded. + +7. For more information about how to set up Jenkins via CasC, see the [Jenkins documentation](https://github.com/jenkinsci/configuration-as-code-plugin). + + {{< notice note >}} + + In the current version, not all plugins support CasC settings. CasC will only overwrite plugin configurations that are set up through CasC. + + {{}} + diff --git a/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/jenkins-shared-library.md b/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/jenkins-shared-library.md new file mode 100644 index 000000000..def58cf76 --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/jenkins-shared-library.md @@ -0,0 +1,122 @@ +--- +title: "Use Jenkins Shared Libraries in a Pipeline" +keywords: 'KubeSphere, Kubernetes, Jenkins, Shared Library, Pipelines' +description: 'Learn how to use Jenkins shared libraries in a pipeline.' +linkTitle: "Use Jenkins Shared Libraries in a Pipeline" +weight: 11217 +--- + +For Jenkins pipelines that contain the same stages or steps, one way to avoid repetition in the pipeline codes is to use Jenkins shared libraries in the Jenkinsfiles. + +This tutorial demonstrates how to use Jenkins shared libraries in KubeSphere DevOps pipelines. + +## Prerequisites + +- You need to [enable the KubeSphere DevOps system](../../../../pluggable-components/devops/). +- You need to create a workspace, a DevOps project and a user (`project-regular`). This user must be invited to the DevOps project with the `operator` role. For more information, refer to [Create Workspaces, Projects, Users and Roles](../../../../quick-start/create-workspace-and-project/). +- You need to have a Jenkins shared library available. This tutorial uses the Jenkins shared library in [a GitHub repository](https://github.com/devops-ws/jenkins-shared-library) as an example. + +## Configure a Shared Library on the Jenkins Dashboard + +1. 
[Log in to the Jenkins dashboard](../../../how-to-integrate/sonarqube/#step-5-add-the-sonarqube-server-to-jenkins) and click **Manage Jenkins** in the left navigation pane. + +2. Scroll down and click **Configure System**. + +3. Scroll down to **Global Pipeline Libraries** and click **Add**. + +4. Configure the fields as below. + + - **Name**. Set a name (for example, `demo-shared-library`) for the shared library so that you can import the shared library by referring to this name in a Jenkinsfile. + + - **Default version**. Set a branch name from the repository where you put your shared library as the default branch for importing your shared library. Enter `master` for this tutorial. + + - Under **Retrieval method**, select **Modern SCM**. + + - Under **Source Code Management**, select **Git** and enter the URL of the example repository for **Project Repository**. You have to configure **Credentials** if you use your own repository that requires the credentials for accessing it. + +5. When you finish editing, click **Apply**. + + {{< notice note >}} + + You can also configure [Folder-level Shared Libraries](https://www.jenkins.io/doc/book/pipeline/shared-libraries/#folder-level-shared-libraries). + + {{}} + +## Use the Shared Library in a Pipeline + +### Step 1: Create a pipeline + +1. Log in to the KubeSphere web console as `project-regular`. Go to your DevOps project and click **Create** on the **Pipelines** page. + +2. Set a name (for example, `demo-shared-library`) in the displayed dialog box and click **Next**. + +3. On the **Advanced Settings** tab, click **Create** to create a pipeline with the default settings. + +### Step 2: Edit the pipeline + +1. In the pipeline list, click the pipeline to go to its details page and click **Edit Jenkinsfile**. + +2. In the displayed dialog box, enter the following example Jenkinsfile. When you finish editing, click **OK**. + + ```groovy + library identifier: 'devops-ws-demo@master', retriever: modernSCM([ + $class: 'GitSCMSource', + remote: 'https://github.com/devops-ws/jenkins-shared-library', + traits: [[$class: 'jenkins.plugins.git.traits.BranchDiscoveryTrait']] + ]) + + pipeline { + agent any + + stages { + stage('Demo') { + steps { + script { + mvn.fake() + } + } + } + } + } + ``` + + {{< notice note >}} + + You can specify a `label` for `agent` based on your needs. + + {{}} + +3. Alternatively, you can use a Jenkinsfile starting with `@Library('') _`. If you use this type of Jenkinsfile, you need to configure the shared library on the Jenkins dashboard in advance. In this tutorial, you can use the following example Jenkinsfile. + + ```groovy + @Library('demo-shared-library') _ + + pipeline { + agent any + + stages { + stage('Demo') { + steps { + script { + mvn.fake() + } + } + } + } + } + ``` + + {{< notice note >}} + + You can use `@Library('demo-shared-library@') _` to specify a specific branch. + + {{}} + +### Step 3: Run the pipeline + +1. You can view the stage under the **Task Status** tab. Click **Run** to run it. + +2. After a while, the pipeline ran successfully. + +3. You can click the **Successful** record under **Run Records**, and then click **View Logs** to view the log details. 
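+
+To see how a call such as `mvn.fake()` in the example Jenkinsfiles above is resolved, it helps to know that a shared library exposes global variables as Groovy files under its `vars/` directory. The sketch below only illustrates that convention and assumes a hypothetical `vars/mvn.groovy` file; the actual content of the example repository may differ.
+
+```groovy
+// vars/mvn.groovy -- a hypothetical global variable in a Jenkins shared library.
+// Every method defined here becomes callable from a pipeline that loads the
+// library, for example as mvn.fake().
+def fake() {
+    echo 'Running a fake Maven step from the shared library'
+}
+
+// An optional call() method lets the variable itself be used as a step,
+// for example as mvn('clean package').
+def call(String goals = 'clean package') {
+    sh "mvn ${goals}"
+}
+```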
+ diff --git a/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/pipeline-settings.md b/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/pipeline-settings.md new file mode 100644 index 000000000..dee25753d --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/pipeline-settings.md @@ -0,0 +1,170 @@ +--- +title: "Pipeline Settings" +keywords: 'KubeSphere, Kubernetes, Docker, Jenkins, Pipelines' +description: 'Understand various pipeline properties in a DevOps project.' +linkTitle: "Pipeline Settings" +weight: 11214 +--- + +When you create a pipeline, you can customize its configurations through various settings. This document illustrates these settings in detail. + +## Prerequisites + +- You need to create a workspace, a DevOps project and a user (`project-regular`). This user must be invited to the DevOps project with the `operator` role. For more information, refer to [Create Workspaces, Projects, Users and Roles](../../../../quick-start/create-workspace-and-project/). +- You need to [enable the KubeSphere DevOps System](../../../../pluggable-components/devops/). + +## Basic Information + +On the **Basic Information** tab, you can customize the following information: + +- **Name**. The name of the pipeline. Pipelines in the same DevOps project must have different names. + +- **DevOps Project**. The DevOps project to which the pipeline belongs. + +- **Description**. The additional information to describe the pipeline. Description is limited to 256 characters. + +- **Code Repository (optional)**. You can select a code repository as the code source for the pipeline. You can select GitHub, GitLab, Bitbucket, Git, and SVN as the code source. + + {{< tabs >}} + + {{< tab "GitHub" >}} + + If you select **GitHub**, you have to specify the credential for accessing GitHub. If you have created a credential with your GitHub token in advance, you can select it from the drop-down list, or you can click **Create Credential** to create a new one. Click **OK** after selecting the credential and you can view your repository on the right. Click the **√** icon after you finish all operations. + + {{}} + + {{< tab "GitLab" >}} + + If you select **GitLab**, you have to specify the GitLab server address, project group/owner, and code repository. You also need to specify a credential if it is needed for accessing the code repository. Click the **√** icon after you finish all operations. + + {{}} + + {{< tab "Bitbucket" >}} + + If you select **Bitbucket**, you have to enter your Bitbucket server address. You can create a credential with your Bitbucket username and password in advance and then select the credential from the drop-down list, or you can click **Create Credential** to create a new one. Click **OK** after entering the information, and you can view your repository on the right. Click the **√** icon after you finish all operations. + + {{}} + + {{< tab "Git" >}} + + If you select **Git**, you have to specify the repository URL. You need to specify a credential if it is needed for accessing the code repository. You can also click **Create Credential** to create a new credential. Click the **√** icon after you finish all operations. + + {{}} + + {{< tab "SVN" >}} + + If you select **SVN**, you have to specify the repository URL and the credential. You can also specify the branch included and excluded based on your needs. Click the **√** icon after you finish all operations. 
+ + {{}} + + {{}} + +## Advanced Settings with Code Repository Specified + +If you specify a code repository, you can customize the following configurations on the **Advanced Settings** tab: + +### Branch Settings + +**Delete outdated branches**. Delete outdated branches automatically. The branch record is deleted all together. The branch record includes console output, archived artifacts and other relevant metadata of specific branches. Fewer branches mean that you can save the disk space that Jenkins is using. KubeSphere provides two options to determine when old branches are discarded: + +- **Branch Retention Period (days)**. Branches that exceeds the retention period are deleted. + +- **Maximum Branches**. When the number of branches exceeds the maximum number, the earliest branch is deleted. + + {{< notice note >}} + + **Branch Retention Period (days)** and **Maximum Branches** apply to branches at the same time. As long as a branch meets the condition of either field, it will be discarded. For example, if you specify 2 as the number of retention days and 3 as the maximum number of branches, any branches that exceed either number will be discarded. KubeSphere prepopulates these two fields with 7 and 5 by default respectively. + + {{}} + +### Strategy Settings + +In **Strategy Settings**, KubeSphere offers four strategies by default. As a Jenkins pipeline runs, the Pull Request (PR) submitted by developers will also be regarded as a separate branch. + +**Discover Branches** + +- **Exclude branches filed as PRs**. The branches filed as PRs are excluded. +- **Include only branches filed as PRs**. Only pull the branches filed as PRs. +- **Include all branches**. Pull all the branches from the repository. + +**Discover Tags** + +- **Enable tag discovery**. The branch with a specific tag is scanned. +- **Disable tag discovery**. The branch with a specific tag is not scanned. + +**Discover PRs from Origin** + +- **Pull the code with the PR merged**. A pipeline is created and runs based on the source code after the PR is merged into the target branch. +- **Pull the code at the point of the PR**. A pipeline is created and runs based on the source code of the PR itself. +- **Create two pipelines respectively**. KubeSphere creates two pipelines, one based on the source code after the PR is merged into the target branch, and the other based on the source code of the PR itself. + +**Discover PRs from Forks** + +- **Pull the code with the PR merged**. A pipeline is created and runs based on the source code after the PR is merged into the target branch. +- **Pull the code at the point of the PR**. A pipeline is created and runs based on the source code of the PR itself. +- **Create two pipelines respectively**. KubeSphere creates two pipelines, one based on the source code after the PR is merged into the target branch, and the other based on the source code of the PR itself. +- **Contributors**. The users who make contributions to the PR. +- **Everyone**. Every user who can access the PR. +- **Users with the admin or write permission**. Only from users with the admin or write permission to the PR. +- **None**. If you select this option, no PR will be discovered despite the option you select in **Pull Strategy**. + +### Filter by Regex + +Select the checkbox to specify a regular expression to filter branches, PRs, and tags. + +### Script Path + +The **Script Path** parameter specifies the Jenkinsfile path in the code repository. It indicates the repository’s root directory. 
If the file location changes, the script path also needs to be changed. + +### Scan Trigger + +Select **Scan periodically** and set the scan interval from the drop-down list. + +### Build Trigger + +You can select a pipeline from the drop-down list for **Trigger on Pipeline Creation** and **Trigger on Pipeline Deletion** so that when a new pipeline is created or a pipeline is deleted, the tasks in the specified pipeline can be automatically triggered. + +### Clone Settings + +- **Clone Depth**. The number of commits to fetch when you clone. +- **Clone Timeout Period (min)**. The number of minutes before which the cloning process has to complete. +- **Enable shallow clone**. Enable the shallow clone or not. If you enable it, the codes cloned will not contain tags. + +### Webhook + +**Webhook** is an efficient way to allow pipelines to discover changes in the remote code repository and automatically trigger a new running. Webhook should be the primary method to trigger Jenkins automatic scanning for GitHub and Git (for example, GitLab). + +## Advanced Settings with No Code Repository Specified + +If you do not specify a code repository, you can customize the following configurations on the **Advanced Settings** tab: + +### Build Settings + +**Delete outdated build records**. Determine when the build records under the branch are deleted. The build record includes the console output, archived artifacts, and other metadata related to a particular build. Keeping fewer builds saves disk space used by Jenkins. KubeSphere provides two options to determine when old builds are deleted: + +- **Build Record Retention Period (days)**. Build records that exceed the retention period are deleted. + +- **Maximum Build Records**. When the number of build records exceeds the maximum number, the earliest build record is deleted. + + {{< notice note >}} + + These two conditions apply to the build at the same time. If either one is met first, the build will be discarded. + + {{}} + +- **No concurrent builds**. If you select this option, you cannot run multiple builds concurrently. + +### Build Parameters + +The parameterized build process allows you to pass in one or more parameters when you start to run a pipeline. KubeSphere provides five types of parameters by default, including **String**, **Multi-line string**, **Boolean**, **Options**, and **Password**. When you parameterize a project, the build is replaced with a parameterized build, which prompts the user to enter a value for each defined parameter. + +### Build Trigger + +**Build periodically**. It enables builds with a specified schedule. Click **Learn More** to see the detailed CRON syntax. + + + + + + + diff --git a/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/pipeline-webhook.md b/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/pipeline-webhook.md new file mode 100644 index 000000000..b98f6a2e6 --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/pipeline-webhook.md @@ -0,0 +1,67 @@ +--- +title: "Trigger a Pipeline by Using a Webhook" +keywords: 'Kubernetes, DevOps, Jenkins, Pipeline, Webhook' +description: 'Learn how to trigger a Jenkins pipeline by using a webhook.' +linkTitle: "Trigger a Pipeline by Using a Webhook" +weight: 11219 +--- + +If you create a Jenkinsfile-based pipeline from a remote code repository, you can configure a webhook in the remote repository so that the pipeline is automatically triggered when changes are made to the remote repository. 
+ +This tutorial demonstrates how to trigger a pipeline by using a webhook. + +## Prerequisites + +- You need to [enable the KubeSphere DevOps system](../../../../pluggable-components/devops/). +- You need to create a workspace, a DevOps project, and a user (`project-regular`). This account needs to be invited to the DevOps project and assigned the `operator` role. See [Create Workspaces, Projects, Users and Roles](../../../../quick-start/create-workspace-and-project/) if they are not ready. + +- You need to create a Jenkinsfile-based pipeline from a remote code repository. For more information, see [Create a Pipeline Using a Jenkinsfile](../create-a-pipeline-using-jenkinsfile/). + +## Configure a Webhook + +### Get a webhook URL + +1. Log in to the KubeSphere web console as `project-regular`. Go to your DevOps project and click a pipeline (for example, `jenkins-in-scm`) to go to its details page. + +2. Click **More** and select **Edit Settings** in the drop-down list. + +3. In the displayed dialog box, scroll down to **Webhook** to obtain the webhook push URL. + +### Set a webhook in the GitHub repository + +1. Log in to GitHub and go to your own repository `devops-maven-sample`. + +2. Click **Settings**, click **Webhooks**, and click **Add webhook**. + +3. Enter the webhook push URL of the pipeline for **Payload URL** and click **Add webhook**. This tutorial selects **Just the push event** for demonstration purposes. You can make other settings based on your needs. For more information, see [the GitHub document](https://docs.github.com/en/developers/webhooks-and-events/webhooks/creating-webhooks). + +4. The configured webhook is displayed on the **Webhooks** page. + +## Trigger the Pipeline by Using the Webhook + +### Submit a pull request to the repository + +1. On the **Code** page of your own repository, click **master** and then select the **sonarqube** branch. + +2. Go to `/deploy/dev-ol/` and click the file `devops-sample.yaml`. + +3. Click icon to edit the file. For example, change the value of `spec.replicas` to `3`. + +4. Click **Commit changes** at the bottom of the page. + +### Check the webhook deliveries + +1. On the **Webhooks** page of your own repository, click the webhook. + +2. Click **Recent Deliveries** and click a specific delivery record to view its details. + +### Check the pipeline + +1. Log in to the KubeSphere web console as `project-regular`. Go to your DevOps project and click the pipeline. + +2. On the **Run Records** tab, check that a new run is triggered by the pull request submitted to the `sonarqube` branch of the remote repository. + +3. Go to the **Pods** page of the project `kubesphere-sample-dev` and check the status of the 3 Pods. If the status of the 3 Pods is running, the pipeline is running properly. + + + diff --git a/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/use-pipeline-templates.md b/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/use-pipeline-templates.md new file mode 100644 index 000000000..fcdb34cec --- /dev/null +++ b/content/en/docs/v3.4/devops-user-guide/how-to-use/pipelines/use-pipeline-templates.md @@ -0,0 +1,104 @@ +--- +title: "Use Pipeline Templates" +keywords: 'KubeSphere, Kubernetes, Jenkins, Graphical Pipelines, Pipeline Templates' +description: 'Understand how to use pipeline templates on KubeSphere.' +linkTitle: "Use Pipeline Templates" +weight: 11213 +--- + +KubeSphere offers a graphical editing panel where the stages and steps of a Jenkins pipeline can be defined through interactive operations. 
KubeSphere 3.3 provides built-in pipeline templates, such as Node.js, Maven, and Golang, to help users quickly create pipelines. Additionally, KubeSphere 3.3 also supports customization of pipeline templates to meet diversified needs of enterprises. + +This section describes how to use pipeline templates on KubeSphere. +## Prerequisites + +- You have a workspace, a DevOps project and a user (`project-regular`) invited to the DevOps project with the `operator` role. If they are not ready yet, please refer to [Create Workspaces, Projects, Users and Roles](../../../../quick-start/create-workspace-and-project/). + +- You need to [enable the KubeSphere DevOps system](../../../../pluggable-components/devops/). + +- You need to [create a pipeline](../../../how-to-use/pipelines/create-a-pipeline-using-graphical-editing-panel/). + +## Use a Built-in Pipeline Template + +The following takes Node.js as an example to show how to use a built-in pipeline template. Steps for using Maven and Golang pipeline templates are alike. + + +1. Log in to the KubeSphere console as `project-regular`. In the navigation pane on the left, click **DevOps Projects**. + +2. On the **DevOps Projects** page, click the DevOps project you created. + +3. In the navigation pane on the left, click **Pipelines**. + +4. On the pipeline list on the right, click the created pipeline to go to its details page. + +5. On the right pane, click **Edit Pipeline**. + +6. On the **Create Pipeline** dialog box, click **Node.js**, and then click **Next**. + + +7. On the **Parameter Settings** tab, set the parameters based on the actual situation, and then click **Create**. + + | Parameter | Meaning | + | ----------- | ------------------------- | + | GitURL | URL of the project repository to clone | + | GitRevision | Revision to check out from | + | NodeDockerImage | Docker image version of Node.js | + | InstallScript | Shell script for installing dependencies | + | TestScript | Shell script for testing | + | BuildScript | Shell script for building a project | + | ArtifactsPath | Path where the artifacts reside | + +8. On the left pane, the system has preset several steps, and you can add more steps and parallel stages. + +9. Click a specific step. On the right pane, you can perform the following operations: + - Change the stage name. + - Delete a stage. + - Set the agent type. + - Add conditions. + - Edit or delete a task. + - Add steps or nested steps. + + {{< notice note >}} + + You can also customize the stages and steps in the pipeline templates based on your needs. For more information about how to use the graphical editing panel, refer to [Create a Pipeline Using Graphical Editing Panels](../create-a-pipeline-using-graphical-editing-panel/). + {{}} + +10. On the **Agent** area on the left, select an agent type, and click **OK**. The default value is **kubernetes**. + + The following table explains the agent types. + + + | Agent Type | Description | + | --------------- | ------------------------- | + | any | Uses the default base pod template to create a Jenkins agent to run pipelines. | + | node | Uses a pod template with the specific label to create a Jenkins agent to run pipelines. Available labels include base, java, nodejs, maven, go, and more. | + | kubernetes | Use a yaml file to customize a standard Kubernetes pod template to create a jenkins agent to run pipelines. | + +11. On the pipeline details page, you can view the created pipeline template. Click **Run** to run the pipeline. 
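+
+Behind the graphical panel, KubeSphere turns the stages defined by the template into a regular declarative Jenkinsfile. The sketch below is only an illustration of the kind of pipeline the Node.js template parameters map to, not the exact Jenkinsfile KubeSphere generates; the repository URL, the `nodejs` label, and the `dist/**` artifact path are assumptions you would replace with your own GitURL, NodeDockerImage, ArtifactsPath, and script parameter values listed above.
+
+```groovy
+pipeline {
+    agent {
+        node {
+            // Assumes a built-in pod template labeled "nodejs".
+            label 'nodejs'
+        }
+    }
+    stages {
+        stage('Clone Code') {
+            steps {
+                // Corresponds to the GitURL and GitRevision parameters.
+                git url: 'https://github.com/your-account/your-nodejs-project.git', branch: 'master'
+            }
+        }
+        stage('Install and Test') {
+            steps {
+                container('nodejs') {
+                    sh 'npm install'   // InstallScript
+                    sh 'npm test'      // TestScript
+                }
+            }
+        }
+        stage('Build') {
+            steps {
+                container('nodejs') {
+                    sh 'npm run build' // BuildScript
+                }
+            }
+        }
+        stage('Archive Artifacts') {
+            steps {
+                // Corresponds to the ArtifactsPath parameter.
+                archiveArtifacts artifacts: 'dist/**', allowEmptyArchive: true
+            }
+        }
+    }
+}
+```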
+ +## Legacy Built-in Pipeline Templates + +In earlier versions, KubeSphere also provides the CI and CI & CD pipeline templates. However, as the two templates are hardly customizable, you are advised to use the Node.js, Maven, or Golang pipeline template, or directly customize a template based on your needs. +The following briefly introduces the CI and CI & CD pipeline templates. + +- CI pipeline template + + ![ci-template](/images/docs/v3.3/devops-user-guide/using-devops/use-pipeline-templates/ci-template.png) + + ![ci-stages](/images/docs/v3.3/devops-user-guide/using-devops/use-pipeline-templates/ci-stages.png) + + The CI pipeline template contains two stages. The **clone code** stage checks out code and the **build & push** stage builds an image and pushes it to Docker Hub. You need to create credentials for your code repository and your Docker Hub registry in advance, and then set the URL of your repository and these credentials in corresponding steps. After you finish editing, the pipeline is ready to run. + +- CI & CD pipeline template + + ![cicd-template](/images/docs/v3.3/devops-user-guide/using-devops/use-pipeline-templates/cicd-template.png) + + ![cicd-stages](/images/docs/v3.3/devops-user-guide/using-devops/use-pipeline-templates/cicd-stages.png) + + The CI & CD pipeline template contains six stages. For more information about each stage, refer to [Create a Pipeline Using a Jenkinsfile](../create-a-pipeline-using-jenkinsfile/#pipeline-overview), where you can find similar stages and the descriptions. You need to create credentials for your code repository, your Docker Hub registry, and the kubeconfig of your cluster in advance, and then set the URL of your repository and these credentials in corresponding steps. After you finish editing, the pipeline is ready to run. \ No newline at end of file diff --git a/content/en/docs/v3.4/faq/_index.md b/content/en/docs/v3.4/faq/_index.md new file mode 100644 index 000000000..753d10890 --- /dev/null +++ b/content/en/docs/v3.4/faq/_index.md @@ -0,0 +1,12 @@ +--- +title: "FAQ" +description: "FAQ is designed to answer and summarize the questions users ask most frequently about KubeSphere." +layout: "second" + +linkTitle: "FAQ" +weight: 16000 + +icon: "/images/docs/v3.3/docs.svg" +--- + +This chapter answers and summarizes the questions users ask most frequently about KubeSphere. You can find these questions and answers in their respective sections which are grouped based on KubeSphere functions. diff --git a/content/en/docs/v3.4/faq/access-control/_index.md b/content/en/docs/v3.4/faq/access-control/_index.md new file mode 100644 index 000000000..e36af958d --- /dev/null +++ b/content/en/docs/v3.4/faq/access-control/_index.md @@ -0,0 +1,7 @@ +--- +title: "Access Control and Account Management FAQ" +keywords: 'Kubernetes, KubeSphere, account, access control' +description: 'Faq about access control and account management' +layout: "second" +weight: 16400 +--- diff --git a/content/en/docs/v3.4/faq/access-control/add-kubernetes-namespace-to-kubesphere-workspace.md b/content/en/docs/v3.4/faq/access-control/add-kubernetes-namespace-to-kubesphere-workspace.md new file mode 100644 index 000000000..e887f6b72 --- /dev/null +++ b/content/en/docs/v3.4/faq/access-control/add-kubernetes-namespace-to-kubesphere-workspace.md @@ -0,0 +1,38 @@ +--- +title: "Add Existing Kubernetes Namespaces to a KubeSphere Workspace" +keywords: "namespace, project, KubeSphere, Kubernetes" +description: "Add your existing Kubernetes namespaces to a KubeSphere workspace." 
+linkTitle: "Add existing Kubernetes namespaces to a KubeSphere Workspace" +Weight: 16430 +--- + +A Kubernetes namespace is a KubeSphere project. If you create a namespace object not from the KubeSphere console, the namespace does not appear directly in a certain workspace. But cluster administrators can still see the namespace on the **Cluster Management** page. At the same time, you can also place the namespace into a workspace. + +This tutorial demonstrates how to add an existing Kubernetes namespace to a KubeSphere workspace. + +## Prerequisites + +- You need a user granted a role including the permission of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the permission and assign it to a user. + +- You have an available workspace so that the namespace can be assigned to it. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Create a Kubernetes Namespace + +Create an example Kubernetes namespace first so that you can add it to a workspace later. Execute the following command: + +```bash +kubectl create ns demo-namespace +``` + +For more information about creating a Kubernetes namespace, see [Namespaces Walkthrough](https://kubernetes.io/docs/tasks/administer-cluster/namespaces-walkthrough/). + +## Add the Namespace to a KubeSphere Workspace + +1. Log in to the KubeSphere console as `admin` and go to the **Cluster Management** page. Click **Projects**, and you can see all your projects running on the current cluster, including the one just created. + +2. The namespace created through kubectl does not belong to any workspace. Click on the right and select **Assign Workspace**. + +3. In the dialog that appears, select a **Workspace** and a **Project Administrator** for the project and click **OK**. + +4. Go to your workspace and you can see the project on the **Projects** page. + diff --git a/content/en/docs/v3.4/faq/access-control/cannot-login.md b/content/en/docs/v3.4/faq/access-control/cannot-login.md new file mode 100644 index 000000000..ba0d8bd39 --- /dev/null +++ b/content/en/docs/v3.4/faq/access-control/cannot-login.md @@ -0,0 +1,141 @@ +--- +title: "User Login Failure" +keywords: "login failure, user is not active, KubeSphere, Kubernetes" +description: "How to solve the issue of login failure" +linkTitle: "User Login Failure" +Weight: 16440 +--- + +KubeSphere automatically creates a default user (`admin/P@88w0rd`) when it is installed. A user cannot be used for login if the status is not **Active** or you use an incorrect password. + +Here are some of the frequently asked questions about user login failure. + +## User Not Active + +You may see an image below when the login fails. To find out the reason and solve the issue, perform the following steps: + +![account-not-active](/images/docs/v3.3/faq/access-control-and-account-management/cannot-login/account-not-active.png) + +1. Execute the following command to check the status of the user. + + ```bash + $ kubectl get users + NAME EMAIL STATUS + admin admin@kubesphere.io Active + ``` + +2. Verify that `ks-controller-manager` is running and check if exceptions are contained in logs: + + ```bash + kubectl -n kubesphere-system logs -l app=ks-controller-manager + ``` + +Here are some possible reasons for this issue. + +### Admission webhooks malfunction in Kubernetes 1.19 + +Kubernetes 1.19 uses Golang 1.15 in coding, requiring the certificate for admission webhooks to be updated. 
This causes the failure of `ks-controller` admission webhook. + +Related error logs: + +```bash +Internal error occurred: failed calling webhook "validating-user.kubesphere.io": Post "https://ks-controller-manager.kubesphere-system.svc:443/validate-email-iam-kubesphere-io-v1alpha2-user?timeout=30s": x509: certificate relies on legacy Common Name field, use SANs or temporarily enable Common Name matching with GODEBUG=x509ignoreCN=0 +``` + +For more information about the issue and solution, see this [GitHub issue](https://github.com/kubesphere/kubesphere/issues/2928). + +### ks-controller-manager malfunctions + +`ks-controller-manager` relies on two stateful Services: OpenLDAP and Jenkins. When OpenLDAP or Jenkins goes down, `ks-controller-manager` will be in the status of `reconcile`. + +Execute the following commands to verify that OpenLDAP and Jenkins are running normally. + +``` +kubectl -n kubesphere-devops-system get po | grep -v Running +kubectl -n kubesphere-system get po | grep -v Running +kubectl -n kubesphere-system logs -l app=openldap +``` + +Related error logs: + +```bash +failed to connect to ldap service, please check ldap status, error: factory is not able to fill the pool: LDAP Result Code 200 \"Network Error\": dial tcp: lookup openldap.kubesphere-system.svc on 169.254.25.10:53: no such host +``` + +```bash +Internal error occurred: failed calling webhook “validating-user.kubesphere.io”: Post https://ks-controller-manager.kubesphere-system.svc:443/validate-email-iam-kubesphere-io-v1alpha2-user?timeout=4s: context deadline exceeded +``` + +#### Solution + +You need to restore OpenLDAP and Jenkins with good network connection, and then restart `ks-controller-manager`. + +``` +kubectl -n kubesphere-system rollout restart deploy ks-controller-manager +``` + +### Wrong code branch used + +If you used the incorrect version of ks-installer, the versions of different components would not match after the installation. Execute the following commands to check version consistency. Note that the correct image tag is `v3.3.2`. + +``` +kubectl -n kubesphere-system get deploy ks-installer -o jsonpath='{.spec.template.spec.containers[0].image}' +kubectl -n kubesphere-system get deploy ks-apiserver -o jsonpath='{.spec.template.spec.containers[0].image}' +kubectl -n kubesphere-system get deploy ks-controller-manager -o jsonpath='{.spec.template.spec.containers[0].image}' +``` + +## Wrong Username or Password + +![incorrect-password](/images/docs/v3.3/faq/access-control-and-account-management/cannot-login/wrong-password.png) + +Run the following command to verify that the username and the password are correct. + +``` +curl -u : "http://`kubectl -n kubesphere-system get svc ks-apiserver -o jsonpath='{.spec.clusterIP}'`/api/v1/nodes" +``` + +### Redis failure + +`ks-console` and `ks-apiserver` use Redis to share data across multiple copies. Use the following commands to verify that Redis is running normally. 
+ +``` +kubectl -n kubesphere-system logs -l app=ks-console +kubectl -n kubesphere-system get po | grep -v Running +# High Availability +kubectl -n kubesphere-system exec -it redis-ha-server-0 redis-cli info replication +kubectl -n kubesphere-system exec -it redis-ha-server-0 -- sh -c 'for i in `seq 0 2`; do nc -vz redis-ha-server-$i.redis-ha.kubesphere-system.svc 6379; done' +kubectl -n kubesphere-system logs -l app=redis-ha-haproxy +kubectl -n kubesphere-system logs -l app=redis-ha +# Single Replica +kubectl -n kubesphere-system logs -l app=redis +``` + +Related error logs: + +```bash +1344:C 17 Sep 2020 17:13:18.099 # Failed opening the RDB file dump.rdb (in server root dir /data) for saving: Stale file handle +1:M 17 Sep 2020 17:13:18.198 # Background saving error +1:M 17 Sep 2020 17:13:24.014 * 1 changes in 3600 seconds. Saving... +1:M 17 Sep 2020 17:13:24.015 * Background saving started by pid 1345 +1345:C 17 Sep 2020 17:13:24.016 # Failed opening the RDB file dump.rdb (in server root dir /data) for saving: Stale file handle +1:M 17 Sep 2020 17:13:24.115 # Background saving error +``` + +```bash +E0909 07:05:22.770468 1 redis.go:51] unable to reach redis host EOF +``` + +```bash +[WARNING] 252/094143 (6) : Server check_if_redis_is_master_0/R0 is DOWN, reason: Layer7 timeout, info: " at step 5 of tcp-check (expect string '10.223.2.232')", check duration: 1000ms. 2 active and 0 backup servers left. 0 sessions active, 0 requeued, 0 remaining in queue. +[WARNING] 252/094143 (6) : Server check_if_redis_is_master_0/R1 is DOWN, reason: Layer7 timeout, info: " at step 5 of tcp-check (expect string '10.223.2.232')", check duration: 1000ms. 1 active and 0 backup servers left. 0 sessions active, 0 requeued, 0 remaining in queue. +[WARNING] 252/094143 (6) : Server check_if_redis_is_master_0/R2 is DOWN, reason: Layer7 timeout, info: " at step 5 of tcp-check (expect string '10.223.2.232')", check duration: 1000ms. 0 active and 0 backup servers left. 0 sessions active, 0 requeued, 0 remaining in queue. +[ALERT] 252/094143 (6) : backend 'check_if_redis_is_master_0' has no server available! +``` + +#### Solution + +You need to restore Redis and make sure it is running normally with good network connection between Pods. After that, restart `ks-console` to synchronize the data across copies. + +``` +kubectl -n kubesphere-system rollout restart deploy ks-console +``` diff --git a/content/en/docs/v3.4/faq/access-control/forgot-password.md b/content/en/docs/v3.4/faq/access-control/forgot-password.md new file mode 100644 index 000000000..fa2bec3b4 --- /dev/null +++ b/content/en/docs/v3.4/faq/access-control/forgot-password.md @@ -0,0 +1,33 @@ +--- +title: "Reset the Account Password" +keywords: "Forgot, Password, KubeSphere, Kubernetes" +description: "Reset the password of any account." +linkTitle: "Reset the Account Password" +Weight: 16410 +--- + +## Reset the Password of a Regular User + +1. Log in to the KubeSphere web console using the administrator who has the permission to manage users. + +2. Click **Platform** on the upper-left corner and select **Access Control**. Click **Users**. + +3. On the **Users** page, click the user of which you need to change the password to visit its details page. + +4. On the details page, click **More**, and then select **Change Password** from the drop-down list. + +5. On the displayed dialog box, enter a new password and confirm the password. Click **OK** after finished. 
+ +## Reset the Administrator Password + +Execute the following command on the host cluster to change the password of any account. + +```bash +kubectl patch users -p '{"spec":{"password":""}}' --type='merge' && kubectl annotate users iam.kubesphere.io/password-encrypted- +``` + +{{< notice note >}} + +Make sure you replace `` and `` with the username and the new password in the command before you run it. + +{{}} \ No newline at end of file diff --git a/content/en/docs/v3.4/faq/access-control/session-timeout.md b/content/en/docs/v3.4/faq/access-control/session-timeout.md new file mode 100644 index 000000000..d3965d67b --- /dev/null +++ b/content/en/docs/v3.4/faq/access-control/session-timeout.md @@ -0,0 +1,21 @@ +--- +title: "Session Timeout" +keywords: "Session timeout, KubeSphere, Kubernetes" +description: "Understand session timeout and customize the timeout period." +linkTitle: "Session Timeout" +Weight: 16420 +--- + +A session starts as a user logs in to the console of KubeSphere. You may see a message of "**Session timeout or this account is logged in elsewhere, please login again**" when the session expires. + +## Session Timeout + +You can control when a session expires. The default session timeout is two hours of inactivity. It means once the session timeout is reached, the user will be automatically logged out of the console. You can [configure accessTokenMaxAge and accessTokenInactivityTimeout](../../../access-control-and-account-management/external-authentication/set-up-external-authentication/) for the session timeout. + +## Signature Verification Failed + +In a [multi-cluster environment](../../../multicluster-management/enable-multicluster/direct-connection/#prepare-a-member-cluster), `clusterRole` and `jwtSecret` must be set correctly. + +## Node Clock Skew + +The node clock skew affects time-sensitive operations such as validating the expiration time of a user token. You can configure the server time synchronization with an NTP server. [MaximumClockSkew](../../../access-control-and-account-management/external-authentication/set-up-external-authentication/) can also be set, which defaults to 10 seconds. \ No newline at end of file diff --git a/content/en/docs/v3.4/faq/applications/_index.md b/content/en/docs/v3.4/faq/applications/_index.md new file mode 100644 index 000000000..ee170628d --- /dev/null +++ b/content/en/docs/v3.4/faq/applications/_index.md @@ -0,0 +1,7 @@ +--- +title: "Applications" +keywords: 'Kubernetes, KubeSphere, OpenPitrix, Application, App' +description: 'Faq about applications in KubeSphere' +layout: "second" +weight: 16900 +--- diff --git a/content/en/docs/v3.4/faq/applications/remove-built-in-apps.md b/content/en/docs/v3.4/faq/applications/remove-built-in-apps.md new file mode 100644 index 000000000..7b2afcda6 --- /dev/null +++ b/content/en/docs/v3.4/faq/applications/remove-built-in-apps.md @@ -0,0 +1,32 @@ +--- +title: "Remove Built-in Apps in KubeSphere" +keywords: "KubeSphere, OpenPitrix, Application, App" +description: "Learn how to remove built-in apps from the KubeSphere App Store." +linkTitle: "Remove Built-in Apps in KubeSphere" +Weight: 16910 +--- + +As an open source and app-centric container platform, KubeSphere integrates apps in the App Store that is based on [OpenPitrix](https://github.com/openpitrix/openpitrix). They are accessible to all tenants in a workspace, while you can also remove them from the App Store. This tutorial demonstrates how to remove a built-in app from the App Store. 
+ +## Prerequisites + +- You need to use a user with the role of `platform-admin` (for example, `admin`) for this tutorial. +- You need to [enable the App Store](../../../pluggable-components/app-store/). + +## Remove a Built-in App + +1. Log in to the web console of KubeSphere as `admin`, click **Platform** in the upper-left corner, and then select **App Store Management**. + +2. On the **Apps** page, you can see all apps in the list. Select an app that you want to remove from the App Store. For example, click **Tomcat** to go to its detail page. + +3. On the detail page of Tomcat, click **Suspend App** to remove the app. + +4. In the displayed dialog box, click **OK** to confirm your operation. + +5. To make the app available again in the App Store, click **Activate App** and then click **OK** to confirm your operation. + + {{< notice note >}} + + You can also create a new user with necessary roles based on your needs. For more information about managing apps in KubeSphere, refer to [Application Lifecycle Management](../../../application-store/app-lifecycle-management/). + + {{}} \ No newline at end of file diff --git a/content/en/docs/v3.4/faq/console/_index.md b/content/en/docs/v3.4/faq/console/_index.md new file mode 100644 index 000000000..2ca0e6030 --- /dev/null +++ b/content/en/docs/v3.4/faq/console/_index.md @@ -0,0 +1,7 @@ +--- +title: "KubeSphere Web Console" +keywords: 'Kubernetes, KubeSphere, web console' +description: 'Faq about the KubeSphere web console' +layout: "second" +weight: 16500 +--- diff --git a/content/en/docs/v3.4/faq/console/change-console-language.md b/content/en/docs/v3.4/faq/console/change-console-language.md new file mode 100644 index 000000000..808641417 --- /dev/null +++ b/content/en/docs/v3.4/faq/console/change-console-language.md @@ -0,0 +1,25 @@ +--- +title: "Change the Console Language" +keywords: "FAQ, console, KubeSphere, Kubernetes, language" +description: "Select a desire language of the console." +linkTitle: "Change the Console Language" +Weight: 16530 +--- + +The KubeSphere web console is currently available in four languages: Simplified Chinese, Traditional Chinese, English, and Spanish. + +This tutorial demonstrates how to change the language of the console. + +## **Prerequisites** + +You have installed KubeSphere. + +## Change the Console Language + +1. Log in to KubeSphere and click the username in the upper-right corner. + +2. In the drop-down list, select **User Settings**. + +3. On the **Basic Information** page, select a desired language from the **Language** drop-down list. + +4. Click icon to save it. \ No newline at end of file diff --git a/content/en/docs/v3.4/faq/console/console-web-browser.md b/content/en/docs/v3.4/faq/console/console-web-browser.md new file mode 100644 index 000000000..3e632c6ae --- /dev/null +++ b/content/en/docs/v3.4/faq/console/console-web-browser.md @@ -0,0 +1,11 @@ +--- +title: "Supported Browsers" +keywords: "FAQ, console, KubeSphere, Kubernetes" +description: "Use a supported web browser to access the console." 
+linkTitle: "Supported Browsers" +Weight: 16510 +--- + +The KubeSphere web console supports major web browsers including **Chrome, Firefox, Safari, Opera, and Edge.** You only need to consider the supported versions of these browsers listed in the green box of the table below: + +![console-browser](/images/docs/v3.3/faq/kubesphere-web-console/supported-browsers/console-browser.png) diff --git a/content/en/docs/v3.4/faq/console/edit-resources-in-system-workspace.md b/content/en/docs/v3.4/faq/console/edit-resources-in-system-workspace.md new file mode 100644 index 000000000..d39e72e7d --- /dev/null +++ b/content/en/docs/v3.4/faq/console/edit-resources-in-system-workspace.md @@ -0,0 +1,50 @@ +--- +title: "Edit System Resources on the Console" +keywords: "system, resources, KubeSphere, Kubernetes" +description: "Enable the editing function of system resources on the console." +linkTitle: 'Edit System Resources on the Console' +Weight: 16520 +--- + +When you install KubeSphere, the workspace `system-workspace` is created where all KubeSphere system projects and Kubernetes system projects run. To avoid any misoperation on both systems, you are not allowed to edit resources in the workspace directly on the console. However, you can still make adjustments to resources using `kubectl`. + +This tutorial demonstrates how to enable the editing function of `system-workspace` resources. + +{{< notice warning >}} + +Editing resources in `system-workspace` may cause unexpected results, such as KubeSphere system and node failures, and your business may be affected. Please be extremely careful about the operation. + +{{}} + +## Edit the Console Configuration + +1. Log in to KubeSphere as `admin`. Click icon in the lower-right corner and select **Kubectl**. + +2. Execute the following command: + + ```bash + kubectl -n kubesphere-system edit cm ks-console-config + ``` + +3. Add the `systemWorkspace` field under `client` and save the file. + + ```yaml + client: + version: + kubesphere: v3.3.2 + kubernetes: v1.22.12 + openpitrix: v3.3.2 + enableKubeConfig: true + systemWorkspace: "$" # Add this line manually. + ``` + +4. Redeploy `ks-console` by executing the following command and wait for Pods to be recreated. + + ```bash + kubectl -n kubesphere-system rollout restart deployment ks-console + ``` + +5. Refresh the KubeSphere console and you can see that editing buttons in projects in `system-workspace` appear. + +6. If you want to disable the editing function on the console, delete the field `systemWorkspace` by following the same steps above. 
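+
+If you are not sure whether your edit has taken effect, you can check the ConfigMap before refreshing the console. This is only a quick sanity check and assumes the default ConfigMap name used above:
+
+```bash
+# Prints the field only when it is present; empty output means it has been removed.
+kubectl -n kubesphere-system get cm ks-console-config -o yaml | grep systemWorkspace
+```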
+ diff --git a/content/en/docs/v3.4/faq/devops/_index.md b/content/en/docs/v3.4/faq/devops/_index.md new file mode 100644 index 000000000..79fb04523 --- /dev/null +++ b/content/en/docs/v3.4/faq/devops/_index.md @@ -0,0 +1,7 @@ +--- +title: "DevOps" +keywords: 'Kubernetes, KubeSphere, DevOps, Jenkins' +description: 'FAQ about DevOps in KubeSphere' +layout: "second" +weight: 16800 +--- diff --git a/content/en/docs/v3.4/faq/devops/create-devops-kubeconfig-on-aws.md b/content/en/docs/v3.4/faq/devops/create-devops-kubeconfig-on-aws.md new file mode 100644 index 000000000..266f02a08 --- /dev/null +++ b/content/en/docs/v3.4/faq/devops/create-devops-kubeconfig-on-aws.md @@ -0,0 +1,106 @@ +--- +title: "Create a DevOps Kubeconfig on AWS" +keywords: "KubeSphere, Kubernetes, DevOps, Kubeconfig, AWS" +description: "How to create a DevOps kubeconfig on AWS" +linkTitle: "Create a DevOps Kubeconfig on AWS" +Weight: 16820 +--- + +If you have trouble deploying applications into your project when running a pipeline on your AWS cluster with KubeSphere installed, it may be caused by the issue of DevOps kubeconfig. This tutorial demonstrates how to create a DevOps kubeconfig on AWS. + +## Prerequisites + +- You have an AWS cluster with KubeSphere installed. For more information about how to install KubeSphere on AWS, refer to [Deploy KubeSphere on AWS EKS](../../../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks/). +- You have enabled [the KubeSphere DevOps system](../../../pluggable-components/devops/). +- You have a project available for deploying applications. This tutorial uses the project `kubesphere-sample-dev` as an example. + +## Create a DevOps Kubeconfig + +### Step 1: Create a Service Account + +1. Create a `devops-deploy.yaml` file on your AWS cluster and enter the following contents. + + ```yaml + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: devops-deploy + namespace: kubesphere-sample-dev + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: devops-deploy-role + namespace: kubesphere-sample-dev + rules: + - apiGroups: + - "*" + resources: + - "*" + verbs: + - "*" + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: devops-deploy-rolebinding + namespace: kubesphere-sample-dev + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: devops-deploy-role + subjects: + - kind: ServiceAccount + name: devops-deploy + namespace: kubesphere-sample-dev + ``` + +2. Run the following command to apply the YAML file. + + ```bash + kubectl apply -f devops-deploy.yaml + ``` + +### Step 2: Get the Service Account Token + +1. Run the following command to get the Service Account token. + + ```bash + export TOKEN_NAME=$(kubectl -n kubesphere-sample-dev get sa devops-deploy -o jsonpath='{.secrets[0].name}') + kubectl -n kubesphere-sample-dev get secret "${TOKEN_NAME}" -o jsonpath='{.data.token}' | base64 -d + ``` + +2. The output is similar to the following: + + ![get-token](/images/docs/v3.3/faq/devops/create-devops-kubeconfig-on-aws/get-token.jpg) + +### Step 3: Create a DevOps kubeconfig + +1. Log in to your KubeSphere console of the AWS cluster and go to your DevOps project. Go to **Credentials** under **DevOps Project Settings**, and then click **Create**. You can name this kubeconfig based on your needs. + +2. In the **Content** text box, pay attention to the following contents: + + ``` + user: + client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FUR... + client-key-data: LS0tLS1CRUdJTiBQUk... 
+ ``` + + You have to replace them with the token retrieved in step 2, then click **OK** to create the kubeconfig. + + ```bash + user: + token:eyJhbGciOiJSUzI1NiIsImtpZCI6Ikl3UkhCay13dHpPY2Z6LW9VTlZKQVR6dVdmb2FHallJQ2E4VzJULUNjUzAifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlc3BoZXJlLXNhbXBsZS1kZXYiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlY3JldC5uYW1lIjoiZGV2b3BzLWRlcGxveS10b2tlbi1kcGY2ZiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJkZXZvcHMtZGVwbG95Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiMjM0ZTI4OTUtMjM3YS00M2Y5LTkwMTgtZDg4YjY2YTQyNzVmIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmVzcGhlcmUtc2FtcGxlLWRldjpkZXZvcHMtZGVwbG95In0.Ls6mkpgAU75zVw87FkcWx-MLEXGcJjlnb4rUVtT61Jmc_G6jkn4X45MK1V_HuLje3JZMFjL80QUl5ljHLiCUPQ7oE5AUZaUCdqZVdDYEhqeFuGQb_7Qlh8-UFVGGg8vrb0HeGiOlS0qq5hzwKc9C1OmsXHS92yhNwz9gIOujZRafnGKIsG6TL2hEVY2xI0vvmseDKmKg5o0TbeaTMVePHvECju9Qz3Z7TUYsr7HAOvCPtGutlPWLqGx5uOHenOdeLn71x5RoS98xguZoxYVollciPKCQwBlZ4zWK2hzsLSNNLb9cZpxtgUVyHE0AB0e86IHRngnnNrzpp1_pDxL5jw/ + ``` + + {{< notice note >}} + + Make sure you use your own token. + + {{}} + + + + + diff --git a/content/en/docs/v3.4/faq/devops/install-jenkins-plugins.md b/content/en/docs/v3.4/faq/devops/install-jenkins-plugins.md new file mode 100644 index 000000000..dfa608ee4 --- /dev/null +++ b/content/en/docs/v3.4/faq/devops/install-jenkins-plugins.md @@ -0,0 +1,67 @@ +--- +title: "Install Plugins to Jenkins in KubeSphere" +keywords: "KubeSphere, Kubernetes, DevOps, Jenkins, Plugin" +description: "How to install plugins to Jenkins in KubeSphere" +linkTitle: "Install Plugins to Jenkins in KubeSphere" +Weight: 16810 +--- + +The KubeSphere DevOps System offers containerized CI/CD functions based on Jenkins. The primary means of enhancing the functionality of Jenkins is to install plugins. This tutorial demonstrates how to install plugins on the Jenkins dashboard. + +{{< notice warning >}} + +Not all Jenkins plugins have good maintaining support. Some plugins may lead to issues in Jenkins or even cause serious problems in KubeSphere. It is highly recommended that you make a backup before installing any plugin and run testing in another environment if you can. + +{{}} + +## Prerequisites + +You need to enable [the KubeSphere DevOps system](../../../pluggable-components/devops/). + +## Install Plugins + +### Step 1: Get the address of Jenkins + +1. Run the following command to get the address of Jenkins. + + ```bash + export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services devops-jenkins) + export NODE_IP=$(kubectl get nodes --namespace kubesphere-devops-system -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT + ``` + +2. You can get the output similar to the following. You can access the Jenkins dashboard through the address with your own KubeSphere account and password (for example, `admin/P@88w0rd`). + + ``` + http://192.168.0.4:30180 + ``` + + {{< notice note >}} + + Make sure you use your own address of Jenkins. You may also need to open the port in your security groups and configure related port forwarding rules depending on where your KubeSphere cluster is deployed. + + {{}} + +### Step 2: Install plugins on the Jenkins dashboard + +1. Log in to the Jenkins dashboard and click **Manage Jenkins**. + +2. On the **Manage Jenkins** page, scroll down to **Manage Plugins** and click it. + +3. 
Select the **Available** tab and you have to use the search field to search for the plugins you need. For example, you can enter `git` in the search field, check the checkbox next to the plugin you need, and then click **Install without restart** or **Download now and install after restart** based on your needs. + + {{< notice note >}} + + Jenkins plugins are inter-dependent. You may also need to install dependencies when you install a plugin. + + {{}} + +4. If you downloaded an HPI file in advance, you can also select the **Advanced** tab and upload the HPI file to install it as a plugin. + +5. On the **Installed** tab, you can view all the plugins installed, and the plugins that are safe to uninstall will have the **Uninstall** button shown on the right. + +6. On the **Updates** tab, you can install the updates for plugins by checking the checkbox of a plugin and then click the **Download now and install after restart** button. You can also click the **Check now** button to check for updates. + +## See Also + +[Managing Plugins](https://www.jenkins.io/doc/book/managing/plugins/) \ No newline at end of file diff --git a/content/en/docs/v3.4/faq/installation/_index.md b/content/en/docs/v3.4/faq/installation/_index.md new file mode 100644 index 000000000..3030de69b --- /dev/null +++ b/content/en/docs/v3.4/faq/installation/_index.md @@ -0,0 +1,7 @@ +--- +title: "Installation" +keywords: 'Kubernetes, KubeSphere, installation, FAQ' +description: 'Faq about installation' +layout: "second" +weight: 16100 +--- diff --git a/content/en/docs/v3.4/faq/installation/configure-booster.md b/content/en/docs/v3.4/faq/installation/configure-booster.md new file mode 100644 index 000000000..5c002d0b2 --- /dev/null +++ b/content/en/docs/v3.4/faq/installation/configure-booster.md @@ -0,0 +1,84 @@ +--- +title: "Configure a Booster for Installation" +keywords: 'KubeSphere, booster, installation, faq' +description: 'Set a registry mirror to speed up image downloads during installation.' +linkTitle: "Configure a Booster for Installation" +weight: 16200 +--- + +If you have trouble downloading images from `dockerhub.io`, it is highly recommended that you configure a registry mirror (i.e. booster) beforehand to speed up downloads. You can refer to the [official documentation of Docker](https://docs.docker.com/registry/recipes/mirror/#configure-the-docker-daemon) or follow the steps below. + +## Get a Booster URL + +To configure the booster, you need a registry mirror address. See how you can [get a booster URL from Alibaba Cloud](https://www.alibabacloud.com/help/doc-detail/60750.htm?spm=a2c63.p38356.b99.18.4f4133f0uTKb8S). + +## Set the Registry Mirror + +You can configure the Docker daemon directly or use KubeKey to set the configuration. + +### Configure the Docker daemon + +{{< notice note >}} + +Docker needs to be installed in advance for this method. + +{{}} + +1. Run the following commands: + + ```bash + sudo mkdir -p /etc/docker + ``` + + ```bash + sudo vi /etc/docker/daemon.json + ``` + +2. Add the `registry-mirrors` key and value to the file. + + ```json + { + "registry-mirrors": ["https://"] + } + ``` + + {{< notice note >}} + + Make sure you replace the address within the quotation mark above with your own Booster URL. + + {{}} + +3. Save the file and reload Docker by executing the following commands so that the change can take effect. + + ```bash + sudo systemctl daemon-reload + ``` + + ```bash + sudo systemctl restart docker + ``` + +### Use KubeKey to set the registry mirror + +1. 
After you create a `config-sample.yaml` file with KubeKey before installation, navigate to `registry` in the file. + + ```yaml + registry: + registryMirrors: [] + insecureRegistries: [] + privateRegistry: "" + ``` + + {{< notice note >}} + + For more information about each parameter under the `registry` section, see [Kubernetes Cluster Configurations](../../../installing-on-linux/introduction/vars/). + + {{}} + +2. Provide the registry mirror address as the value of `registryMirrors` and save the file. For more information about installation, see [Multi-node Installation](../../../installing-on-linux/introduction/multioverview/). + +{{< notice note >}} + +If you adopt [All-in-One Installation](../../../quick-start/all-in-one-on-linux/), refer to the first method because a `config-sample.yaml` file is not needed for this mode. + +{{}} \ No newline at end of file diff --git a/content/en/docs/v3.4/faq/installation/install-addon-through-yaml-using-kubekey.md b/content/en/docs/v3.4/faq/installation/install-addon-through-yaml-using-kubekey.md new file mode 100644 index 000000000..303fbab14 --- /dev/null +++ b/content/en/docs/v3.4/faq/installation/install-addon-through-yaml-using-kubekey.md @@ -0,0 +1,19 @@ +--- +title: "Install an Add-on through YAML Using KubeKey" +keywords: "Installer, KubeKey, KubeSphere, Kubernetes, add-ons" +description: "Understand why the installation may fail when you use KubeKey to install an add-on through YAML." +linkTitle: "Install an Add-on through YAML Using KubeKey" +Weight: 16400 +--- + +When you use KubeKey to install add-ons, you put the add-on information (Chart or YAML) under the `addons` field in the configuration file (`config-sample.yaml` by default). If the add-on configuration is provided as a YAML file, in some cases, you may see an error message similar to this during the installation: + +```bash +Error from server: failed to create typed patch object: xxx: element 0: associative list with keys has an element that omits key field "protocol" +``` + +This is a [known issue of Kubernetes itself](https://github.com/kubernetes-sigs/structured-merge-diff/issues/130), caused by the flag `--server-side`. To solve this issue, do not add your add-on in the configuration file. Instead, you can apply your YAML file after KubeSphere is deployed. For example: + +```bash +kubectl apply -f xxx.yaml # Use your own YAML file. +``` \ No newline at end of file diff --git a/content/en/docs/v3.4/faq/installation/ssh-connection-failure.md b/content/en/docs/v3.4/faq/installation/ssh-connection-failure.md new file mode 100644 index 000000000..8cb8df40d --- /dev/null +++ b/content/en/docs/v3.4/faq/installation/ssh-connection-failure.md @@ -0,0 +1,40 @@ +--- +title: "SSH Connection Failure" +keywords: "Installation, SSH, KubeSphere, Kubernetes" +description: "Understand why the SSH connection may fail when you use KubeKey to create a cluster." +linkTitle: "SSH Connection Failure" +Weight: 16600 +--- + +When you use KubeKey to set up a cluster, you create a configuration file which contains necessary host information. 
Here is an example of the field `hosts`: + +```bash +spec: + hosts: + - {name: control plane, address: 192.168.0.2, internalAddress: 192.168.0.2, user: ubuntu, password: Testing123} + - {name: node1, address: 192.168.0.3, internalAddress: 192.168.0.3, user: ubuntu, password: Testing123} + - {name: node2, address: 192.168.0.4, internalAddress: 192.168.0.4, user: ubuntu, password: Testing123} +``` + +Before you start to use the `./kk` command to create your cluster, it is recommended that you test the connection between the taskbox and other instances using SSH. + +## Possible Error Message + +```bash +Failed to connect to xx.xxx.xx.xxx: could not establish connection to xx.xxx.xx.xxx:xx: ssh: handshake failed: ssh: unable to authenticate , attempted methods [none], no supported methods remain node=xx.xxx.xx.xxx +``` + +If you see an error message as above, verify that: + +- You are using the correct port number. Port `22` is the default port of SSH and you need to add the port number after the IP address if your port is different. For example: + + ```bash + hosts: + - {name: control plane, address: 192.168.0.2, internalAddress: 192.168.0.2, port: 8022, user: ubuntu, password: Testing123} + ``` + +- SSH connections are not restricted in `/etc/ssh/sshd_config`. For example, `PasswordAuthentication` should be set to `true`. + +- You are using the correct username, password or key. Note that the user must have sudo privileges. + +- Your firewall configurations allow SSH connections. \ No newline at end of file diff --git a/content/en/docs/v3.4/faq/installation/telemetry.md b/content/en/docs/v3.4/faq/installation/telemetry.md new file mode 100644 index 000000000..74e31d1db --- /dev/null +++ b/content/en/docs/v3.4/faq/installation/telemetry.md @@ -0,0 +1,86 @@ +--- +title: "Telemetry in KubeSphere" +keywords: "Installer, Telemetry, KubeSphere, Kubernetes" +description: "Understand what Telemetry is and how to enable or disable it in KubeSphere." +linkTitle: "Telemetry in KubeSphere" +Weight: 16300 +--- + +Telemetry collects aggregate information about the size of KubeSphere clusters installed, KubeSphere and Kubernetes versions, components enabled, cluster running time, error logs and so on. KubeSphere promises that the information is only used by the KubeSphere community to improve products and will not be shared with any third parties. + +## What Information Is Collected + +- External network IP +- Download date +- Kubernetes version +- KubeSphere version +- Kubernetes cluster size +- The type of the operating system +- Installer error logs +- Components enabled +- The running time of Kubernetes clusters +- The running time of KubeSphere clusters +- Cluster ID +- Machine ID + +## Disable Telemetry + +Telemetry is enabled by default when you install KubeSphere, while you also have the option to disable it either before or after the installation. + +### Disable Telemetry before installation + +When you install KubeSphere on an existing Kubernetes cluster, you need to download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) for cluster settings. If you want to disable Telemetry, do not run `kubectl apply -f` directly for this file. + +{{< notice note >}} + +If you install KubeSphere on Linux, see [Disable Telemetry After Installation](../telemetry/#disable-telemetry-after-installation) directly. + +{{}} + +1. 
Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) and edit it: + + ```bash + vi cluster-configuration.yaml + ``` + +2. In this local `cluster-configuration.yaml` file, scroll down to the bottom of the file and add `telemetry_enabled: false` as follows: + + ```yaml + openpitrix: + store: + enabled: false + servicemesh: + enabled: false + telemetry_enabled: false # Add this line manually to disable Telemetry. + ``` + +3. Save the file and run the following commands to start installation. + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f cluster-configuration.yaml + ``` + +### Disable Telemetry after installation + +1. Log in to the console as `admin` and click **Platform** in the upper-left corner. + +2. Select **Cluster Management** and navigate to **CRDs**. + + {{< notice note >}} +If you have enabled [the multi-cluster feature](../../../multicluster-management/), you need to select a cluster first. + {{}} + +3. Enter `clusterconfiguration` in the search bar and click the result to go to its detail page. + +4. Click icon on the right of `ks-installer` and select **Edit YAML**. + +5. Scroll down to the bottom of the file, add `telemetry_enabled: false`, and then click **OK**. + + +{{< notice note >}} + +If you want to enable Telemetry again, you can update `ks-installer` by deleting `telemetry_enabled: false` or changing it to `telemetry_enabled: true`. + +{{}} diff --git a/content/en/docs/v3.4/faq/multi-cluster-management/_index.md b/content/en/docs/v3.4/faq/multi-cluster-management/_index.md new file mode 100644 index 000000000..05c8c18b9 --- /dev/null +++ b/content/en/docs/v3.4/faq/multi-cluster-management/_index.md @@ -0,0 +1,7 @@ +--- +title: "Multi-cluster Management" +keywords: 'Kubernetes, KubeSphere, Multi-cluster Management, Host Cluster, Member Cluster' +description: 'Faq about multi-cluster management in KubeSphere' +layout: "second" +weight: 16700 +--- diff --git a/content/en/docs/v3.4/faq/multi-cluster-management/host-cluster-access-member-cluster.md b/content/en/docs/v3.4/faq/multi-cluster-management/host-cluster-access-member-cluster.md new file mode 100644 index 000000000..bd93b4c8b --- /dev/null +++ b/content/en/docs/v3.4/faq/multi-cluster-management/host-cluster-access-member-cluster.md @@ -0,0 +1,71 @@ +--- +title: "Restore the Host Cluster Access to A Member Cluster" +keywords: "Kubernetes, KubeSphere, Multi-cluster, Host Cluster, Member Cluster" +description: "Learn how to restore the Host Cluster access to a Member Cluster." +linkTitle: "Restore the Host Cluster Access to A Member Cluster" +Weight: 16720 +--- + +KubeSphere features [multi-cluster maganement](../../../multicluster-management/introduction/kubefed-in-kubesphere/) and tenants with necessary permissions (usually cluster administrators) can access the central control plane from the Host Cluster to manage all the Member Clusters. It is highly recommended that you manage your resources across your cluster through the Host Cluster. + +This tutorial demomstrates how to restore the Host Cluster access to a Member Cluster. + +## Possible Error Message + +If you can't access a Member Cluster from the central control plane and your browser keeps redirecting you to the login page of KubeSphere, run the following command on that Member Cluster to get the logs of the ks-apiserver. 
+ +``` +kubectl -n kubesphere-system logs ks-apiserver-7c9c9456bd-qv6bs +``` + +{{< notice note >}} + +`ks-apiserver-7c9c9456bd-qv6bs` refers to the Pod ID on that Member Cluster. Make sure you use the ID of your own Pod. + +{{}} + +You will probably see the following error message: + +``` +E0305 03:46:42.105625 1 token.go:65] token not found in cache +E0305 03:46:42.105725 1 jwt_token.go:45] token not found in cache +E0305 03:46:42.105759 1 authentication.go:60] Unable to authenticate the request due to error: token not found in cache +E0305 03:46:52.045964 1 token.go:65] token not found in cache +E0305 03:46:52.045992 1 jwt_token.go:45] token not found in cache +E0305 03:46:52.046004 1 authentication.go:60] Unable to authenticate the request due to error: token not found in cache +E0305 03:47:34.502726 1 token.go:65] token not found in cache +E0305 03:47:34.502751 1 jwt_token.go:45] token not found in cache +E0305 03:47:34.502764 1 authentication.go:60] Unable to authenticate the request due to error: token not found in cache +``` + +## Solution + +### Step 1: Verify the jwtSecret + +Run the following command on your Host Cluster and Member Cluser respectively to confirm whether their jwtSecrets are identical. + +``` +kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v “apiVersion” | grep jwtSecret +``` + +### Step 2: Modify `accessTokenMaxAge` + +Make sure the jwtSecrets are identical, then run the following command on that Member Cluster to get the value of `accessTokenMaxAge`. + +``` +kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep accessTokenMaxAge +``` + +If the value is not `0`, run the following command to modify the value of `accessTokenMaxAge`. + +``` +kubectl -n kubesphere-system edit cm kubesphere-config -o yaml +``` + +After you modified the value of `accessTokenMaxAge` to `0`, run the following command to restart the ks-apiserver. + +``` +kubectl -n kubesphere-system rollout restart deploy ks-apiserver +``` + +Now, you can access that Member Cluster from the central control plane again. \ No newline at end of file diff --git a/content/en/docs/v3.4/faq/multi-cluster-management/manage-multi-cluster.md b/content/en/docs/v3.4/faq/multi-cluster-management/manage-multi-cluster.md new file mode 100644 index 000000000..5bb132e42 --- /dev/null +++ b/content/en/docs/v3.4/faq/multi-cluster-management/manage-multi-cluster.md @@ -0,0 +1,61 @@ +--- +title: "Manage a Multi-cluster Environment on KubeSphere" +keywords: 'Kubernetes, KubeSphere, federation, multicluster, hybrid-cloud' +description: 'Understand how to manage a multi-cluster environment on KubeSphere.' +linkTitle: "Manage a Multi-cluster Environment on KubeSphere" +weight: 16710 +--- + +KubeSphere provides an easy-to-use multi-cluster feature to help you [build your multi-cluster environment on KubeSphere](../../../multicluster-management/). This guide illustrates how to manage a multi-cluster environment on KubeSphere. + +## Prerequisites + +- Make sure your Kubernetes clusters are installed with KubeSphere before you use them as your Host Cluster and Member Clusters. + +- Make sure the cluster role is set correctly on your Host Cluster and Member Clusters respectively, and the `jwtSecret` is the same between them. + +- It is recommended that your Member Cluster is in a clean environment where no resources have been created on it before it is imported to the Host Cluster. 
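+
+To double-check the second prerequisite above, you can compare the KubeSphere configuration on the Host Cluster and on every Member Cluster. The command below is a sketch that assumes the default ConfigMap name `kubesphere-config`; the `jwtSecret` values must be identical across clusters, and `clusterRole` must be set to `host` or `member` accordingly.
+
+```bash
+# Run on each cluster and compare the output.
+kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -E "clusterRole|jwtSecret"
+```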
+
+## Manage Your KubeSphere Multi-cluster Environment
+
+Once you build a multi-cluster environment on KubeSphere, you can manage it through the central control plane from your Host Cluster. When creating resources, you can select a specific cluster, but you should avoid placing workloads on the Host Cluster itself to prevent overload. It is not recommended to log in to the KubeSphere web console of your Member Clusters to create resources on them, as some resources (for example, workspaces) won't be synchronized to your Host Cluster for management.
+
+### Resource Management
+
+It is not recommended that you change a Host Cluster to a Member Cluster or the other way round. If a Member Cluster has been imported to a Host Cluster before, you have to use the same cluster name when importing it to a new Host Cluster after unbinding it from the previous Host Cluster.
+
+If you want to import the Member Cluster to a new Host Cluster while retaining existing projects, follow the steps below.
+
+1. Run the following command on the Member Cluster to unbind the projects to be retained from your workspace.
+
+    ```bash
+    kubectl label ns <project name> kubesphere.io/workspace- && kubectl patch ns <project name> -p '{"metadata":{"ownerReferences":[]}}' --type=merge
+    ```
+
+2. Run the following command on the Member Cluster to clear your workspace.
+
+    ```bash
+    kubectl delete workspacetemplate <workspace name>
+    ```
+
+3. When you create a workspace on the new Host Cluster and assign the Member Cluster to this workspace, run the following command on the Member Cluster to bind the retained projects to the workspace.
+
+    ```bash
+    kubectl label ns <project name> kubesphere.io/workspace=<workspace name>
+    ```
+
+### User Management
+
+The users you create through the central control plane from your Host Cluster will be synchronized to Member Clusters.
+
+If you want to let different users access different clusters, you can create workspaces and [assign different clusters to them](../../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/). After that, you can invite different users to these workspaces based on their access requirements.
+
+### KubeSphere Components Management
+
+KubeSphere provides some pluggable components that you can enable based on your needs. In a multi-cluster environment, you can choose to enable these components on your Host Cluster or Member Clusters.
+
+For example, you only need to enable the App Store on your Host Cluster, and you can then use functions related to the App Store on your Member Clusters directly. For other components, enabling them on your Host Cluster does not enable them on Member Clusters; you have to manually enable the same components on each Member Cluster to get the same features there. You can also enable components only on a Member Cluster to implement the corresponding features solely on that cluster.
+
+For more information about how to enable pluggable components, refer to [Enable Pluggable Components](../../../pluggable-components/).
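+
+If you just want to see at a glance which components are currently enabled on a particular cluster, you can inspect the `ks-installer` ClusterConfiguration on that cluster. This is a sketch that uses the default object name and namespace referred to throughout this documentation:
+
+```bash
+# Lists each pluggable component section together with its enabled flag.
+kubectl -n kubesphere-system get cc ks-installer -o yaml | grep -B 1 "enabled:"
+```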
+ diff --git a/content/en/docs/v3.4/faq/observability/_index.md b/content/en/docs/v3.4/faq/observability/_index.md new file mode 100644 index 000000000..0ff90fcb5 --- /dev/null +++ b/content/en/docs/v3.4/faq/observability/_index.md @@ -0,0 +1,7 @@ +--- +title: "Observability FAQ" +keywords: 'Kubernetes, KubeSphere, observability, FAQ' +description: 'Faq about observability' +layout: "second" +weight: 16300 +--- diff --git a/content/en/docs/v3.4/faq/observability/byop.md b/content/en/docs/v3.4/faq/observability/byop.md new file mode 100644 index 000000000..c6a996333 --- /dev/null +++ b/content/en/docs/v3.4/faq/observability/byop.md @@ -0,0 +1,205 @@ +--- +title: "Bring Your Own Prometheus" +keywords: "Monitoring, Prometheus, node-exporter, kube-state-metrics, KubeSphere, Kubernetes" +description: "Use your own Prometheus stack setup in KubeSphere." +linkTitle: "Bring Your Own Prometheus" +Weight: 16330 +--- + +KubeSphere comes with several pre-installed customized monitoring components, including Prometheus Operator, Prometheus, Alertmanager, Grafana (Optional), various ServiceMonitors, node-exporter, and kube-state-metrics. These components might already exist before you install KubeSphere. It is possible to use your own Prometheus stack setup in KubeSphere v3.3. + +## Bring Your Own Prometheus + +### Step 1. Uninstall the customized Prometheus stack of KubeSphere + +1. Execute the following commands to uninstall the stack: + + ```bash + kubectl -n kubesphere-system exec $(kubectl get pod -n kubesphere-system -l app=ks-installer -o jsonpath='{.items[0].metadata.name}') -- kubectl delete -f /kubesphere/kubesphere/prometheus/alertmanager/ 2>/dev/null + kubectl -n kubesphere-system exec $(kubectl get pod -n kubesphere-system -l app=ks-installer -o jsonpath='{.items[0].metadata.name}') -- kubectl delete -f /kubesphere/kubesphere/prometheus/devops/ 2>/dev/null + kubectl -n kubesphere-system exec $(kubectl get pod -n kubesphere-system -l app=ks-installer -o jsonpath='{.items[0].metadata.name}') -- kubectl delete -f /kubesphere/kubesphere/prometheus/etcd/ 2>/dev/null + kubectl -n kubesphere-system exec $(kubectl get pod -n kubesphere-system -l app=ks-installer -o jsonpath='{.items[0].metadata.name}') -- kubectl delete -f /kubesphere/kubesphere/prometheus/grafana/ 2>/dev/null + kubectl -n kubesphere-system exec $(kubectl get pod -n kubesphere-system -l app=ks-installer -o jsonpath='{.items[0].metadata.name}') -- kubectl delete -f /kubesphere/kubesphere/prometheus/kube-state-metrics/ 2>/dev/null + kubectl -n kubesphere-system exec $(kubectl get pod -n kubesphere-system -l app=ks-installer -o jsonpath='{.items[0].metadata.name}') -- kubectl delete -f /kubesphere/kubesphere/prometheus/node-exporter/ 2>/dev/null + kubectl -n kubesphere-system exec $(kubectl get pod -n kubesphere-system -l app=ks-installer -o jsonpath='{.items[0].metadata.name}') -- kubectl delete -f /kubesphere/kubesphere/prometheus/upgrade/ 2>/dev/null + kubectl -n kubesphere-system exec $(kubectl get pod -n kubesphere-system -l app=ks-installer -o jsonpath='{.items[0].metadata.name}') -- kubectl delete -f /kubesphere/kubesphere/prometheus/prometheus-rules-v1.16\+.yaml 2>/dev/null + kubectl -n kubesphere-system exec $(kubectl get pod -n kubesphere-system -l app=ks-installer -o jsonpath='{.items[0].metadata.name}') -- kubectl delete -f /kubesphere/kubesphere/prometheus/prometheus-rules.yaml 2>/dev/null + kubectl -n kubesphere-system exec $(kubectl get pod -n kubesphere-system -l app=ks-installer -o 
jsonpath='{.items[0].metadata.name}') -- kubectl delete -f /kubesphere/kubesphere/prometheus/prometheus 2>/dev/null + # Uncomment this line if you don't have Prometheus managed by Prometheus Operator in other namespaces. + # kubectl -n kubesphere-system exec $(kubectl get pod -n kubesphere-system -l app=ks-installer -o jsonpath='{.items[0].metadata.name}') -- kubectl delete -f /kubesphere/kubesphere/prometheus/init/ 2>/dev/null + ``` + +2. Delete the PVC that Prometheus uses. + + ```bash + kubectl -n kubesphere-monitoring-system delete pvc `kubectl -n kubesphere-monitoring-system get pvc | grep -v VOLUME | awk '{print $1}' | tr '\n' ' '` + ``` + +### Step 2. Install your own Prometheus stack + +{{< notice note >}} + +KubeSphere 3.3 was certified to work well with the following Prometheus stack components: + +- Prometheus Operator **v0.55.1+** +- Prometheus **v2.34.0+** +- Alertmanager **v0.23.0+** +- kube-state-metrics **v2.5.0** +- node-exporter **v1.3.1** + +Make sure your Prometheus stack components' version meets these version requirements, especially **node-exporter** and **kube-state-metrics**. + +Make sure you install **node-exporter** and **kube-state-metrics** if only **Prometheus Operator** and **Prometheus** are installed. **node-exporter** and **kube-state-metrics** are required for KubeSphere to work properly. + +**If you've already had the entire Prometheus stack up and running, you can skip this step.** + +{{}} + +The Prometheus stack can be installed in many ways. The following steps show how to install it into the namespace `monitoring` using [Prometheus stack manifests in ks-installer](https://github.com/kubesphere/ks-installer/tree/release-3.3/roles/ks-monitor/files/prometheus) (generated from a KubeSphere custom version of [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus.git)). + +1. Obtain `ks-installer` that KubeSphere v3.3.0 uses. + + ```bash + cd ~ && git clone -b release-3.3 https://github.com/kubesphere/ks-installer.git && cd ks-installer/roles/ks-monitor/files/prometheus + ``` + +2. Create `kustomization.yaml`: + ```bash + # create + cat < kustomization.yaml + apiVersion: kustomize.config.k8s.io/v1beta1 + kind: Kustomization + namespace: monitoring + resources: + EOF + + # append yaml paths + find . -mindepth 2 -name "*.yaml" -type f -print | sed 's/^/- /' >> kustomization.yaml + ``` + +3. Remove unnecessary components. For example, if Grafana is not enabled in KubeSphere, you can run the following command to delete the Grafana section in `kustomization.yaml`. + + ```bash + sed -i '/grafana\//d' kustomization.yaml + ``` + +4. Install the stack. + + ```bash + kubectl apply -k . + ``` + +### Step 3. Install KubeSphere customized stuff to your Prometheus stack + +{{< notice note >}} + +If your Prometheus stack is installed using [Prometheus stack manifests in ks-installer](https://github.com/kubesphere/ks-installer/tree/release-3.3/roles/ks-monitor/files/prometheus), skip this step. + +KubeSphere 3.3.0 uses Prometheus Operator to manage Prometheus/Alertmanager config and lifecycle, ServiceMonitor (to manage scrape config), and PrometheusRule (to manage Prometheus recording/alert rules). + +If your Prometheus stack setup isn't managed by Prometheus Operator, you can skip this step. 
But you have to make sure that: + +- You must copy the recording/alerting rules in [PrometheusRule](https://github.com/kubesphere/ks-installer/tree/release-3.3/roles/ks-monitor/files/prometheus/kubernetes/kubernetes-prometheusRule.yaml) and [PrometheusRule for etcd](https://github.com/kubesphere/ks-installer/tree/release-3.3/roles/ks-monitor/files/prometheus/etcd/prometheus-rulesEtcd.yaml) to your Prometheus config for KubeSphere v3.3.0 to work properly. + +- Configure your Prometheus to scrape metrics from the same targets as that in [serviceMonitor](https://github.com/kubesphere/ks-installer/tree/release-3.3/roles/ks-monitor/files/prometheus/) of each component. + +{{}} + +1. Obtain `ks-installer` that KubeSphere v3.3.0 uses. + + ```bash + cd ~ && git clone -b release-3.3 https://github.com/kubesphere/ks-installer.git && cd ks-installer/roles/ks-monitor/files/prometheus + ``` + +2. Create `kustomization.yaml`, fill the following content. + + ```yaml + apiVersion: kustomize.config.k8s.io/v1beta1 + kind: Kustomization + namespace: + resources: + - ./alertmanager/alertmanager-secret.yaml + - ./etcd/prometheus-rulesEtcd.yaml + - ./kube-state-metrics/kube-state-metrics-serviceMonitor.yaml + - ./kubernetes/kubernetes-prometheusRule.yaml + - ./kubernetes/kubernetes-serviceKubeControllerManager.yaml + - ./kubernetes/kubernetes-serviceKubeScheduler.yaml + - ./kubernetes/kubernetes-serviceMonitorApiserver.yaml + - ./kubernetes/kubernetes-serviceMonitorCoreDNS.yaml + - ./kubernetes/kubernetes-serviceMonitorKubeControllerManager.yaml + - ./kubernetes/kubernetes-serviceMonitorKubeScheduler.yaml + - ./kubernetes/kubernetes-serviceMonitorKubelet.yaml + - ./node-exporter/node-exporter-serviceMonitor.yaml + - ./prometheus/prometheus-clusterRole.yaml + ``` + + {{< notice note >}} + + - Set the value of `namespace` to your own namespace in which the Prometheus stack is deployed. For example, it is `monitoring` if you install Prometheus in the `monitoring` namespace in Step 2. + - If you have enabled the alerting component for KubeSphere, supplement yaml paths of `thanos-ruler` into `kustomization.yaml`. + + {{}} + +3. Install the required components of KubeSphere. + + ```bash + kubectl apply -k . + ``` + +4. Find the Prometheus CR which is usually `k8s` in your own namespace. + + ```bash + kubectl -n get prometheus + ``` + +5. Set the Prometheus rule evaluation interval to 1m to be consistent with the KubeSphere v3.3.0 customized ServiceMonitor. The Rule evaluation interval should be greater than or equal to the scrape interval. + + ```bash + kubectl -n patch prometheus k8s --patch '{ + "spec": { + "evaluationInterval": "1m" + } + }' --type=merge + ``` + +### Step 4. Change KubeSphere's `monitoring endpoint` + +Now that your own Prometheus stack is up and running, you can change KubeSphere's monitoring endpoint to use your own Prometheus. + +1. Run the following command to edit `kubesphere-config`. + + ```bash + kubectl edit cm -n kubesphere-system kubesphere-config + ``` + +2. Navigate to the `monitoring endpoint` section, as shown in the following: + + ```bash + monitoring: + endpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090 + ``` + +3. Change `monitoring endpoint` to your own Prometheus: + + ```bash + monitoring: + endpoint: http://prometheus-operated.monitoring.svc:9090 + ``` + +4. If you have enabled the alerting component of KubeSphere, navigate to `prometheusEndpoint` and `thanosRulerEndpoint` of `alerting`, and change the values according to the following sample. 
KubeSphere APIServer will restart automatically to make your configurations take effect. + + ```yaml + ... + alerting: + ... + prometheusEndpoint: http://prometheus-operated.monitoring.svc:9090 + thanosRulerEndpoint: http://thanos-ruler-operated.monitoring.svc:10902 + ... + ... + ``` + +{{< notice warning >}} + +If you enable/disable KubeSphere pluggable components following [this guide](../../../pluggable-components/overview/) , the `monitoring endpoint` will be reset to the original value. In this case, you need to change it to the new one. + +{{}} \ No newline at end of file diff --git a/content/en/docs/v3.4/faq/observability/logging.md b/content/en/docs/v3.4/faq/observability/logging.md new file mode 100644 index 000000000..6d483a355 --- /dev/null +++ b/content/en/docs/v3.4/faq/observability/logging.md @@ -0,0 +1,163 @@ +--- +title: "Observability — Logging FAQ" +keywords: "Kubernetes, Elasticsearch, KubeSphere, Logging, logs" +description: "Questions asked frequently about the logging functionality." +linkTitle: "Logging" +weight: 16310 +--- + +This page contains some of the frequently asked questions about logging. + +- [How to change the log store to the external Elasticsearch and shut down the internal Elasticsearch](#how-to-change-the-log-store-to-the-external-elasticsearch-and-shut-down-the-internal-elasticsearch) +- [How to change the log store to Elasticsearch with X-Pack Security enabled](#how-to-change-the-log-store-to-elasticsearch-with-x-pack-security-enabled) +- [How to set the data retention period of logs, events, auditing logs, and Istio logs](#how-to-set-the-data-retention-period-of-logs-events-auditing-logs-and-istio-logs) +- [I cannot find logs from workloads on some nodes using Toolbox](#i-cannot-find-logs-from-workloads-on-some-nodes-using-toolbox) +- [The log search page in Toolbox gets stuck when loading](#the-log-search-page-in-toolbox-gets-stuck-when-loading) +- [Toolbox shows no log record today](#toolbox-shows-no-log-record-today) +- [I see Internal Server Error when viewing logs in Toolbox](#i-see-internal-server-error-when-viewing-logs-in-toolbox) +- [How to make KubeSphere only collect logs from specified workloads](#how-to-make-kubesphere-only-collect-logs-from-specified-workloads) + +## How to change the log store to the external Elasticsearch and shut down the internal Elasticsearch + +If you are using the KubeSphere internal Elasticsearch and want to change it to your external alternate, follow the steps below. If you haven't enabled the logging system, refer to [KubeSphere Logging System](../../../pluggable-components/logging/) to setup your external Elasticsearch directly. + +1. First, you need to update the KubeKey configuration. Execute the following command: + + ```bash + kubectl edit cc -n kubesphere-system ks-installer + ``` + +2. Comment out `es.elasticsearchDataXXX`, `es.elasticsearchMasterXXX` and `status.logging`, and set `es.externalElasticsearchHost` to the address of your Elasticsearch and `es.externalElasticsearchPort` to its port number. Below is an example for your reference. + + ```yaml + apiVersion: installer.kubesphere.io/v1alpha1 + kind: ClusterConfiguration + metadata: + name: ks-installer + namespace: kubesphere-system + ... + spec: + ... + common: + es: + # elasticsearchDataReplicas: 1 + # elasticsearchDataVolumeSize: 20Gi + # elasticsearchMasterReplicas: 1 + # elasticsearchMasterVolumeSize: 4Gi + elkPrefix: logstash + logMaxAge: 7 + externalElasticsearchHost: <192.168.0.2> + externalElasticsearchPort: <9200> + ... 
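+    # Keeping the status.logging lines below commented out marks logging as "not yet configured",
+    # which is what prompts ks-installer to re-run the logging task against the external
+    # Elasticsearch when it is restarted in the next step.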
+ status: + ... + # logging: + # enabledTime: 2020-08-10T02:05:13UTC + # status: enabled + ... + ``` + +3. Rerun `ks-installer`. + + ```bash + kubectl rollout restart deploy -n kubesphere-system ks-installer + ``` + +4. Remove the internal Elasticsearch by running the following command. Please make sure you have backed up data in the internal Elasticsearch. + + ```bash + helm uninstall -n kubesphere-logging-system elasticsearch-logging + ``` + +5. Change the configuration of Jaeger if Istio is enabled. + + ```yaml + $ kubectl -n istio-system edit jaeger + ... + options: + es: + index-prefix: logstash + server-urls: http://elasticsearch-logging-data.kubesphere-logging-system.svc:9200 # Change it to the external address. + ``` + +## How to change the log store to Elasticsearch with X-Pack Security enabled + +Currently, KubeSphere doesn't support the integration of Elasticsearch with X-Pack Security enabled. This feature is coming soon. + +## How to set the data retention period of logs, events, auditing logs, and Istio logs + +Before KubeSphere 3.3, you can only set the retention period of logs, which is 7 days by default. In KubeSphere 3.3, apart from logs, you can also set the data retention period of events, auditing logs, and Istio logs. + +You need to update the KubeKey configuration and rerun `ks-installer`. + +1. Execute the following command: + + ```bash + kubectl edit cc -n kubesphere-system ks-installer + ``` + +2. In the YAML file, if you only want to change the retention period of logs, you can directly change the default value of `logMaxAge` to a desired one. If you want to set the retention period of events, auditing logs, and Istio logs, you need to add parameters `auditingMaxAge`, `eventMaxAge`, and `istioMaxAge` and set a value for them, respectively, as shown in the following example: + + + ```yaml + apiVersion: installer.kubesphere.io/v1alpha1 + kind: ClusterConfiguration + metadata: + name: ks-installer + namespace: kubesphere-system + ... + spec: + ... + common: + es: # Storage backend for logging, events and auditing. + ... + logMaxAge: 7 # Log retention time in built-in Elasticsearch. It is 7 days by default. + auditingMaxAge: 2 + eventMaxAge: 1 + istioMaxAge: 4 + ... + ``` + +3. Rerun `ks-installer`. + + ```bash + kubectl rollout restart deploy -n kubesphere-system ks-installer + ``` + +## I cannot find logs from workloads on some nodes using Toolbox + +If you deployed KubeSphere through [multi-node installation](../../../installing-on-linux/introduction/multioverview/) and are using symbolic links for the docker root directory, make sure all nodes follow the same symbolic links. Logging agents are deployed in DaemonSets onto nodes. Any discrepancy in container log paths may cause collection failures on that node. + +To find out the docker root directory path on nodes, you can run the following command. Make sure the same value applies to all nodes. + +```bash +docker info -f '{{.DockerRootDir}}' +``` + +## The log search page in Toolbox gets stuck when loading + +If the log search page is stuck when loading, check the storage system you are using. For example, a misconfigured NFS storage system may cause this issue. + +## Toolbox shows no log record today + +Check if your log volume exceeds the storage limit of Elasticsearch. If so, you need to increase the Elasticsearch disk volume. 
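+
+One quick way to check is to query Elasticsearch's `_cat` API from inside the cluster. The sketch below assumes the built-in Elasticsearch service (`elasticsearch-logging-data.kubesphere-logging-system.svc:9200`, the same address referenced earlier on this page); the Pod name `es-disk-check` is arbitrary, and the address needs to be adjusted if you use an external Elasticsearch.
+
+```bash
+# Print per-node disk allocation; a high disk.percent means the log indices have
+# outgrown the volumes backing the Elasticsearch data nodes.
+kubectl -n kubesphere-logging-system run es-disk-check --rm -i --restart=Never \
+  --image=curlimages/curl -- \
+  -s "http://elasticsearch-logging-data.kubesphere-logging-system.svc:9200/_cat/allocation?v"
+```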
+ +## I see Internal Server Error when viewing logs in Toolbox + +There can be several reasons for this issue: + +- Network partition +- Invalid Elasticsearch host and port +- The Elasticsearch health status is red + +## How to make KubeSphere only collect logs from specified workloads + +The KubeSphere logging agent is powered by Fluent Bit. You need to update the Fluent Bit configuration to exclude certain workload logs. To modify the Fluent Bit input configuration, run the following command: + +```bash +kubectl edit input -n kubesphere-logging-system tail +``` + +Update the field `Input.Spec.Tail.ExcludePath`. For example, set the path to `/var/log/containers/*_kube*-system_*.log` to exclude any log from system components. + +For more information, see [Fluent Operator](https://github.com/kubesphere/fluentbit-operator). diff --git a/content/en/docs/v3.4/faq/observability/monitoring.md b/content/en/docs/v3.4/faq/observability/monitoring.md new file mode 100644 index 000000000..cf7d5c2ab --- /dev/null +++ b/content/en/docs/v3.4/faq/observability/monitoring.md @@ -0,0 +1,123 @@ +--- +title: "Observability — Monitoring FAQ" +keywords: "Kubernetes, Prometheus, KubeSphere, Monitoring" +description: "Questions asked frequently about the monitoring functionality." +linkTitle: "Monitoring" +weight: 16320 +--- + +This page contains some of the frequently asked questions about monitoring. + +- [How to access the Prometheus console in KubeSphere](#how-to-access-the-prometheus-console-in-kubesphere) +- [Host port 9100 conflict caused by the node exporter](#host-port-9100-conflict-caused-by-the-node-exporter) +- [Conflicts with the preexisting prometheus operator](#conflicts-with-the-preexisting-prometheus-operator) +- [How to change the monitoring data retention period](#how-to-change-the-monitoring-data-retention-period) +- [No monitoring data for kube-scheduler and kube-controller-manager](#no-monitoring-data-for-kube-scheduler-and-kube-controller-manager) +- [No monitoring data for the last few minutes](#no-monitoring-data-for-the-last-few-minutes) +- [No monitoring data for both nodes and the control plane](#no-monitoring-data-for-both-nodes-and-the-control-plane) +- [Prometheus produces an error log: opening storage failed, no such file or directory: opening storage failed, no such file or directory](#prometheus-produces-an-error-log-opening-storage-failed-no-such-file-or-directory) + +## How to access the Prometheus console in KubeSphere + +The KubeSphere monitoring engine is powered by Prometheus. For debugging purposes, you may want to access the built-in Prometheus service through a NodePort. Run the following command to change the service type to `NodePort`: + +```bash +kubectl edit svc -n kubesphere-monitoring-system prometheus-k8s +``` + +{{< notice note >}} + +To access the Prometheus console, you may need to open relevant ports and configure port forwarding rules depending on your environment. + +{{}} + +## Host port 9100 conflict caused by the node exporter + +If you have processes occupying the host port 9100, the node exporter in `kubesphere-monitoring-system` will be crashing. To resolve the conflict, you need to either terminate the process or assign another available port to the node exporter. + +To adopt another host port, such as `29100`, run the following command and replace all `9100` with `29100` (5 places need to be changed). 
+
+```bash
+kubectl edit ds -n kubesphere-monitoring-system node-exporter
+```
+
+```yaml
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: node-exporter
+  namespace: kubesphere-monitoring-system
+  ...
+spec:
+  ...
+  template:
+    ...
+    spec:
+      containers:
+      - name: node-exporter
+        image: kubesphere/node-exporter:ks-v0.18.1
+        args:
+        - --web.listen-address=127.0.0.1:9100
+        ...
+      - name: kube-rbac-proxy
+        image: kubesphere/kube-rbac-proxy:v0.4.1
+        args:
+        - --logtostderr
+        - --secure-listen-address=[$(IP)]:9100
+        - --upstream=http://127.0.0.1:9100/
+        ...
+        ports:
+        - containerPort: 9100
+          hostPort: 9100
+        ...
+```
+
+## Conflicts with the preexisting Prometheus Operator
+
+If you have deployed Prometheus Operator on your own, make sure it is removed before you install KubeSphere. Otherwise, conflicts may occur because the built-in Prometheus Operator of KubeSphere may select duplicate ServiceMonitor objects.
+
+## How to change the monitoring data retention period
+
+Run the following command to edit the maximum retention period. Navigate to the field `retention` and set a desired retention period (`7d` by default).
+
+```bash
+kubectl edit prometheuses -n kubesphere-monitoring-system k8s
+```
+
+## No monitoring data for kube-scheduler and kube-controller-manager
+
+First, make sure the flag `--bind-address` is set to `0.0.0.0` (default) rather than `127.0.0.1`. Prometheus may need to access these components from other hosts.
+
+Second, check the presence of endpoint objects for `kube-scheduler` and `kube-controller-manager`. If they are missing, create them manually by creating services and selecting target Pods.
+
+```bash
+kubectl get ep -n kube-system | grep -E 'kube-scheduler|kube-controller-manager'
+```
+
+## No monitoring data for the last few minutes
+
+Check whether your computer's local clock is in sync with the Internet time and with your cluster. A time gap may cause this issue. This may occur especially if your computer resides on an Intranet.
+
+## No monitoring data for both nodes and the control plane
+
+Check your network plugin and make sure that there is no IP Pool overlap between your hosts and Pod network CIDR. It is strongly recommended that you install Kubernetes with [KubeKey](https://github.com/kubesphere/kubekey).
+
+Chinese readers may refer to [the discussion](https://ask.kubesphere.io/forum/d/2027/16) in the KubeSphere China forum for more information.
+
+## Prometheus produces an error log: opening storage failed, no such file or directory
+
+If the Prometheus Pod in `kubesphere-monitoring-system` is crashing and produces the following error log, your Prometheus data may be corrupt and needs to be deleted manually to recover.
+
+```shell
+level=error ts=2020-10-14T17:43:30.485Z caller=main.go:764 err="opening storage failed: block dir: \"/prometheus/01EM0016F8FB33J63RNHFMHK3\": open /prometheus/01EM0016F8FB33J63RNHFMHK3/meta.json: no such file or directory"
+```
+
+Exec into the Prometheus Pod (if possible), and remove the block directory `/prometheus/01EM0016F8FB33J63RNHFMHK3`:
+
+```bash
+kubectl exec -it -n kubesphere-monitoring-system prometheus-k8s-0 -c prometheus sh
+
+# Inside the Pod shell, remove the corrupt block directory (relative to /prometheus).
+rm -rf 01EM0016F8FB33J63RNHFMHK3/
+```
+
+Alternatively, you can simply delete the directory from the persistent volume bound to the Prometheus PVC.
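+
+If you take the persistent-volume route, a sketch like the following can help you locate the volume behind the Prometheus PVC. The PVC name below assumes the default `prometheus-k8s` StatefulSet naming; confirm it with the first command before relying on it.
+
+```bash
+# List the PVCs used by Prometheus, then resolve the PersistentVolume backing one of them
+# to find out where the corrupt block directory lives on the underlying storage.
+kubectl -n kubesphere-monitoring-system get pvc
+PV=$(kubectl -n kubesphere-monitoring-system get pvc prometheus-k8s-db-prometheus-k8s-0 -o jsonpath='{.spec.volumeName}')
+kubectl get pv "$PV" -o yaml    # check .spec for the storage backend, volume ID, or host path
+```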
diff --git a/content/en/docs/v3.4/faq/upgrade/_index.md b/content/en/docs/v3.4/faq/upgrade/_index.md new file mode 100644 index 000000000..08a35d25d --- /dev/null +++ b/content/en/docs/v3.4/faq/upgrade/_index.md @@ -0,0 +1,7 @@ +--- +title: "Upgrade FAQ" +keywords: 'Kubernetes, KubeSphere, upgrade, FAQ' +description: 'Faq about upgrade' +layout: "second" +weight: 16200 +--- diff --git a/content/en/docs/v3.4/faq/upgrade/qingcloud-csi-upgrade.md b/content/en/docs/v3.4/faq/upgrade/qingcloud-csi-upgrade.md new file mode 100644 index 000000000..3b6254b04 --- /dev/null +++ b/content/en/docs/v3.4/faq/upgrade/qingcloud-csi-upgrade.md @@ -0,0 +1,60 @@ +--- +title: "Upgrade QingCloud CSI" +keywords: "Kubernetes, upgrade, KubeSphere, v3.2.0" +description: "Upgrade the QingCloud CSI after you upgrade KubeSphere." +linkTitle: "Upgrade QingCloud CSI" +weight: 16210 +--- + +## Upgrade QingCloud CSI after Upgrading KubeSphere + +Currently QingCloud CSI cannot be upgraded by KubeKey. You can run the following command to upgrade the CSI manually after you upgrade KubeSphere: + +``` +git clone https://github.com/yunify/qingcloud-csi.git +``` + +``` +cd qingcloud-csi/ +``` + +``` +git checkout v1.2.0 +``` + +``` +kubectl delete -f deploy/disk/kubernetes/releases/qingcloud-csi-disk-v1.1.1.yaml +``` + +``` +kubectl delete sc csi-qingcloud +``` + +``` +helm repo add test https://charts.kubesphere.io/test +``` + +``` +helm install test/csi-qingcloud --name-template csi-qingcloud --namespace kube-system \ + --set config.qy_access_key_id=KEY,config.qy_secret_access_key=SECRET,config.zone=ZONE,sc.type=2 +``` + +Wait until the CSI controller and DaemonSet are up and running. + +``` +$ kubectl get po -n kube-system | grep csi +csi-qingcloud-controller-56979d46cb-qk9ck 5/5 Running 0 24h +csi-qingcloud-node-4s8n5 2/2 Running 0 24h +csi-qingcloud-node-65dqn 2/2 Running 0 24h +csi-qingcloud-node-khk49 2/2 Running 0 24h +csi-qingcloud-node-nz9q9 2/2 Running 0 24h +csi-qingcloud-node-pxr56 2/2 Running 0 24h +csi-qingcloud-node-whqhk 2/2 Running 0 24h +``` + +Run the following command to check whether the CSI image version is 1.2.x: + +``` +$ kubectl get po -n kube-system csi-qingcloud-controller-56979d46cb-qk9ck -ojson | jq '.spec.containers[].image' | grep qingcloud +"csiplugin/csi-qingcloud:v1.2.0-rc.4" +``` diff --git a/content/en/docs/v3.4/installing-on-kubernetes/_index.md b/content/en/docs/v3.4/installing-on-kubernetes/_index.md new file mode 100644 index 000000000..f83d3d1f3 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-kubernetes/_index.md @@ -0,0 +1,28 @@ +--- +title: "Installing on Kubernetes" +description: "Demonstrate how to install KubeSphere on Kubernetes either hosted on cloud or on-premises." +layout: "second" + +linkTitle: "Installing on Kubernetes" +weight: 4000 + +icon: "/images/docs/v3.3/docs.svg" +--- + +This chapter demonstrates how to deploy KubeSphere on existing Kubernetes clusters hosted on cloud or on-premises. As a highly flexible solution to container orchestration, KubeSphere can be deployed across various Kubernetes engines. + +## Most Popular Pages + +Below you will find some of the most viewed and helpful pages in this chapter. It is highly recommended that you refer to them first. + +{{< popularPage icon="/images/docs/v3.3/brand-icons/gke.jpg" title="Deploy KubeSphere on GKE" description="Provision KubeSphere on existing Kubernetes clusters on GKE." 
link="../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke/" >}} + +{{< popularPage icon="/images/docs/v3.3/bitmap.jpg" title="Deploy KubeSphere on AWS EKS" description="Provision KubeSphere on existing Kubernetes clusters on EKS." link="../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks/" >}} + +{{< popularPage icon="/images/docs/v3.3/brand-icons/aks.jpg" title="Deploy KubeSphere on AKS" description="Provision KubeSphere on existing Kubernetes clusters on AKS." link="../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks/" >}} + +{{< popularPage icon="/images/docs/v3.3/brand-icons/huawei.svg" title="Deploy KubeSphere on CCE" description="Provision KubeSphere on existing Kubernetes clusters on Huawei CCE." link="../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-huaweicloud-cce/" >}} + +{{< popularPage icon="/images/docs/v3.3/brand-icons/oracle.jpg" title="Deploy KubeSphere on Oracle OKE" description="Provision KubeSphere on existing Kubernetes clusters on OKE." link="../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke/" >}} + +{{< popularPage icon="/images/docs/v3.3/brand-icons/digital-ocean.jpg" title="Deploy KubeSphere on DO" description="Provision KubeSphere on existing Kubernetes clusters on DigitalOcean." link="../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do/" >}} diff --git a/content/en/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/_index.md b/content/en/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/_index.md new file mode 100644 index 000000000..917923708 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Installing on Hosted Kubernetes" +weight: 4200 + +_build: + render: false +--- \ No newline at end of file diff --git a/content/en/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks.md b/content/en/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks.md new file mode 100644 index 000000000..9e12fa160 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks.md @@ -0,0 +1,131 @@ +--- +title: "Deploy KubeSphere on AKS" +keywords: "KubeSphere, Kubernetes, Installation, Azure, AKS" +description: "Learn how to deploy KubeSphere on Azure Kubernetes Service." + +weight: 4210 +--- + +This guide walks you through the steps of deploying KubeSphere on [Azure Kubernetes Service](https://docs.microsoft.com/en-us/azure/aks/). + +## Prepare an AKS cluster + +Azure can help you implement infrastructure as code by providing resource deployment automation options. Commonly adopted tools include [ARM templates](https://docs.microsoft.com/en-us/azure/azure-resource-manager/templates/overview) and [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/what-is-azure-cli?view=azure-cli-latest). In this guide, we will use Azure CLI to create all the resources that are needed for the installation of KubeSphere. + +### Use Azure Cloud Shell + +You don't have to install Azure CLI on your machine as Azure provides a web-based terminal. Click the Cloud Shell button on the menu bar at the upper-right corner in Azure portal. + +![Cloud Shell](/images/docs/v3.3/aks/aks-launch-icon.png) + +Select **Bash** Shell. + +![Bash Shell](/images/docs/v3.3/aks/aks-choices-bash.png) + +### Create a Resource Group + +An Azure resource group is a logical group in which Azure resources are deployed and managed. 
The following example creates a resource group named `KubeSphereRG` in the location `westus`. + +```bash +az group create --name KubeSphereRG --location westus +``` + +### Create an AKS Cluster + +Use the command `az aks create` to create an AKS cluster. The following example creates a cluster named `KuberSphereCluster` with three nodes. This will take several minutes to complete. + +```bash +az aks create --resource-group KubeSphereRG --name KuberSphereCluster --node-count 3 --enable-addons monitoring --generate-ssh-keys +``` + +{{< notice note >}} + +You can use `--node-vm-size` or `-s` option to change the size of Kubernetes nodes. The default node size is Standard_DS2_v2 (2vCPU, 7GB memory). For more options, see [az aks create](https://docs.microsoft.com/en-us/cli/azure/aks?view=azure-cli-latest#az-aks-create). + +{{}} + +### Connect to the Cluster + +To configure kubectl to connect to the Kubernetes cluster, use the command `az aks get-credentials`. This command downloads the credentials and configures that the Kubernetes CLI will use. + +```bash +az aks get-credentials --resource-group KubeSphereRG --name KuberSphereCluster +``` + +```bash +$ kubectl get nodes +NAME STATUS ROLES AGE VERSION +aks-nodepool1-23754246-vmss000000 Ready agent 38m v1.16.13 +``` + +### Check Azure Resources in the Portal + +After you execute all the commands above, you can see there are 2 Resource Groups created in Azure Portal. + +![Resource groups](/images/docs/v3.3/aks/aks-create-command.png) + +Azure Kubernetes Services itself will be placed in `KubeSphereRG`. + +![Azure Kubernetes Services](/images/docs/v3.3/aks/aks-dashboard.png) + +All the other Resources will be placed in `MC_KubeSphereRG_KuberSphereCluster_westus`, such as VMs, Load Balancer and Virtual Network. + +![Azure Kubernetes Services](/images/docs/v3.3/aks/aks-all-resources.png) + +## Deploy KubeSphere on AKS + +To start deploying KubeSphere, use the following commands. + +```bash +kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + +kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml +``` + +You can inspect the logs of installation through the following command: + +```bash +kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f +``` + +## Access KubeSphere Console + +To access KubeSphere console from a public IP address, you need to change the service type to `LoadBalancer`. + +```bash +kubectl edit service ks-console -n kubesphere-system +``` + +Find the following section and change the type to `LoadBalancer`. + +```yaml +spec: + clusterIP: 10.0.78.113 + externalTrafficPolicy: Cluster + ports: + - name: nginx + nodePort: 30880 + port: 80 + protocol: TCP + targetPort: 8000 + selector: + app: ks-console + tier: frontend + version: v3.0.0 + sessionAffinity: None + type: LoadBalancer # Change NodePort to LoadBalancer +status: + loadBalancer: {} +``` + +After saving the configuration of ks-console service, you can use the following command to get the public IP address (under `EXTERNAL-IP`). Use the IP address to access the console with the default account and password (`admin/P@88w0rd`). 
+
+```bash
+$ kubectl get svc/ks-console -n kubesphere-system
+NAME         TYPE           CLUSTER-IP    EXTERNAL-IP     PORT(S)        AGE
+ks-console   LoadBalancer   10.0.181.93   13.86.xxx.xxx   80:30194/TCP   13m
+```
+
+## Enable Pluggable Components (Optional)
+
+The example above demonstrates the process of a default minimal installation. For pluggable components, you can enable them either before or after the installation. See [Enable Pluggable Components](../../../pluggable-components/) for details.
diff --git a/content/en/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do.md b/content/en/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do.md
new file mode 100644
index 000000000..9eeb34aa2
--- /dev/null
+++ b/content/en/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do.md
@@ -0,0 +1,113 @@
+---
+title: "Deploy KubeSphere on DigitalOcean Kubernetes"
+keywords: 'Kubernetes, KubeSphere, DigitalOcean, Installation'
+description: 'Learn how to deploy KubeSphere on DigitalOcean.'
+
+weight: 4230
+---
+
+![KubeSphere+DOKS](/images/docs/v3.3/do/KubeSphere-DOKS.png)
+
+This guide walks you through the steps of deploying KubeSphere on [DigitalOcean Kubernetes](https://www.digitalocean.com/products/kubernetes/).
+
+## Prepare a DOKS Cluster
+
+A Kubernetes cluster in DO is a prerequisite for installing KubeSphere. Go to your [DO account](https://cloud.digitalocean.com/) and refer to the image below to create a cluster from the navigation menu.
+
+![create-cluster-do](/images/docs/v3.3/do/create-cluster-do.png)
+
+You need to select:
+
+1. Kubernetes version (for example, *1.18.6-do.0*)
+2. Datacenter region (for example, *Frankfurt*)
+3. VPC network (for example, *default-fra1*)
+4. Cluster capacity (for example, 2 standard nodes with 2 vCPUs and 4GB of RAM each)
+5. A name for the cluster (for example, *kubesphere-3*)
+
+![config-cluster-do](/images/docs/v3.3/do/config-cluster-do.png)
+
+{{< notice note >}}
+
+- To install KubeSphere 3.3 on Kubernetes, your Kubernetes version must be v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and * v1.24.x. For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatibility. Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x.
+- 2 nodes are included in this example. You can add more nodes based on your own needs, especially in a production environment.
+- The machine type Standard/4 GB/2 vCPUs is for minimal installation. If you plan to enable several pluggable components or use the cluster for production, you can upgrade your nodes to a more powerful type (such as CPU-Optimized / 8 GB / 4 vCPUs). It seems that DigitalOcean provisions the control plane nodes based on the type of the worker nodes, and for Standard ones the API server can become unresponsive quite soon.
+
+{{}}
+
+When the cluster is ready, you can download the config file for kubectl.
+
+![download-config-file](/images/docs/v3.3/do/download-config-file.png)
+
+## Install KubeSphere on DOKS
+
+Now that the cluster is ready, you can install KubeSphere following the steps below:
+
+- Install KubeSphere using kubectl. The following commands are only for the default minimal installation.
+ + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml + ``` + +- Inspect the logs of installation: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + +When the installation finishes, you can see the following message: + +```bash +##################################################### +### Welcome to KubeSphere! ### +##################################################### +Console: http://10.XXX.XXX.XXX:30880 +Account: admin +Password: P@88w0rd +NOTES: + 1. After logging into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are ready. + 2. Please modify the default password after login. +##################################################### +https://kubesphere.io 2020-xx-xx xx:xx:xx +``` + +## Access KubeSphere Console + +Now that KubeSphere is installed, you can access the web console of KubeSphere by following the steps below. + +- Go to the Kubernetes Dashboard provided by DigitalOcean. + + ![kubernetes-dashboard-access](/images/docs/v3.3/do/kubernetes-dashboard-access.png) + +- Select the **kubesphere-system** namespace. + + ![kubernetes-dashboard-namespace](/images/docs/v3.3/do/kubernetes-dashboard-namespace.png) + +- In **Services** under **Service**, edit the service **ks-console**. + + ![kubernetes-dashboard-edit](/images/docs/v3.3/do/kubernetes-dashboard-edit.png) + +- Change the type from `NodePort` to `LoadBalancer`. Save the file when you finish. + + ![lb-change](/images/docs/v3.3/do/lb-change.png) + +- Access the KubeSphere's web console using the endpoint generated by DO. + + ![access-console](/images/docs/v3.3/do/access-console.png) + + {{< notice tip >}} + + Instead of changing the service type to `LoadBalancer`, you can also access KubeSphere console via `NodeIP:NodePort` (service type set to `NodePort`). You need to get the public IP of one of your nodes. + + {{}} + +- Log in to the console with the default account and password (`admin/P@88w0rd`). In the cluster overview page, you can see the dashboard. + +## Enable Pluggable Components (Optional) + +The example above demonstrates the process of a default minimal installation. To enable other components in KubeSphere, see [Enable Pluggable Components](../../../pluggable-components/) for more details. diff --git a/content/en/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks.md b/content/en/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks.md new file mode 100644 index 000000000..4cab20ec0 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks.md @@ -0,0 +1,184 @@ +--- +title: "Deploy KubeSphere on AWS EKS" +keywords: 'Kubernetes, KubeSphere, EKS, Installation' +description: 'Learn how to deploy KubeSphere on Amazon Elastic Kubernetes Service.' + +weight: 4220 +--- + +This guide walks you through the steps of deploying KubeSphere on [AWS EKS](https://docs.aws.amazon.com/eks/latest/userguide/what-is-eks.html). 
You also can visit [KubeSphere on AWS Quick Start](https://aws.amazon.com/quickstart/architecture/qingcloud-kubesphere/) which uses Amazon Web Services (AWS) CloudFormation templates to help end users automatically provision an Amazon Elastic Kubernetes Service (Amazon EKS) and KubeSphere environment on the AWS Cloud. + +## Install the AWS CLI + +First we need to install the AWS CLI. Below is an example for macOS and please refer to [Getting Started EKS](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html) for other operating systems. + +```shell +pip3 install awscli --upgrade --user +``` + +Check the installation with `aws --version`. +![check-aws-cli](/images/docs/v3.3/eks/check-aws-cli.png) + +## Prepare an EKS Cluster + +1. A standard Kubernetes cluster in AWS is a prerequisite of installing KubeSphere. Go to the navigation menu and refer to the image below to create a cluster. + ![create-cluster-eks](/images/docs/v3.3/eks/eks-launch-icon.png) + +2. On the **Configure cluster** page, fill in the following fields: + ![config-cluster-page](/images/docs/v3.3/eks/config-cluster-page.png) + + - Name: A unique name for your cluster. + + - Kubernetes version: The version of Kubernetes to use for your cluster. + + - Cluster service role: Select the IAM role that you created with [Create your Amazon EKS cluster IAM role](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html#role-create). + + - Secrets encryption (Optional): Choose to enable envelope encryption of Kubernetes secrets using the AWS Key Management Service (AWS KMS). If you enable envelope encryption, the Kubernetes secrets are encrypted using the customer master key (CMK) that you select. The CMK must be symmetric, created in the same region as the cluster. If the CMK was created in a different account, the user must have access to the CMK. For more information, see [Allowing users in other accounts to use a CMK](https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-modifying-external-accounts.html) in the *AWS Key Management Service Developer Guide*. + + - Kubernetes secrets encryption with an AWS KMS CMK requires Kubernetes version 1.13 or later. If no keys are listed, you must create one first. For more information, see [Creating keys](https://docs.aws.amazon.com/kms/latest/developerguide/create-keys.html). + + - Tags (Optional): Add any tags to your cluster. For more information, see [Tagging your Amazon EKS resources](https://docs.aws.amazon.com/eks/latest/userguide/eks-using-tags.html). + +3. Select **Next**. On the **Specify networking** page, select values for the following fields: + ![network](/images/docs/v3.3/eks/networking.png) + + - VPC: The VPC that you created previously in [Create your Amazon EKS cluster VPC](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html#vpc-create). You can find the name of your VPC in the drop-down list. + + - Subnets: By default, the available subnets in the VPC specified in the previous field are preselected. Select any subnet that you don't want to host cluster resources, such as worker nodes or load balancers. + + - Security groups: The SecurityGroups value from the AWS CloudFormation output that you generated with [Create your Amazon EKS cluster VPC](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html#vpc-create). This security group has ControlPlaneSecurityGroup in the drop-down name. 
+ + - For **Cluster endpoint access**, choose one of the following options: + ![endpoints](/images/docs/v3.3/eks/endpoints.png) + + - Public: Enables only public access to your cluster's Kubernetes API server endpoint. Kubernetes API requests that originate from outside of your cluster's VPC use the public endpoint. By default, access is allowed from any source IP address. You can optionally restrict access to one or more CIDR ranges such as 192.168.0.0/16, for example, by selecting **Advanced settings** and then selecting **Add source**. + + - Private: Enables only private access to your cluster's Kubernetes API server endpoint. Kubernetes API requests that originate from within your cluster's VPC use the private VPC endpoint. + + {{< notice note >}} + If you created a VPC without outbound internet access, then you must enable private access. + {{}} + + - Public and private: Enables public and private access. + +4. Select **Next**. On the **Configure logging** page, you can optionally choose which log types that you want to enable. By default, each log type is **Disabled**. For more information, see [Amazon EKS control plane logging](https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html). + ![logging](/images/docs/v3.3/eks/logging.png) + +5. Select **Next**. On the **Review and create page**, review the information that you entered or selected on the previous pages. Select **Edit** if you need to make changes to any of your selections. Once you're satisfied with your settings, select **Create**. The **Status** field shows **CREATING** until the cluster provisioning process completes. + ![revies](/images/docs/v3.3/eks/review.png) + + - For more information about the previous options, see [Modifying cluster endpoint access](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html#modify-endpoint-access). + When your cluster provisioning is complete (usually between 10 and 15 minutes), save the API server endpoint and Certificate authority values. These are used in your kubectl configuration. + ![creating](/images/docs/v3.3/eks/creating.png) + +6. Create **Node Group** and define 3 nodes in this cluster. + ![node-group](/images/docs/v3.3/eks/node-group.png) + +7. Configure the node group. + ![config-node-group](/images/docs/v3.3/eks/config-node-grop.png) + + {{< notice note >}} + +- To install KubeSphere 3.3 on Kubernetes, your Kubernetes version must be v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and * v1.24.x. For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatability. Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x. +- 3 nodes are included in this example. You can add more nodes based on your own needs especially in a production environment. +- The machine type t3.medium (2 vCPU, 4GB memory) is for minimal installation. If you want to enable pluggable components or use the cluster for production, please select a machine type with more resources. +- For other settings, you can change them as well based on your own needs or use the default value. + + {{}} + +8. When the EKS cluster is ready, you can connect to the cluster with kubectl. + +## Configure kubectl + +We will use the kubectl command-line utility for communicating with the cluster API server. First, get the kubeconfig of the EKS cluster created just now. + +1. Configure your AWS CLI credentials. 
+
+    ```shell
+    $ aws configure
+    AWS Access Key ID [None]: AKIAIOSFODNN7EXAMPLE
+    AWS Secret Access Key [None]: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
+    Default region name [None]: region-code
+    Default output format [None]: json
+    ```
+
+2. Create your kubeconfig file with the AWS CLI.
+
+    ```shell
+    aws eks --region us-west-2 update-kubeconfig --name cluster_name
+    ```
+
+    - By default, the resulting configuration file is created at the default kubeconfig path (`.kube/config`) in your home directory or merged with an existing kubeconfig at that location. You can specify another path with the `--kubeconfig` option.
+
+    - You can specify an IAM role ARN with the `--role-arn` option to use for authentication when you issue kubectl commands. Otherwise, the IAM entity in your default AWS CLI or SDK credential chain is used. You can view your default AWS CLI or SDK identity by running the `aws sts get-caller-identity` command.
+
+    For more information, see the help page with the `aws eks update-kubeconfig help` command or see [update-kubeconfig](https://docs.aws.amazon.com/cli/latest/reference/eks/update-kubeconfig.html) in the *AWS CLI Command Reference*.
+
+3. Test your configuration.
+
+    ```bash
+    kubectl get svc
+    ```
+
+## Install KubeSphere on EKS
+
+- Install KubeSphere using kubectl. The following commands are only for the default minimal installation.
+
+  ```bash
+  kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml
+
+  kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml
+  ```
+
+- Inspect the logs of installation:
+
+  ```bash
+  kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f
+  ```
+
+- When the installation finishes, you can see the following message:
+
+  ```bash
+  #####################################################
+  ###              Welcome to KubeSphere!           ###
+  #####################################################
+  Account: admin
+  Password: P@88w0rd
+  NOTES:
+    1. After logging into the console, please check the
+       monitoring status of service components in
+       the "Cluster Management". If any service is not
+       ready, please wait patiently until all components
+       are ready.
+    2. Please modify the default password after login.
+  #####################################################
+  https://kubesphere.io             2020-xx-xx xx:xx:xx
+  ```
+
+## Access KubeSphere Console

+Now that KubeSphere is installed, you can access the web console of KubeSphere by following the steps below.
+
+- Check the service of KubeSphere console through the following command.
+
+  ```shell
+  kubectl get svc -n kubesphere-system
+  ```
+
+- Edit the configuration of the service **ks-console** by executing `kubectl edit svc ks-console -n kubesphere-system` and change `type` from `NodePort` to `LoadBalancer`. Save the file when you finish.
+![loadbalancer](/images/docs/v3.3/eks/loadbalancer.png)
+
+- Run `kubectl get svc -n kubesphere-system` and get your external IP.
+  ![external-ip](/images/docs/v3.3/eks/external-ip.png)
+
+- Access the web console of KubeSphere using the external IP generated by EKS.
+
+- Log in to the console with the default account and password (`admin/P@88w0rd`). In the cluster overview page, you can see the dashboard.
+
+## Enable Pluggable Components (Optional)
+
+The example above demonstrates the process of a default minimal installation.
To enable other components in KubeSphere, see [Enable Pluggable Components](../../../pluggable-components/) for more details. + +## Reference + +[Getting started with the AWS Management Console](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html) diff --git a/content/en/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke.md b/content/en/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke.md new file mode 100644 index 000000000..8e24229c1 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke.md @@ -0,0 +1,106 @@ +--- +title: "Deploy KubeSphere on GKE" +keywords: 'Kubernetes, KubeSphere, GKE, Installation' +description: 'Learn how to deploy KubeSphere on Google Kubernetes Engine.' + +weight: 4240 +--- + +![KubeSphere+GKE](https://pek3b.qingstor.com/kubesphere-docs/png/20191123145223.png) + +This guide walks you through the steps of deploying KubeSphere on [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/). + +## Prepare a GKE Cluster + +- A standard Kubernetes cluster in GKE is a prerequisite of installing KubeSphere. Go to the navigation menu and refer to the image below to create a cluster. + + ![create-cluster-gke](https://ap3.qingstor.com/kubesphere-website/docs/create-cluster-gke.jpg) + +- In **Cluster basics**, select a Master version. The static version `1.15.12-gke.2` is used here as an example. + + ![select-master-version](https://ap3.qingstor.com/kubesphere-website/docs/master-version.png) + +- In **default-pool** under **Node Pools**, define 3 nodes in this cluster. + + ![node-number](https://ap3.qingstor.com/kubesphere-website/docs/node-number.png) + +- Go to **Nodes**, select the image type and set the Machine Configuration as below. When you finish, click **Create**. + + ![machine-config](https://ap3.qingstor.com/kubesphere-website/docs/machine-configuration.jpg) + +{{< notice note >}} + +- To install KubeSphere 3.3 on Kubernetes, your Kubernetes version must be v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and * v1.24.x. For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatability. Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x. +- 3 nodes are included in this example. You can add more nodes based on your own needs especially in a production environment. +- The machine type e2-medium (2 vCPU, 4GB memory) is for minimal installation. If you want to enable pluggable components or use the cluster for production, please select a machine type with more resources. +- For other settings, you can change them as well based on your own needs or use the default value. + +{{}} + +- When the GKE cluster is ready, you can connect to the cluster with Cloud Shell. + + ![cloud-shell-gke](https://ap3.qingstor.com/kubesphere-website/docs/cloud-shell.png) + +## Install KubeSphere on GKE + +- Install KubeSphere using kubectl. The following commands are only for the default minimal installation. 
+ + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml + ``` + +- Inspect the logs of installation: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + +- When the installation finishes, you can see the following message: + + ```yaml + ##################################################### + ### Welcome to KubeSphere! ### + ##################################################### + Console: http://10.128.0.44:30880 + Account: admin + Password: P@88w0rd + NOTES: + 1. After logging into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are ready. + 2. Please modify the default password after login. + ##################################################### + https://kubesphere.io 2020-xx-xx xx:xx:xx + ``` + +## Access KubeSphere Console + +Now that KubeSphere is installed, you can access the web console of KubeSphere by following the steps below. + +- In **Services & Ingress**, select the service **ks-console**. + + ![ks-console](https://ap3.qingstor.com/kubesphere-website/docs/console-service.jpg) + +- In **Service details**, click **Edit** and change the type from `NodePort` to `LoadBalancer`. Save the file when you finish. + + ![lb-change](https://ap3.qingstor.com/kubesphere-website/docs/lb-change.jpg) + +- Access the web console of KubeSphere using the endpoint generated by GKE. + + ![access-console](https://ap3.qingstor.com/kubesphere-website/docs/access-console.png) + + {{< notice tip >}} + + Instead of changing the service type to `LoadBalancer`, you can also access KubeSphere console via `NodeIP:NodePort` (service type set to `NodePort`). You may need to open port `30880` in firewall rules. + + {{}} + +- Log in to the console with the default account and password (`admin/P@88w0rd`). In the cluster overview page, you can see the dashboard. + +## Enable Pluggable Components (Optional) + +The example above demonstrates the process of a default minimal installation. To enable other components in KubeSphere, see [Enable Pluggable Components](../../../pluggable-components/) for more details. \ No newline at end of file diff --git a/content/en/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-huaweicloud-cce.md b/content/en/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-huaweicloud-cce.md new file mode 100644 index 000000000..81b5d9f17 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-huaweicloud-cce.md @@ -0,0 +1,112 @@ +--- +title: "Deploy KubeSphere on Huawei CCE" +keywords: "KubeSphere, Kubernetes, installation, huawei, cce" +description: "Learn how to deploy KubeSphere on Huawei Cloud Container Engine." + +weight: 4250 +--- + +This guide walks you through the steps of deploying KubeSphere on [Huaiwei CCE](https://support.huaweicloud.com/en-us/qs-cce/cce_qs_0001.html). + +## Preparation for Huawei CCE + +### Create Kubernetes cluster + +First, create a Kubernetes cluster based on the requirements below. + +- To install KubeSphere 3.3 on Kubernetes, your Kubernetes version must be v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and * v1.24.x. 
For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatability. Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x. +- Ensure the cloud computing network for your Kubernetes cluster works, or use an elastic IP when you use **Auto Create** or **Select Existing**. You can also configure the network after the cluster is created. Refer to [NAT Gateway](https://support.huaweicloud.com/en-us/productdesc-natgateway/en-us_topic_0086739762.html). +- Select `s3.xlarge.2` `4-core|8GB` for nodes and add more if necessary (3 and more nodes are required for a production environment). + +### Create a public key for kubectl + +- Go to **Resource Management** > **Cluster Management** > **Basic Information** > **Network**, and bind `Public apiserver`. +- Select **kubectl** on the right column, go to **Download kubectl configuration file**, and click **Click here to download**, then you will get a public key for kubectl. + + ![Generate Kubectl config file](/images/docs/v3.3/huawei-cce/en/generate-kubeconfig.png) + +After you get the configuration file for kubectl, use kubectl command line to verify the connection to the cluster. + +```bash +$ kubectl version +Client Version: version.Info{Major:"1", Minor:"18", GitVersion:"v1.18.8", GitCommit:"9f2892aab98fe339f3bd70e3c470144299398ace", GitTreeState:"clean", BuildDate:"2020-08-15T10:08:56Z", GoVersion:"go1.14.7", Compiler:"gc", Platform:"darwin/amd64"} +Server Version: version.Info{Major:"1", Minor:"17+", GitVersion:"v1.17.9-r0-CCE20.7.1.B003-17.36.3", GitCommit:"136c81cf3bd314fcbc5154e07cbeece860777e93", GitTreeState:"clean", BuildDate:"2020-08-08T06:01:28Z", GoVersion:"go1.13.9", Compiler:"gc", Platform:"linux/amd64"} +``` + +## Deploy KubeSphere + +### Create a custom StorageClass + +{{< notice note >}} + +Huawei CCE built-in Everest CSI provides StorageClass `csi-disk` which uses SATA (normal I/O) by default, but the actual disk that is used for Kubernetes clusters is either SAS (high I/O) or SSD (extremely high I/O). Therefore, it is suggested that you create an extra StorageClass and set it as **default**. Refer to the official document - [Use kubectl to create a cloud storage](https://support.huaweicloud.com/en-us/usermanual-cce/cce_01_0044.html). + +{{}} + +Below is an example to create a SAS (high I/O) for its corresponding StorageClass. + +```yaml +# csi-disk-sas.yaml + +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "true" + storageclass.kubesphere.io/support-snapshot: "false" + name: csi-disk-sas +parameters: + csi.storage.k8s.io/csi-driver-name: disk.csi.everest.io + csi.storage.k8s.io/fstype: ext4 + # Bind Huawei “high I/O storage. If use “extremely high I/O, change it to SSD. + everest.io/disk-volume-type: SAS + everest.io/passthrough: "true" +provisioner: everest-csi-provisioner +allowVolumeExpansion: true +reclaimPolicy: Delete +volumeBindingMode: Immediate + +``` + +For how to set up or cancel a default StorageClass, refer to Kubernetes official document - [Change Default StorageClass](https://kubernetes.io/docs/tasks/administer-cluster/change-default-storage-class/). + +### Use ks-installer to minimize the deployment + +Use [ks-installer](https://github.com/kubesphere/ks-installer) to deploy KubeSphere on an existing Kubernetes cluster. 
Execute the following commands directly for a minimal installation: + +```bash +kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + +kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml +``` + +Go to **Workload** > **Pod**, and check the running status of the pod in `kubesphere-system` of its namespace to understand the minimal deployment of KubeSphere. Check `ks-console-xxxx` of the namespace to understand the availability of KubeSphere console. + + ![Deploy KubeSphere in Minimal](/images/docs/v3.3/huawei-cce/en/deploy-ks-minimal.png) + +### Expose KubeSphere Console + +Check the running status of Pods in `kubesphere-system` namespace and make sure the basic components of KubeSphere are running. Then expose KubeSphere console. + +Go to **Resource Management** > **Network** and choose the service in `ks-console`. It is suggested that you choose `LoadBalancer` (Public IP is required). The configuration is shown below. + + ![Expose KubeSphere Console](/images/docs/v3.3/huawei-cce/en/expose-ks-console.png) + +Default settings are OK for other detailed configurations. You can also set them based on your needs. + + ![Edit KubeSphere Console SVC](/images/docs/v3.3/huawei-cce/en/edit-ks-console-svc.png) + +After you set LoadBalancer for KubeSphere console, you can visit it via the given address. Go to KubeSphere login page and use the default account (username `admin` and password `P@88w0rd`) to log in. + +## Enable Pluggable Components (Optional) + +The example above demonstrates the process of a default minimal installation. To enable other components in KubeSphere, see [Enable Pluggable Components](../../../pluggable-components/) for more details. + +{{< notice warning >}} + +Before you use Istio-based features of KubeSphere, you have to delete `applications.app.k8s.io` built in Huawei CCE due to the CRD conflict. You can run the command `kubectl delete crd applications.app.k8s.io` directly to delete it. + +{{}} + +After your component is installed, go to the **Cluster Management** page, and you will see the interface below. You can check the status of your component in **System Components**. diff --git a/content/en/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke.md b/content/en/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke.md new file mode 100644 index 000000000..a64faf0b9 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke.md @@ -0,0 +1,147 @@ +--- +title: "Deploy KubeSphere on Oracle OKE" +keywords: 'Kubernetes, KubeSphere, OKE, Installation, Oracle-cloud' +description: 'Learn how to deploy KubeSphere on Oracle Cloud Infrastructure Container Engine for Kubernetes.' + +weight: 4260 +--- + +This guide walks you through the steps of deploying KubeSphere on [Oracle Kubernetes Engine](https://www.oracle.com/cloud/compute/container-engine-kubernetes.html). + +## Create a Kubernetes Cluster + +- A standard Kubernetes cluster in OKE is a prerequisite of installing KubeSphere. Go to the navigation menu and refer to the image below to create a cluster. + + ![oke-cluster](https://ap3.qingstor.com/kubesphere-website/docs/oke-cluster.jpg) + +- In the pop-up window, select **Quick Create** and click **Launch Workflow**. 
+ + ![oke-quickcreate](https://ap3.qingstor.com/kubesphere-website/docs/oke-quickcreate.jpg) + + {{< notice note >}} + + In this example, **Quick Create** is used for demonstration which will automatically create all the resources necessary for a cluster in Oracle Cloud. If you select **Custom Create**, you need to create all the resources (such as VCN and LB Subnets) by yourself. + + {{}} + +- Next, you need to set the cluster with basic information. Here is an example for your reference. When you finish, click **Next**. + + ![set-basic-info](https://ap3.qingstor.com/kubesphere-website/docs/cluster-setting.jpg) + + {{< notice note >}} + + - To install KubeSphere 3.3 on Kubernetes, your Kubernetes version must be v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and * v1.24.x. For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatability. Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x. + - It is recommended that you should select **Public** for **Visibility Type**, which will assign a public IP address for every node. The IP address can be used later to access the web console of KubeSphere. + - In Oracle Cloud, a Shape is a template that determines the number of CPUs, amount of memory, and other resources that are allocated to an instance. `VM.Standard.E2.2 (2 CPUs and 16G Memory)` is used in this example. For more information, see [Standard Shapes](https://docs.cloud.oracle.com/en-us/iaas/Content/Compute/References/computeshapes.htm#vmshapes__vm-standard). + - 3 nodes are included in this example. You can add more nodes based on your own needs especially in a production environment. + + {{}} + +- Review cluster information and click **Create Cluster** if no adjustment is needed. + + ![create-cluster](https://ap3.qingstor.com/kubesphere-website/docs/create-cluster.jpg) + +- After the cluster is created, click **Close**. + + ![cluster-ready](https://ap3.qingstor.com/kubesphere-website/docs/cluster-ready.jpg) + +- Make sure the Cluster Status is **Active** and click **Access Cluster**. + + ![access-cluster](https://ap3.qingstor.com/kubesphere-website/docs/access-cluster.jpg) + +- In the pop-up window, select **Cloud Shell Access** to access the cluster. Click **Launch Cloud Shell** and copy the code provided by Oracle Cloud. + + ![cloud-shell-access](https://ap3.qingstor.com/kubesphere-website/docs/cloudshell-access.png) + +- In Cloud Shell, paste the command so that we can execute the installation command later. + + ![cloud-shell-oke](https://ap3.qingstor.com/kubesphere-website/docs/oke-cloud-shell.png) + + {{< notice warning >}} + + If you do not copy and execute the command above, you cannot proceed with the steps below. + + {{}} + +## Install KubeSphere on OKE + +- Install KubeSphere using kubectl. The following commands are only for the default minimal installation. + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml + ``` + +- Inspect the logs of installation: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + +- When the installation finishes, you can see the following message: + + ```yaml + ##################################################### + ### Welcome to KubeSphere! 
### + ##################################################### + + Console: http://10.0.10.2:30880 + Account: admin + Password: P@88w0rd + + NOTES: + 1. After logging into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are ready. + 2. Please modify the default password after login. + + ##################################################### + https://kubesphere.io 20xx-xx-xx xx:xx:xx + ``` + +## Access KubeSphere Console + +Now that KubeSphere is installed, you can access the web console of KubeSphere either through `NodePort` or `LoadBalancer`. + +- Check the service of KubeSphere console through the following command: + + ```bash + kubectl get svc -n kubesphere-system + ``` + +- The output may look as below. You can change the type to `LoadBalancer` so that the external IP address can be exposed. + + ![console-nodeport](https://ap3.qingstor.com/kubesphere-website/docs/nodeport-console.jpg) + + {{< notice tip >}} + + It can be seen above that the service `ks-console` is being exposed through a NodePort, which means you can access the console directly via `NodeIP:NodePort` (the public IP address of any node is applicable). You may need to open port `30880` in firewall rules. + + {{}} + +- Execute the command to edit the service configuration. + + ```bash + kubectl edit svc ks-console -o yaml -n kubesphere-system + ``` + +- Navigate to `type` and change `NodePort` to `LoadBalancer`. Save the configuration after you finish. + + ![change-svc-type](https://ap3.qingstor.com/kubesphere-website/docs/change-service-type.png) + +- Execute the following command again and you can see the IP address displayed as below. + + ```bash + kubectl get svc -n kubesphere-system + ``` + + ![console-service](https://ap3.qingstor.com/kubesphere-website/docs/console-service.png) + +- Log in to the console through the external IP address with the default account and password (`admin/P@88w0rd`). In the cluster overview page, you can see the dashboard. + +## Enable Pluggable Components (Optional) + +The example above demonstrates the process of a default minimal installation. To enable other components in KubeSphere, see [Enable Pluggable Components](../../../pluggable-components/) for more details. diff --git a/content/en/docs/v3.4/installing-on-kubernetes/introduction/_index.md b/content/en/docs/v3.4/installing-on-kubernetes/introduction/_index.md new file mode 100644 index 000000000..88e0a6478 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-kubernetes/introduction/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Introduction" +weight: 4100 + +_build: + render: false +--- \ No newline at end of file diff --git a/content/en/docs/v3.4/installing-on-kubernetes/introduction/overview.md b/content/en/docs/v3.4/installing-on-kubernetes/introduction/overview.md new file mode 100644 index 000000000..4e93b072d --- /dev/null +++ b/content/en/docs/v3.4/installing-on-kubernetes/introduction/overview.md @@ -0,0 +1,63 @@ +--- +title: "Installing KubeSphere on Kubernetes — Overview" +keywords: "KubeSphere, Kubernetes, Installation" +description: "Develop a basic understanding of the general steps of deploying KubeSphere on an existing Kubernetes cluster." 
+linkTitle: "Overview" +weight: 4110 +--- + +![kubesphere+k8s](/images/docs/v3.3/installing-on-kubernetes/introduction/overview/kubesphere+k8s.png) + +As part of KubeSphere's commitment to provide a plug-and-play architecture for users, it can be easily installed on existing Kubernetes clusters. More specifically, KubeSphere can be deployed on Kubernetes either hosted on clouds (for example, AWS EKS, QingCloud QKE and Google GKE) or on-premises. This is because KubeSphere does not hack Kubernetes itself. It only interacts with the Kubernetes API to manage Kubernetes cluster resources. In other words, KubeSphere can be installed on any native Kubernetes cluster and Kubernetes distribution. + +This section gives you an overview of the general steps of installing KubeSphere on Kubernetes. For more information about the specific way of installation in different environments, see Installing on Hosted Kubernetes and Installing on On-premises Kubernetes. + +{{< notice note >}} + +Read [Prerequisites](../prerequisites/) before you install KubeSphere on existing Kubernetes clusters. + +{{}} + +## Video Demonstration + +{{< youtube 6wdOBD4gyg4 >}} + +## Deploy KubeSphere + +After you make sure your existing Kubernetes cluster meets all the requirements, you can use kubectl to install KubeSphere with the default minimal package. + +1. Execute the following commands to start installation: + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml + ``` + +2. Inspect the logs of installation: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + +3. Use `kubectl get pod --all-namespaces` to see whether all pods are running normally in relevant namespaces of KubeSphere. If they are, check the port (30880 by default) of the console through the following command: + + ```bash + kubectl get svc/ks-console -n kubesphere-system + ``` + +4. Make sure port 30880 is opened in security groups and access the web console through the NodePort (`IP:30880`) with the default account and password (`admin/P@88w0rd`). + + ![login](/images/docs/v3.3/installing-on-kubernetes/introduction/overview/login.png) + +## Enable Pluggable Components (Optional) + +If you start with a default minimal installation, refer to [Enable Pluggable Components](../../../pluggable-components/) to install other components. + +{{< notice tip >}} + +- Pluggable components can be enabled either before or after the installation. Please refer to the example file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/blob/release-3.0/deploy/cluster-configuration.yaml) for more details. +- Make sure there is enough CPU and memory available in your cluster. +- It is highly recommended that you install these pluggable components to discover the full-stack features and capabilities provided by KubeSphere. 
+
+{{}}
diff --git a/content/en/docs/v3.4/installing-on-kubernetes/introduction/prerequisites.md b/content/en/docs/v3.4/installing-on-kubernetes/introduction/prerequisites.md
new file mode 100644
index 000000000..2aa8a6db5
--- /dev/null
+++ b/content/en/docs/v3.4/installing-on-kubernetes/introduction/prerequisites.md
@@ -0,0 +1,49 @@
+---
+title: "Prerequisites"
+keywords: "KubeSphere, Kubernetes, Installation, Prerequisites"
+description: "Make sure your environment where an existing Kubernetes cluster runs meets the prerequisites before installation."
+linkTitle: "Prerequisites"
+weight: 4120
+---
+
+You can install KubeSphere on virtual machines and bare metal servers on which Kubernetes has already been provisioned. KubeSphere can also be deployed on cloud-hosted and on-premises Kubernetes clusters as long as your Kubernetes cluster meets the prerequisites below.
+
+- To install KubeSphere 3.3 on Kubernetes, your Kubernetes version must be v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and * v1.24.x. For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatibility. Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x.
+- Available CPU > 1 core and available memory > 2 GB. Only x86_64 CPUs are supported; Arm CPUs are not fully supported at present.
+- A **default** StorageClass is configured in your Kubernetes cluster; run `kubectl get sc` to verify it.
+- The CSR signing feature is activated in kube-apiserver, which means kube-apiserver is started with the `--cluster-signing-cert-file` and `--cluster-signing-key-file` parameters. See [RKE installation issue](https://github.com/kubesphere/kubesphere/issues/1925#issuecomment-591698309).
+
+## Pre-checks
+
+1. Make sure your Kubernetes version is compatible by running `kubectl version` on your cluster node. The output may look as below:
+
+   ```bash
+   $ kubectl version
+   Client Version: version.Info{Major:"1", Minor:"19", GitVersion:"v1.19.8", GitCommit:"fd5d41537aee486160ad9b5356a9d82363273721", GitTreeState:"clean", BuildDate:"2021-02-17T12:41:51Z", GoVersion:"go1.15.8", Compiler:"gc", Platform:"linux/amd64"}
+   Server Version: version.Info{Major:"1", Minor:"19", GitVersion:"v1.19.8", GitCommit:"fd5d41537aee486160ad9b5356a9d82363273721", GitTreeState:"clean", BuildDate:"2021-02-17T12:33:08Z", GoVersion:"go1.15.8", Compiler:"gc", Platform:"linux/amd64"}
+   ```
+
+   {{< notice note >}}
+Pay attention to the `Server Version` line. If `GitVersion` shows a version earlier than required, you need to upgrade Kubernetes first.
+   {{}}
+
+2. Check whether the available resources in your cluster meet the minimum requirements.
+
+   ```bash
+   $ free -g
+                 total   used   free   shared   buff/cache   available
+   Mem:             16      4     10        0            3           2
+   Swap:             0      0      0
+   ```
+
+3. Check whether there is a **default** StorageClass in your cluster. An existing default StorageClass is a prerequisite for KubeSphere installation.
+
+   ```bash
+   $ kubectl get sc
+   NAME                  PROVISIONER               AGE
+   glusterfs (default)   kubernetes.io/glusterfs   3d4h
+   ```
+
+If your Kubernetes cluster environment meets all the requirements above, you are ready to deploy KubeSphere on your existing Kubernetes cluster.
+
+For more information, see [Overview](../overview/).
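+
+If you prefer to run these pre-checks in one pass, you can use a small script such as the following sketch (it simply mirrors the manual checks above; adjust the checks and thresholds to your own environment):
+
+```bash
+#!/bin/bash
+# Quick pre-flight checks before installing KubeSphere on an existing cluster.
+
+echo "== Kubernetes server version =="
+kubectl version | grep "Server Version"
+
+echo "== Available memory (GB) =="
+free -g | awk '/^Mem:/{print $7}'
+
+echo "== Default StorageClass =="
+if kubectl get sc | grep -q "(default)"; then
+    kubectl get sc | grep "(default)"
+else
+    echo "No default StorageClass found. Configure one before installing KubeSphere."
+fi
+```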
diff --git a/content/en/docs/v3.4/installing-on-kubernetes/on-prem-kubernetes/_index.md b/content/en/docs/v3.4/installing-on-kubernetes/on-prem-kubernetes/_index.md new file mode 100644 index 000000000..61a6c6053 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-kubernetes/on-prem-kubernetes/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Installing on On-premises Kubernetes" +weight: 4300 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/installing-on-kubernetes/on-prem-kubernetes/install-ks-on-linux-airgapped.md b/content/en/docs/v3.4/installing-on-kubernetes/on-prem-kubernetes/install-ks-on-linux-airgapped.md new file mode 100644 index 000000000..af93374bb --- /dev/null +++ b/content/en/docs/v3.4/installing-on-kubernetes/on-prem-kubernetes/install-ks-on-linux-airgapped.md @@ -0,0 +1,412 @@ +--- +title: "Air-gapped Installation on Kubernetes" +keywords: 'Kubernetes, KubeSphere, air-gapped, installation' +description: 'Explore the best practice of installing KubeSphere in an air-gapped environment.' +linkTitle: "Air-gapped Installation" +weight: 4310 +--- + +The air-gapped installation is almost the same as the online installation except that you must create a local registry to host Docker images. This tutorial demonstrates how to install KubeSphere on Kubernetes in an air-gapped environment. + +Before you follow the steps below, read [Prerequisites](../../../installing-on-kubernetes/introduction/prerequisites/) first. + +## Step 1: Prepare a Private Image Registry + +You can use Harbor or any other private image registries. This tutorial uses Docker registry as an example with [self-signed certificates](https://docs.docker.com/registry/insecure/#use-self-signed-certificates) (If you have your own private image registry, you can skip this step). + +### Use self-signed certificates + +1. Generate your own certificate by executing the following commands: + + ```bash + mkdir -p certs + ``` + + ```bash + openssl req \ + -newkey rsa:4096 -nodes -sha256 -keyout certs/domain.key \ + -x509 -days 36500 -out certs/domain.crt + ``` + +2. Make sure you specify a domain name in the field `Common Name` when you are generating your own certificate. For instance, the field is set to `dockerhub.kubekey.local` in this example. + + ![self-signed-cert](/images/docs/v3.3/installing-on-linux/introduction/air-gapped-installation/self-signed-cert.jpg) + +### Start the Docker registry + +Run the following commands to start the Docker registry: + +``` +docker run -d \ + --restart=always \ + --name registry \ + -v "$(pwd)"/certs:/certs \ + -v /mnt/registry:/var/lib/registry \ + -e REGISTRY_HTTP_ADDR=0.0.0.0:443 \ + -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \ + -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \ + -p 443:443 \ + registry:2 +``` + +{{< notice note >}} + +Docker uses `/var/lib/docker` as the default directory where all Docker related files, including images, are stored. It is recommended you add additional storage volumes with at least **100G** mounted to `/var/lib/docker` and `/mnt/registry` respectively. See [fdisk](https://www.computerhope.com/unix/fdisk.htm) command for reference. + +{{}} + +### Configure the registry + +1. Add an entry to `/etc/hosts` to map the hostname (i.e. the registry domain name; in this case, it is `dockerhub.kubekey.local`) to the private IP address of your machine as below. + + ```bash + # docker registry + 192.168.0.2 dockerhub.kubekey.local + ``` + +2. 
Execute the following commands to copy the certificate to a specified directory and make Docker trust it. + + ```bash + mkdir -p /etc/docker/certs.d/dockerhub.kubekey.local + ``` + + ```bash + cp certs/domain.crt /etc/docker/certs.d/dockerhub.kubekey.local/ca.crt + ``` + + {{< notice note >}} + + The path of the certificate is related to the domain name. When you copy the path, use your actual domain name if it is different from the one set above. + + {{}} + +3. To verify whether the private registry is effective, you can copy an image to your local machine first, and use `docker push` and `docker pull` to test it. + +## Step 2: Prepare Installation Images + +As you install KubeSphere in an air-gapped environment, you need to prepare an image package containing all the necessary images in advance. + +1. Download the image list file `images-list.txt` from a machine that has access to the Internet through the following command: + + ```bash + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/images-list.txt + ``` + + {{< notice note >}} + + This file lists images under `##+modulename` based on different modules. You can add your own images to this file following the same rule. To view the complete file, see [Appendix](#appendix). + + {{}} + +2. Download `offline-installation-tool.sh`. + + ```bash + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/offline-installation-tool.sh + ``` + +3. Make the `.sh` file executable. + + ```bash + chmod +x offline-installation-tool.sh + ``` + +4. You can execute the command `./offline-installation-tool.sh -h` to see how to use the script: + + ```bash + root@master:/home/ubuntu# ./offline-installation-tool.sh -h + Usage: + + ./offline-installation-tool.sh [-l IMAGES-LIST] [-d IMAGES-DIR] [-r PRIVATE-REGISTRY] [-v KUBERNETES-VERSION ] + + Description: + -b : save kubernetes' binaries. + -d IMAGES-DIR : the dir of files (tar.gz) which generated by `docker save`. default: ./kubesphere-images + -l IMAGES-LIST : text file with list of images. + -r PRIVATE-REGISTRY : target private registry:port. + -s : save model will be applied. Pull the images in the IMAGES-LIST and save images as a tar.gz file. + -v KUBERNETES-VERSION : download kubernetes' binaries. default: v1.22.12 + -h : usage message + ``` + +5. Pull images in `offline-installation-tool.sh`. + + ```bash + ./offline-installation-tool.sh -s -l images-list.txt -d ./kubesphere-images + ``` + + {{< notice note >}} + + You can choose to pull images as needed. For example, you can delete `##k8s-images` and related images under it in `images-list.text` as you already have a Kubernetes cluster. + + {{}} + +## Step 3: Push Images to Your Private Registry + +Transfer your packaged image file to your local machine and execute the following command to push it to the registry. + +```bash +./offline-installation-tool.sh -l images-list.txt -d ./kubesphere-images -r dockerhub.kubekey.local +``` + +{{< notice note >}} + +The domain name is `dockerhub.kubekey.local` in the command. Make sure you use your **own registry address**. + +{{}} + +## Step 4: Download Deployment Files + +Similar to installing KubeSphere on an existing Kubernetes cluster in an online environment, you also need to download `cluster-configuration.yaml` and `kubesphere-installer.yaml` first. + +1. Execute the following commands to download these two files and transfer them to your machine that serves as the taskbox for installation. 
+ + ```bash + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + ``` + +2. Edit `cluster-configuration.yaml` to add your private image registry. For example, `dockerhub.kubekey.local` is the registry address in this tutorial, then use it as the value of `.spec.local_registry` as below: + + ```yaml + spec: + persistence: + storageClass: "" + authentication: + jwtSecret: "" + local_registry: dockerhub.kubekey.local # Add this line manually; make sure you use your own registry address. + ``` + + {{< notice note >}} + + You can enable pluggable components in this YAML file to explore more features of KubeSphere. Refer to [Enable Pluggle Components](../../../pluggable-components/) for more details. + + {{}} + +3. Save `cluster-configuration.yaml` after you finish editing. Replace `ks-installer` with your **own registry address** with the following command: + + ```bash + sed -i "s#^\s*image: kubesphere.*/ks-installer:.*# image: dockerhub.kubekey.local/kubesphere/ks-installer:v3.0.0#" kubesphere-installer.yaml + ``` + + {{< notice warning >}} + + `dockerhub.kubekey.local` is the registry address in the command. Make sure you use your own registry address. + + {{}} + + +## Step 5: Start Installation + +Execute the following commands after you make sure that all steps above are completed. + +```bash +kubectl apply -f kubesphere-installer.yaml +kubectl apply -f cluster-configuration.yaml +``` + +## Step 6: Verify Installation + +When the installation finishes, you can see the content as follows: + +```bash +##################################################### +### Welcome to KubeSphere! ### +##################################################### + +Console: http://192.168.0.2:30880 +Account: admin +Password: P@88w0rd + +NOTES: + 1. After logging into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are ready. + 2. Please modify the default password after login. + +##################################################### +https://kubesphere.io 20xx-xx-xx xx:xx:xx +##################################################### +``` + +Now, you will be able to access the web console of KubeSphere through `http://{IP}:30880` with the default account and password `admin/P@88w0rd`. + +{{< notice note >}} + +To access the console, make sure port 30880 is opened in your security group. 
+ +{{}} + +![kubesphere-login](https://ap3.qingstor.com/kubesphere-website/docs/login.png) + +## Appendix + +### Image list of KubeSphere 3.3 + +```txt +##k8s-images +kubesphere/kube-apiserver:v1.23.10 +kubesphere/kube-controller-manager:v1.23.10 +kubesphere/kube-proxy:v1.23.10 +kubesphere/kube-scheduler:v1.23.10 +kubesphere/kube-apiserver:v1.24.3 +kubesphere/kube-controller-manager:v1.24.3 +kubesphere/kube-proxy:v1.24.3 +kubesphere/kube-scheduler:v1.24.3 +kubesphere/kube-apiserver:v1.22.12 +kubesphere/kube-controller-manager:v1.22.12 +kubesphere/kube-proxy:v1.22.12 +kubesphere/kube-scheduler:v1.22.12 +kubesphere/kube-apiserver:v1.21.14 +kubesphere/kube-controller-manager:v1.21.14 +kubesphere/kube-proxy:v1.21.14 +kubesphere/kube-scheduler:v1.21.14 +kubesphere/pause:3.7 +kubesphere/pause:3.6 +kubesphere/pause:3.5 +kubesphere/pause:3.4.1 +coredns/coredns:1.8.0 +coredns/coredns:1.8.6 +calico/cni:v3.23.2 +calico/kube-controllers:v3.23.2 +calico/node:v3.23.2 +calico/pod2daemon-flexvol:v3.23.2 +calico/typha:v3.23.2 +kubesphere/flannel:v0.12.0 +openebs/provisioner-localpv:2.10.1 +openebs/linux-utils:2.10.0 +library/haproxy:2.3 +kubesphere/nfs-subdir-external-provisioner:v4.0.2 +kubesphere/k8s-dns-node-cache:1.15.12 +##kubesphere-images +kubesphere/ks-installer:v3.3.2 +kubesphere/ks-apiserver:v3.3.2 +kubesphere/ks-console:v3.3.2 +kubesphere/ks-controller-manager:v3.3.2 +kubesphere/ks-upgrade:v3.3.2 +kubesphere/kubectl:v1.22.0 +kubesphere/kubectl:v1.21.0 +kubesphere/kubectl:v1.20.0 +kubesphere/kubefed:v0.8.1 +kubesphere/tower:v0.2.0 +minio/minio:RELEASE.2019-08-07T01-59-21Z +minio/mc:RELEASE.2019-08-07T23-14-43Z +csiplugin/snapshot-controller:v4.0.0 +kubesphere/nginx-ingress-controller:v1.1.0 +mirrorgooglecontainers/defaultbackend-amd64:1.4 +kubesphere/metrics-server:v0.4.2 +redis:5.0.14-alpine +haproxy:2.0.25-alpine +alpine:3.14 +osixia/openldap:1.3.0 +kubesphere/netshoot:v1.0 +##kubeedge-images +kubeedge/cloudcore:v1.9.2 +kubeedge/iptables-manager:v1.9.2 +kubesphere/edgeservice:v0.2.0 +##gatekeeper-images +openpolicyagent/gatekeeper:v3.5.2 +##openpitrix-images +kubesphere/openpitrix-jobs:v3.3.2 +##kubesphere-devops-images +kubesphere/devops-apiserver:ks-v3.3.2 +kubesphere/devops-controller:ks-v3.3.2 +kubesphere/devops-tools:ks-v3.3.2 +kubesphere/ks-jenkins:v3.3.0-2.319.1 +jenkins/inbound-agent:4.10-2 +kubesphere/builder-base:v3.2.2 +kubesphere/builder-nodejs:v3.2.0 +kubesphere/builder-maven:v3.2.0 +kubesphere/builder-maven:v3.2.1-jdk11 +kubesphere/builder-python:v3.2.0 +kubesphere/builder-go:v3.2.0 +kubesphere/builder-go:v3.2.2-1.16 +kubesphere/builder-go:v3.2.2-1.17 +kubesphere/builder-go:v3.2.2-1.18 +kubesphere/builder-base:v3.2.2-podman +kubesphere/builder-nodejs:v3.2.0-podman +kubesphere/builder-maven:v3.2.0-podman +kubesphere/builder-maven:v3.2.1-jdk11-podman +kubesphere/builder-python:v3.2.0-podman +kubesphere/builder-go:v3.2.0-podman +kubesphere/builder-go:v3.2.2-1.16-podman +kubesphere/builder-go:v3.2.2-1.17-podman +kubesphere/builder-go:v3.2.2-1.18-podman +kubesphere/s2ioperator:v3.2.1 +kubesphere/s2irun:v3.2.0 +kubesphere/s2i-binary:v3.2.0 +kubesphere/tomcat85-java11-centos7:v3.2.0 +kubesphere/tomcat85-java11-runtime:v3.2.0 +kubesphere/tomcat85-java8-centos7:v3.2.0 +kubesphere/tomcat85-java8-runtime:v3.2.0 +kubesphere/java-11-centos7:v3.2.0 +kubesphere/java-8-centos7:v3.2.0 +kubesphere/java-8-runtime:v3.2.0 +kubesphere/java-11-runtime:v3.2.0 +kubesphere/nodejs-8-centos7:v3.2.0 +kubesphere/nodejs-6-centos7:v3.2.0 +kubesphere/nodejs-4-centos7:v3.2.0 +kubesphere/python-36-centos7:v3.2.0 
+kubesphere/python-35-centos7:v3.2.0 +kubesphere/python-34-centos7:v3.2.0 +kubesphere/python-27-centos7:v3.2.0 +quay.io/argoproj/argocd:v2.3.3 +quay.io/argoproj/argocd-applicationset:v0.4.1 +ghcr.io/dexidp/dex:v2.30.2 +redis:6.2.6-alpine +##kubesphere-monitoring-images +jimmidyson/configmap-reload:v0.5.0 +prom/prometheus:v2.34.0 +kubesphere/prometheus-config-reloader:v0.55.1 +kubesphere/prometheus-operator:v0.55.1 +kubesphere/kube-rbac-proxy:v0.11.0 +kubesphere/kube-state-metrics:v2.5.0 +prom/node-exporter:v1.3.1 +prom/alertmanager:v0.23.0 +thanosio/thanos:v0.25.2 +grafana/grafana:8.3.3 +kubesphere/kube-rbac-proxy:v0.8.0 +kubesphere/notification-manager-operator:v1.4.0 +kubesphere/notification-manager:v1.4.0 +kubesphere/notification-tenant-sidecar:v3.2.0 +##kubesphere-logging-images +kubesphere/elasticsearch-curator:v5.7.6 +kubesphere/elasticsearch-oss:6.8.22 +kubesphere/fluentbit-operator:v0.13.0 +docker:19.03 +kubesphere/fluent-bit:v1.8.11 +kubesphere/log-sidecar-injector:1.1 +elastic/filebeat:6.7.0 +kubesphere/kube-events-operator:v0.4.0 +kubesphere/kube-events-exporter:v0.4.0 +kubesphere/kube-events-ruler:v0.4.0 +kubesphere/kube-auditing-operator:v0.2.0 +kubesphere/kube-auditing-webhook:v0.2.0 +##istio-images +istio/pilot:1.11.1 +istio/proxyv2:1.11.1 +jaegertracing/jaeger-operator:1.27 +jaegertracing/jaeger-agent:1.27 +jaegertracing/jaeger-collector:1.27 +jaegertracing/jaeger-query:1.27 +jaegertracing/jaeger-es-index-cleaner:1.27 +kubesphere/kiali-operator:v1.38.1 +kubesphere/kiali:v1.38 +##example-images +busybox:1.31.1 +nginx:1.14-alpine +joosthofman/wget:1.0 +nginxdemos/hello:plain-text +wordpress:4.8-apache +mirrorgooglecontainers/hpa-example:latest +fluent/fluentd:v1.4.2-2.0 +perl:latest +kubesphere/examples-bookinfo-productpage-v1:1.16.2 +kubesphere/examples-bookinfo-reviews-v1:1.16.2 +kubesphere/examples-bookinfo-reviews-v2:1.16.2 +kubesphere/examples-bookinfo-details-v1:1.16.2 +kubesphere/examples-bookinfo-ratings-v1:1.16.3 +##weave-scope-images +weaveworks/scope:1.13.0 +``` diff --git a/content/en/docs/v3.4/installing-on-kubernetes/uninstall-kubesphere-from-k8s.md b/content/en/docs/v3.4/installing-on-kubernetes/uninstall-kubesphere-from-k8s.md new file mode 100644 index 000000000..d9746e532 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-kubernetes/uninstall-kubesphere-from-k8s.md @@ -0,0 +1,15 @@ +--- +title: "Uninstall KubeSphere from Kubernetes" +keywords: 'Kubernetes, KubeSphere, uninstall, remove-cluster' +description: 'Remove KubeSphere from a Kubernetes cluster.' +linkTitle: "Uninstall KubeSphere from Kubernetes" +weight: 4400 +--- + +You can uninstall KubeSphere from your existing Kubernetes cluster by using [kubesphere-delete.sh](https://github.com/kubesphere/ks-installer/blob/release-3.1/scripts/kubesphere-delete.sh). Copy it from the [GitHub source file](https://raw.githubusercontent.com/kubesphere/ks-installer/release-3.1/scripts/kubesphere-delete.sh) and execute this script on your local machine. + +{{< notice warning >}} + +Uninstalling will remove KubeSphere from your Kubernetes cluster. This operation is irreversible and does not have any backup. Please be cautious with this operation. 
+ +{{}} \ No newline at end of file diff --git a/content/en/docs/v3.4/installing-on-linux/_index.md b/content/en/docs/v3.4/installing-on-linux/_index.md new file mode 100644 index 000000000..f9a72d257 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/_index.md @@ -0,0 +1,17 @@ +--- +title: "Installing on Linux" +description: "Demonstrate how to install KubeSphere on Linux on cloud and in on-premises environments." +layout: "second" + +linkTitle: "Installing on Linux" +weight: 3000 + +icon: "/images/docs/v3.3/docs.svg" +--- + +This chapter demonstrates how to use KubeKey to provision a production-ready Kubernetes and KubeSphere cluster on Linux in different environments. You can also use KubeKey to easily scale out and in your cluster and set various storage classes based on your needs. +## Most Popular Pages + +Below you will find some of the most viewed and helpful pages in this chapter. It is highly recommended that you refer to them first. + +{{< popularPage icon="/images/docs/v3.3/qingcloud-2.svg" title="Deploy KubeSphere on QingCloud" description="Provision an HA KubeSphere cluster on QingCloud." link="../installing-on-linux/public-cloud/install-kubesphere-on-qingcloud-vms/" >}} diff --git a/content/en/docs/v3.4/installing-on-linux/cluster-operation/_index.md b/content/en/docs/v3.4/installing-on-linux/cluster-operation/_index.md new file mode 100644 index 000000000..019354675 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/cluster-operation/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Add and Delete Nodes" +weight: 3600 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/installing-on-linux/cluster-operation/add-edge-nodes.md b/content/en/docs/v3.4/installing-on-linux/cluster-operation/add-edge-nodes.md new file mode 100644 index 000000000..08d6cd7da --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/cluster-operation/add-edge-nodes.md @@ -0,0 +1,259 @@ +--- +title: "Add Edge Nodes" +keywords: 'Kubernetes, KubeSphere, KubeEdge' +description: 'Add edge nodes to your cluster.' +linkTitle: "Add Edge Nodes" +weight: 3630 +--- + +KubeSphere leverages [KubeEdge](https://kubeedge.io/en/), to extend native containerized application orchestration capabilities to hosts at edge. With separate cloud and edge core modules, KubeEdge provides complete edge computing solutions while the installation may be complex and difficult. + +![kubeedge_arch](/images/docs/v3.3/installing-on-linux/add-and-delete-nodes/add-edge-nodes/kubeedge_arch.png) + +{{< notice note >}} + +For more information about different components of KubeEdge, see [the KubeEdge documentation](https://docs.kubeedge.io/en/docs/kubeedge/#components). + +{{}} + +This tutorial demonstrates how to add an edge node to your cluster. + +## Prerequisites + +- You have enabled [KubeEdge](../../../pluggable-components/kubeedge/). +- To prevent compatability issues, you are advised to install Kubernetes v1.21.x. +- You have an available node to serve as an edge node. The node can run either Ubuntu (recommended) or CentOS. This tutorial uses Ubuntu 18.04 as an example. +- Edge nodes, unlike Kubernetes cluster nodes, should work in a separate network. + +## Prevent non-edge workloads from being scheduled to edge nodes + +Due to the tolerations some daemonsets (for example, Calico) have, to ensure that the newly added edge nodes work properly, you need to run the following command to manually patch the pods so that non-edge workloads will not be scheduled to the edge nodes. 
+
+```bash
+#!/bin/bash
+
+# Node affinity patch that keeps Pods off nodes labeled node-role.kubernetes.io/edge.
+NoShedulePatchJson='{"spec":{"template":{"spec":{"affinity":{"nodeAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[{"matchExpressions":[{"key":"node-role.kubernetes.io/edge","operator":"DoesNotExist"}]}]}}}}}}}'
+
+ns="kube-system"
+
+DaemonSets=("nodelocaldns" "kube-proxy" "calico-node")
+
+length=${#DaemonSets[@]}
+
+for((i=0;i<length;i++));
+do
+        ds=${DaemonSets[$i]}
+        echo "Patching DaemonSet/${ds} in namespace ${ns}"
+        kubectl -n $ns patch DaemonSet/${ds} --type merge --patch "$NoShedulePatchJson"
+        sleep 1
+done
+```
+
+## Configure Port Forwarding Rules
+
+Forward the following external ports of your cluster's public endpoint or load balancer to the corresponding internal NodePorts so that edge nodes can reach the cloud-side components.
+
+{{< notice note >}}
+ In `ClusterConfiguration` of the ks-installer, if you set an internal IP address, you need to set the forwarding rule. If you have not set the forwarding rule, you can directly connect to ports 30000 to 30004.
+ {{}}
+
+| Fields | External Ports | Fields | Internal Ports |
+| ------------------- | -------------- | ----------------------- | -------------- |
+| `cloudhubPort` | `10000` | `cloudhubNodePort` | `30000` |
+| `cloudhubQuicPort` | `10001` | `cloudhubQuicNodePort` | `30001` |
+| `cloudhubHttpsPort` | `10002` | `cloudhubHttpsNodePort` | `30002` |
+| `cloudstreamPort` | `10003` | `cloudstreamNodePort` | `30003` |
+| `tunnelPort` | `10004` | `tunnelNodePort` | `30004` |
+
+## Configure an Edge Node
+
+You need to configure the edge node as follows.
+
+### Install a container runtime
+
+[KubeEdge](https://docs.kubeedge.io/en/docs/) supports several container runtimes, including Docker, containerd, CRI-O, and Virtlet. For more information, see [the KubeEdge documentation](https://docs.kubeedge.io/en/docs/advanced/cri/).
+
+{{< notice note >}}
+
+If you use Docker as the container runtime for your edge node, Docker v19.3.0 or later must be installed so that KubeSphere can obtain Pod metrics from it.
+
+{{}}
+
+### Configure EdgeMesh
+
+Perform the following steps to configure [EdgeMesh](https://kubeedge.io/en/docs/advanced/edgemesh/) on your edge node.
+
+1. Edit `/etc/nsswitch.conf`.
+
+   ```bash
+   vi /etc/nsswitch.conf
+   ```
+
+2. Add the following content to this file:
+
+   ```bash
+   hosts: dns files mdns4_minimal [NOTFOUND=return]
+   ```
+
+3. Save the file and run the following command to enable IP forwarding:
+
+   ```bash
+   echo "net.ipv4.ip_forward = 1" | sudo tee -a /etc/sysctl.conf
+   ```
+
+4. Verify your modification:
+
+   ```bash
+   sudo sysctl -p | grep ip_forward
+   ```
+
+   Expected result:
+
+   ```bash
+   net.ipv4.ip_forward = 1
+   ```
+
+## Add an Edge Node
+
+1. Log in to the console as `admin` and click **Platform** in the upper-left corner.
+
+2. Select **Cluster Management** and navigate to **Edge Nodes** under **Nodes**.
+
+   {{< notice note >}}
+
+   If you have enabled [multi-cluster management](../../../multicluster-management/), you need to select a cluster first.
+
+   {{}}
+
+3. Click **Add**. In the dialog that appears, set a node name and enter an internal IP address of your edge node. Click **Validate** to continue.
+
+   ![add-edge-node](/images/docs/v3.3/installing-on-linux/add-and-delete-nodes/add-edge-nodes/add-edge-node.png)
+
+   {{< notice note >}}
+
+   - The internal IP address is only used for inter-node communication, so you do not necessarily need to use the actual internal IP address of the edge node. As long as the IP address is successfully validated, you can use it.
+   - It is recommended that you select the check box to add the default taint.
+
+   {{}}
+
+4. Copy the command automatically created under **Edge Node Configuration Command** and run it on your edge node.
+
+   ![edge-command](/images/docs/v3.3/installing-on-linux/add-and-delete-nodes/add-edge-nodes/edge-command.png)
+
+   {{< notice note >}}
+
+   Make sure `wget` is installed on your edge node before you run the command.
+ + {{}} + +5. Close the dialog, refresh the page, and the edge node will appear in the list. + + {{< notice note >}} + + After an edge node is added, if you cannot see CPU and memory resource usage on the **Edge Nodes** page, make sure [Metrics Server](../../../pluggable-components/metrics-server/) 0.4.1 or later is installed in your cluster. + + {{}} + +## Collect Monitoring Information on Edge Nodes + +To collect monitoring information on edge node, you need to enable `metrics_server` in `ClusterConfiguration` and `edgeStream` in KubeEdge. + +1. On the KubeSphere web console, choose **Platform > Cluster Management**. + +2. On the navigation pane on the left, click **CRDs**. + +3. In the search bar on the right pane, enter `clusterconfiguration`, and click the result to go to its details page. + +4. Click icon on the right of ks-installer, and click **Edit YAML**. + +5. Search for **metrics_server**, and change the value of `enabled` from `false` to `true`. + + ```yaml + metrics_server: + enabled: true # Change "false" to "true". + ``` + +6. Click **OK** in the lower right corner to save the change. + +7. Open the `/etc/kubeedge/config` file, search for `edgeStream`, change `false` to `true`, and save the change. + ```bash + cd /etc/kubeedge/config + vi edgecore.yaml + ``` + + ```bash + edgeStream: + enable: true #Change "false" to "true".。 + handshakeTimeout: 30 + readDeadline: 15 + server: xx.xxx.xxx.xxx:10004 #If port forwarding is not configured, change the port ID to 30004 here. + tlsTunnelCAFile: /etc/kubeedge/ca/rootCA.crt + tlsTunnelCertFile: /etc/kubeedge/certs/server.crt + tlsTunnelPrivateKeyFile: /etc/kubeedge/certs/server.key + writeDeadline: 15 + ``` + +8. Run the following command to restart `edgecore.service`. + ```bash + systemctl restart edgecore.service + ``` + +9. If you still cannot see the monitoring data, run the following command: + + ```bash + journalctl -u edgecore.service -b -r + ``` + + {{< notice note >}} + + If `failed to check the running environment: kube-proxy should not running on edge node when running edgecore` is displayed, refer to Step 8 to restart `edgecore.service` again. + + {{}} + +## Remove an Edge Node + +Before you remove an edge node, delete all your workloads running on it. + +1. On your edge node, run the following commands: + + ```bash + ./keadm reset + ``` + + ``` + apt remove mosquitto + ``` + + ```bash + rm -rf /var/lib/kubeedge /var/lib/edged /etc/kubeedge/ca /etc/kubeedge/certs + ``` + + {{< notice note >}} + + If you cannot delete the tmpfs-mounted folder, restart the node or unmount the folder first. + + {{}} + +2. Run the following command to remove the edge node from your cluster: + + ```bash + kubectl delete node + ``` + +3. To uninstall KubeEdge from your cluster, run the following commands: + + ```bash + helm uninstall kubeedge -n kubeedge + ``` + + ```bash + kubectl delete ns kubeedge + ``` + + {{< notice note >}} + + After uninstallation, you will not be able to add edge nodes to your cluster. + + {{}} \ No newline at end of file diff --git a/content/en/docs/v3.4/installing-on-linux/cluster-operation/add-new-nodes.md b/content/en/docs/v3.4/installing-on-linux/cluster-operation/add-new-nodes.md new file mode 100644 index 000000000..ca974fa05 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/cluster-operation/add-new-nodes.md @@ -0,0 +1,156 @@ +--- +title: "Add New Nodes to a Kubernetes Cluster" +keywords: 'Kubernetes, KubeSphere, scale-out, add-nodes' +description: 'Add more nodes to scale out your cluster.' 
+linkTitle: "Add New Nodes" +weight: 3610 +--- + +After you use KubeSphere for a certain period of time, it is likely that you need to scale out your cluster with an increasing number of workloads. From KubeSphere v3.0.0, you can use the brand-new installer [KubeKey](https://github.com/kubesphere/kubekey) to add new nodes to a Kubernetes cluster. Fundamentally, the operation is based on Kubelet's registration mechanism. In other words, the new nodes will automatically join the existing Kubernetes cluster. KubeSphere supports hybrid environments, which means the newly-added host OS can be CentOS or Ubuntu. + +This tutorial demonstrates how to add new nodes to a single-node cluster. To scale out a multi-node cluster, the steps are basically the same. + +## Prerequisites + +- You need to have a single-node cluster. For more information, see [All-in-One Installation on Linux](../../../quick-start/all-in-one-on-linux/). + +- You have [downloaded KubeKey](../../../installing-on-linux/introduction/multioverview/#step-2-download-kubekey). + +## Add Worker Nodes to Kubernetes + +1. Retrieve your cluster information using KubeKey. The command below creates a configuration file (`sample.yaml`). + + ```bash + ./kk create config --from-cluster + ``` + + {{< notice note >}} + + +You can skip this step if you already have the configuration file on your machine. For example, if you want to add nodes to a multi-node cluster which was set up by KubeKey, you might still have the configuration file if you have not deleted it. + +{{}} + +2. In the configuration file, put the information of your new nodes under `hosts` and `roleGroups`. The example adds two new nodes (i.e. `node1` and `node2`). Here `master1` is the existing node. + + ```bash + ··· + spec: + hosts: + - {name: master1, address: 192.168.0.3, internalAddress: 192.168.0.3, user: root, password: Qcloud@123} + - {name: node1, address: 192.168.0.4, internalAddress: 192.168.0.4, user: root, password: Qcloud@123} + - {name: node2, address: 192.168.0.5, internalAddress: 192.168.0.5, user: root, password: Qcloud@123} + roleGroups: + etcd: + - master1 + control-plane: + - master1 + worker: + - node1 + - node2 + ··· + ``` + + {{< notice note >}} + +- For more information about the configuration file, see [Edit the configuration file](../../../installing-on-linux/introduction/multioverview/#2-edit-the-configuration-file). +- You are not allowed to modify the host name of existing nodes when adding new nodes. +- Replace the host name in the example with your own. + + {{}} +3. Execute the following command: + + ```bash + ./kk add nodes -f sample.yaml + ``` + +4. You will be able to see the new nodes and their information on the KubeSphere console when the installation finishes. On the **Cluster Management** page, select **Cluster Nodes** under **Nodes** from the left menu, or execute the command `kubectl get node` to check the changes. + + ```bash + $ kubectl get node + NAME STATUS ROLES AGE VERSION + master1 Ready master,worker 20d v1.17.9 + node1 Ready worker 31h v1.17.9 + node2 Ready worker 31h v1.17.9 + ``` + +## Add New Master Nodes for High Availability + +The steps of adding master nodes are generally the same as adding worker nodes while you need to configure a load balancer for your cluster. You can use any cloud load balancers or hardware load balancers (for example, F5). In addition, Keepalived and [HAproxy](https://www.haproxy.com/), or Nginx is also an alternative for creating highly available clusters. + +1. 
Create a configuration file using KubeKey. + + ``` + ./kk create config --from-cluster + ``` + +2. Open the file and you can see some fields are pre-populated with values. Add the information of new nodes and your load balancer to the file. Here is an example for your reference: + + ```yaml + apiVersion: kubekey.kubesphere.io/v1alpha1 + kind: Cluster + metadata: + name: sample + spec: + hosts: + # You should complete the ssh information of the hosts + - {name: master1, address: 172.16.0.2, internalAddress: 172.16.0.2, user: root, password: Testing123} + - {name: master2, address: 172.16.0.5, internalAddress: 172.16.0.5, user: root, password: Testing123} + - {name: master3, address: 172.16.0.6, internalAddress: 172.16.0.6, user: root, password: Testing123} + - {name: worker1, address: 172.16.0.3, internalAddress: 172.16.0.3, user: root, password: Testing123} + - {name: worker2, address: 172.16.0.4, internalAddress: 172.16.0.4, user: root, password: Testing123} + - {name: worker3, address: 172.16.0.7, internalAddress: 172.16.0.7, user: root, password: Testing123} + roleGroups: + etcd: + - master1 + - master2 + - master3 + control-plane: + - master1 + - master2 + - master3 + worker: + - worker1 + - worker2 + - worker3 + controlPlaneEndpoint: + # If loadbalancer is used, 'address' should be set to loadbalancer's ip. + domain: lb.kubesphere.local + address: 172.16.0.253 + port: 6443 + kubernetes: + version: v1.17.9 + imageRepo: kubesphere + clusterName: cluster.local + proxyMode: ipvs + masqueradeAll: false + maxPods: 110 + nodeCidrMaskSize: 24 + network: + plugin: calico + kubePodsCIDR: 10.233.64.0/18 + kubeServiceCIDR: 10.233.0.0/18 + registry: + privateRegistry: "" + ``` + +3. Pay attention to the `controlPlaneEndpoint` field. + + ```yaml + controlPlaneEndpoint: + # If you use a load balancer, the address should be set to the load balancer's ip. + domain: lb.kubesphere.local + address: 172.16.0.253 + port: 6443 + ``` + + - The domain name of the load balancer is `lb.kubesphere.local` by default for internal access. You can change it based on your needs. + - In most cases, you need to provide the **private IP address** of the load balancer for the field `address`. However, different cloud providers may have different configurations for load balancers. For example, if you configure a Server Load Balancer (SLB) on Alibaba Cloud, the platform assigns a public IP address to the SLB, which means you need to specify the public IP address for the field `address`. + - The field `port` indicates the port of `api-server`. + +4. Save the file and execute the following command to apply the configuration. + + ```bash + ./kk add nodes -f sample.yaml + ``` + diff --git a/content/en/docs/v3.4/installing-on-linux/cluster-operation/remove-nodes.md b/content/en/docs/v3.4/installing-on-linux/cluster-operation/remove-nodes.md new file mode 100644 index 000000000..b7ec0504b --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/cluster-operation/remove-nodes.md @@ -0,0 +1,33 @@ +--- +title: "Delete Kubernetes Nodes" +keywords: 'Kubernetes, KubeSphere, scale-in, remove-nodes' +description: 'Cordon a node and even delete a node to scale in your cluster.' +linkTitle: "Delete Nodes" +weight: 3620 +--- + +## Cordon a Kubernetes Node + +Marking a node as unschedulable prevents the scheduler from placing new Pods onto that node while not affecting existing Pods on the node. This is useful as a preparatory step before a node reboot or other maintenance. 
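+
+For example, assuming a node named `node1` (a placeholder), the whole cordon-maintain-uncordon cycle from the command line might look like the following sketch; the console-based steps are described below.
+
+```bash
+# Mark the node as unschedulable; Pods already running on it are not affected.
+kubectl cordon node1
+
+# Optionally evict existing workloads as well (DaemonSet Pods are ignored).
+kubectl drain node1 --ignore-daemonsets --delete-emptydir-data
+
+# Make the node schedulable again after maintenance.
+kubectl uncordon node1
+```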
+ +Log in to the console as `admin` and go to the **Cluster Management** page. To mark a node unschedulable, choose **Cluster Nodes** under **Nodes** from the left menu, find a node you want to remove from the cluster, and click **Cordon**. Alternatively, you can run the command `kubectl cordon $NODENAME` directly. See [Kubernetes Nodes](https://kubernetes.io/docs/concepts/architecture/nodes/) for more details. + +{{< notice note >}} + +Pods that are part of a DaemonSet tolerate being run on an unschedulable node. DaemonSets typically provide node-local services that should run on the node even if it is being drained of workload applications. + +{{}} + +## Delete a Kubernetes Node + +1. To delete a node, you need to prepare the configuration file of your cluster first, which is the one created when you [set up your cluster](../../introduction/multioverview/#1-create-an-example-configuration-file). If you do not have it, use [KubeKey](https://github.com/kubesphere/kubekey) to retrieve cluster information (a file `sample.yaml` will be created by default). + + ```bash + ./kk create config --from-cluster + ``` + +2. Make sure you provide all the information of your hosts in the configuration file and run the following command to delete a node. + + ```bash + ./kk delete node -f sample.yaml + ``` diff --git a/content/en/docs/v3.4/installing-on-linux/high-availability-configurations/_index.md b/content/en/docs/v3.4/installing-on-linux/high-availability-configurations/_index.md new file mode 100644 index 000000000..b50d31a8c --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/high-availability-configurations/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "High Availability Configurations" +weight: 3200 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/installing-on-linux/high-availability-configurations/ha-configuration.md b/content/en/docs/v3.4/installing-on-linux/high-availability-configurations/ha-configuration.md new file mode 100644 index 000000000..66fd1bde3 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/high-availability-configurations/ha-configuration.md @@ -0,0 +1,216 @@ +--- +title: "Set up an HA Cluster Using a Load Balancer" +keywords: 'KubeSphere, Kubernetes, HA, high availability, installation, configuration' +description: 'Learn how to create a highly available cluster using a load balancer.' +linkTitle: "Set up an HA Cluster Using a Load Balancer" +weight: 3220 +--- + +You can set up Kubernetes cluster (a control plane node) with KubeSphere installed based on the tutorial of [Multi-node Installation](../../../installing-on-linux/introduction/multioverview/). Clusters with a control plane node may be sufficient for development and testing in most cases. For a production environment, however, you need to consider the high availability of the cluster. If key components (for example, kube-apiserver, kube-scheduler, and kube-controller-manager) are all running on the same control plane node, Kubernetes and KubeSphere will be unavailable once the control plane node goes down. Therefore, you need to set up a high-availability cluster by provisioning load balancers with multiple control plane nodes. You can use any cloud load balancer, or any hardware load balancer (for example, F5). In addition, Keepalived and [HAproxy](https://www.haproxy.com/), or Nginx is also an alternative for creating high-availability clusters. + +This tutorial demonstrates the general configurations of a high-availability cluster as you install KubeSphere on Linux. 
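+
+If you choose HAProxy as the load balancer, a minimal TCP frontend and backend for kube-apiserver might look like the following sketch (the control plane IP addresses match the example machines used later in this tutorial; tune the timeouts and health checks for production use):
+
+```
+# /etc/haproxy/haproxy.cfg (excerpt)
+frontend kube-apiserver
+    bind *:6443
+    mode tcp
+    option tcplog
+    default_backend kube-apiserver
+
+backend kube-apiserver
+    mode tcp
+    option tcp-check
+    balance roundrobin
+    server master1 192.168.0.2:6443 check fall 3 rise 2
+    server master2 192.168.0.3:6443 check fall 3 rise 2
+    server master3 192.168.0.4:6443 check fall 3 rise 2
+```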
+ +## Architecture + +Make sure you have prepared six Linux machines before you begin, with three of them serving as control plane nodes and the other three as worker nodes. The following image shows details of these machines, including their private IP address and role. For more information about system and network requirements, see [Multi-node Installation](../../../installing-on-linux/introduction/multioverview/#step-1-prepare-linux-hosts). + +![ha-architecture](/images/docs/v3.3/installing-on-linux/high-availability-configurations/set-up-ha-cluster-using-lb/ha-architecture.png) + +## Configure a Load Balancer + +You must create a load balancer in your environment to listen (also known as listeners on some cloud platforms) on key ports. Here is a table of recommended ports that need to be listened on. + +| Service | Protocol | Port | +| ---------- | -------- | ----- | +| apiserver | TCP | 6443 | +| ks-console | TCP | 30880 | +| http | TCP | 80 | +| https | TCP | 443 | + +{{< notice note >}} + +- Make sure your load balancer at least listens on the port of apiserver. + +- You may need to open ports in your security group to ensure external traffic is not blocked depending on where your cluster is deployed. For more information, see [Port Requirements](../../../installing-on-linux/introduction/port-firewall/). +- You can configure both internal and external load balancers on some cloud platforms. After assigning a public IP address to the external load balancer, you can use the IP address to access the cluster. +- For more information about how to configure load balancers, see [Installing on Public Cloud](../../../installing-on-linux/public-cloud/install-kubesphere-on-azure-vms/) to see specific steps on major public cloud platforms. + +{{}} + +## Download KubeKey + +[Kubekey](https://github.com/kubesphere/kubekey) is the next-gen installer which provides an easy, fast and flexible way to install Kubernetes and KubeSphere. Follow the steps below to download KubeKey. + +{{< tabs >}} + +{{< tab "Good network connections to GitHub/Googleapis" >}} + +Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly. + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "Poor network connections to GitHub/Googleapis" >}} + +Run the following command first to make sure you download KubeKey from the correct zone. + +```bash +export KKZONE=cn +``` + +Run the following command to download KubeKey: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +After you download KubeKey, if you transfer it to a new machine also with poor network connections to Googleapis, you must run `export KKZONE=cn` again before you proceed with the steps below. + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +The commands above download the latest release (v3.0.7) of KubeKey. You can change the version number in the command to download a specific version. + +{{}} + +Make `kk` executable: + +```bash +chmod +x kk +``` + +Create an example configuration file with default configurations. Here Kubernetes v1.22.12 is used as an example. + +```bash +./kk create config --with-kubesphere v3.3.2 --with-kubernetes v1.22.12 +``` + +{{< notice note >}} + +- Recommended Kubernetes versions for KubeSphere 3.3: v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and * v1.24.x. For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatability. 
Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x. If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.23.10 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../../installing-on-linux/introduction/kubekey/#support-matrix). + +- If you do not add the flag `--with-kubesphere` in the command in this step, KubeSphere will not be deployed unless you install it using the `addons` field in the configuration file or add this flag again when you use `./kk create cluster` later. +- If you add the flag `--with-kubesphere` without specifying a KubeSphere version, the latest version of KubeSphere will be installed. + +{{}} + +## Deploy KubeSphere and Kubernetes + +After you run the commands above, a configuration file `config-sample.yaml` will be created. Edit the file to add machine information, configure the load balancer and more. + +{{< notice note >}} + +The file name may be different if you customize it. + +{{}} + +### config-sample.yaml example + +```yaml +spec: + hosts: + - {name: master1, address: 192.168.0.2, internalAddress: 192.168.0.2, user: ubuntu, password: Testing123} + - {name: master2, address: 192.168.0.3, internalAddress: 192.168.0.3, user: ubuntu, password: Testing123} + - {name: master3, address: 192.168.0.4, internalAddress: 192.168.0.4, user: ubuntu, password: Testing123} + - {name: node1, address: 192.168.0.5, internalAddress: 192.168.0.5, user: ubuntu, password: Testing123} + - {name: node2, address: 192.168.0.6, internalAddress: 192.168.0.6, user: ubuntu, password: Testing123} + - {name: node3, address: 192.168.0.7, internalAddress: 192.168.0.7, user: ubuntu, password: Testing123} + roleGroups: + etcd: + - master1 + - master2 + - master3 + control-plane: + - master1 + - master2 + - master3 + worker: + - node1 + - node2 + - node3 +``` + +For more information about different fields in this configuration file, see [Kubernetes Cluster Configurations](../../../installing-on-linux/introduction/vars/) and [Multi-node Installation](../../../installing-on-linux/introduction/multioverview/#2-edit-the-configuration-file). + +### Configure the load balancer + +```yaml +spec: + controlPlaneEndpoint: + ##Internal loadbalancer for apiservers + #internalLoadbalancer: haproxy + + domain: lb.kubesphere.local + address: "192.168.0.xx" + port: 6443 +``` + +{{< notice note >}} + +- The address and port should be indented by two spaces in `config-sample.yaml`. +- In most cases, you need to provide the **private IP address** of the load balancer for the field `address`. However, different cloud providers may have different configurations for load balancers. For example, if you configure a Server Load Balancer (SLB) on Alibaba Cloud, the platform assigns a public IP address to the SLB, which means you need to specify the public IP address for the field `address`. +- The domain name of the load balancer is `lb.kubesphere.local` by default for internal access. +- To use an internal load balancer, uncomment the field `internalLoadbalancer`. + +{{}} + +### Persistent storage plugin configurations + +For a production environment, you need to prepare persistent storage and configure the storage plugin (for example, CSI) in `config-sample.yaml` to define which storage service you want to use. For more information, see [Persistent Storage Configurations](../../../installing-on-linux/persistent-storage-configurations/understand-persistent-storage/). 
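+
+For example, if you plan to provision an NFS-based StorageClass through a Helm chart, the `addons` section of `config-sample.yaml` might look like the following sketch (the chart name, repository, and values file below are assumptions for illustration; follow the Persistent Storage Configurations guide for the exact settings of your storage service):
+
+```yaml
+spec:
+  # Other fields omitted.
+  addons:
+  - name: nfs-client
+    namespace: kube-system
+    sources:
+      chart:
+        name: nfs-client-provisioner
+        repo: https://charts.kubesphere.io/main
+        valuesFile: /home/ubuntu/nfs-client.yaml  # Contains the NFS server address and mount path.
+```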
+ +### Enable pluggable components (Optional) + +KubeSphere has decoupled some core feature components since v2.1.0. These components are designed to be pluggable which means you can enable them either before or after installation. By default, KubeSphere will be installed with the minimal package if you do not enable them. + +You can enable any of them according to your demands. It is highly recommended that you install these pluggable components to discover the full-stack features and capabilities provided by KubeSphere. Make sure your machines have sufficient CPU and memory before enabling them. See [Enable Pluggable Components](../../../pluggable-components/) for details. + +### Start installation + +After you complete the configuration, you can execute the following command to start the installation: + +```bash +./kk create cluster -f config-sample.yaml +``` + +### Verify installation + +1. Run the following command to inspect the logs of installation. + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + +2. When you see the following message, it means your HA cluster is successfully created. + + ```bash + ##################################################### + ### Welcome to KubeSphere! ### + ##################################################### + + Console: http://192.168.0.3:30880 + Account: admin + Password: P@88w0rd + + NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. + + ##################################################### + https://kubesphere.io 2020-xx-xx xx:xx:xx + ##################################################### + ``` + diff --git a/content/en/docs/v3.4/installing-on-linux/high-availability-configurations/internal-ha-configuration.md b/content/en/docs/v3.4/installing-on-linux/high-availability-configurations/internal-ha-configuration.md new file mode 100644 index 000000000..b1f169875 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/high-availability-configurations/internal-ha-configuration.md @@ -0,0 +1,198 @@ +--- +title: "Set Up an HA Cluster Using the Internal HAProxy of KubeKey" +keywords: 'KubeSphere, Kubernetes, KubeKey, HA, Installation' +description: 'Learn how to create a highly available cluster using the internal HAProxy of KubeKey.' +linkTitle: "Set Up an HA Cluster Using the Internal HAProxy of KubeKey" +weight: 3210 +--- + +[KubeKey](https://github.com/kubesphere/kubekey) is an easy-to-use tool for creating Kubernetes clusters. Starting from v1.2.1, KubeKey provides a built-in high availability mode to simplify the creation of highly available Kubernetes clusters. The high availability mode that KubeKey implements is called local load balancing mode. KubeKey deploys a load balancer (HAProxy) on each worker node, and the Kubernetes components on all control planes connect to their local kube-apiserver. The Kubernetes components on each worker node, on the other hand, connect to the kube-apiserver of multiple control planes through a reverse proxy, namely the load balancer deployed by KubeKey. 
Although this mode is less efficient than a dedicated load balancer because additional health check mechanisms are introduced, it brings a more practical, efficient, and convenient high availability deployment mode when current environment cannot provide an external load balancer or virtual IP (VIP). + +This document describes how to use the built-in high availability mode when installing KubeSphere on Linux. + +## Architecture + +The following figure shows the example architecture of the built-in high availability mode. For more information about system and network requirements, see [Multi-node Installation](../../../installing-on-linux/introduction/multioverview/#step-1-prepare-linux-hosts). + +![HA architecture](/images/docs/v3.3/zh-cn/installing-on-linux/introduction/internal-ha-configuration/internalLoadBalancer.png) + +{{< notice note >}} + +In the development environment, make sure you have prepared six Linux machines, among which three of them serve as control planes and the other three as worker nodes. + +{{}} + +## Download KubeKey + +Refer to the following steps to download KubeKey. + +{{< tabs >}} + +{{< tab "Good network connections to GitHub/Googleapis" >}} + +Download KubeKey from [its GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or run the following command. + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "Poor network connections to GitHub/Googleapis" >}} + +Run the following command first to make sure that you download KubeKey from the correct zone. + +```bash +export KKZONE=cn +``` + +Run the following command to download KubeKey: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +After you download KubeKey, if you transfer it to a new machine also with poor network connections to Googleapis, you must run `export KKZONE=cn` again before you proceed with the following steps. + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +The preceding commands download the latest release of KubeKey (v3.0.7). You can modify the version number in the command to download a specific version. + +{{}} + +Make `kk` executable: + +```bash +chmod +x kk +``` + +Create an example configuration file with default configurations. Here Kubernetes v1.22.12 is used as an example. + +```bash +./kk create config --with-kubesphere v3.3.2 --with-kubernetes v1.22.12 +``` + +{{< notice note >}} + +- Recommended Kubernetes versions for KubeSphere 3.3: v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and * v1.24.x. For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatability. Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x. If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.23.10 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../../installing-on-linux/introduction/kubekey/#support-matrix). +- If you do not add the flag `--with-kubesphere` in the command in this step, KubeSphere will not be deployed unless you install it using the `addons` field in the configuration file or add this flag again when you use `./kk create cluster` later. +- If you add the flag `--with-kubesphere` without specifying a KubeSphere version, the latest version of KubeSphere will be installed. + +{{}} + +## Deploy KubeSphere and Kubernetes + +After you run the preceding commands, a configuration file `config-sample.yaml` is created. 
Edit the file to add machine information, configure the load balancer and more. + +{{< notice note >}} + +The file name may be different if you customize it. + +{{}} + +### config-sample.yaml example + +```yaml +spec: + hosts: + - {name: master1, address: 192.168.0.2, internalAddress: 192.168.0.2, user: ubuntu, password: Testing123} + - {name: master2, address: 192.168.0.3, internalAddress: 192.168.0.3, user: ubuntu, password: Testing123} + - {name: master3, address: 192.168.0.4, internalAddress: 192.168.0.4, user: ubuntu, password: Testing123} + - {name: node1, address: 192.168.0.5, internalAddress: 192.168.0.5, user: ubuntu, password: Testing123} + - {name: node2, address: 192.168.0.6, internalAddress: 192.168.0.6, user: ubuntu, password: Testing123} + - {name: node3, address: 192.168.0.7, internalAddress: 192.168.0.7, user: ubuntu, password: Testing123} + roleGroups: + etcd: + - master1 + - master2 + - master3 + control-plane: + - master1 + - master2 + - master3 + worker: + - node1 + - node2 + - node3 +``` + +For more information about different fields in this configuration file, see [Kubernetes Cluster Configurations](../../../installing-on-linux/introduction/vars/) and [Multi-node Installation](../../../installing-on-linux/introduction/multioverview/#2-edit-the-configuration-file). + +### Enable the built-in high availability mode + +```yaml +spec: + controlPlaneEndpoint: + ##Internal loadbalancer for apiservers + #internalLoadbalancer: haproxy + + domain: lb.kubesphere.local + address: "" + port: 6443 +``` + +{{< notice note >}} + +- To enable the built-in high availability mode, uncomment the field `internalLoadbalancer`. +- The fields `address` and `port` in `config-sample.yaml` must be indented by two spaces against `controlPlaneEndpoint`. +- The default internal access domain name for the load balancer is `lb.kubesphere.local`. + +{{}} + +### Persistent storage plugin configurations + +For a production environment, you need to prepare persistent storage and configure the storage plugin (for example, CSI) in `config-sample.yaml` to define which storage service you want to use. For more information, see [Persistent Storage Configurations](../../../installing-on-linux/persistent-storage-configurations/understand-persistent-storage/). + +### (Optional) Enable pluggable components + +KubeSphere has decoupled some core feature components since v2.1.0. These components are designed to be pluggable which means you can enable them either before or after installation. By default, KubeSphere is installed with the minimal package if you do not enable them. + +You can enable any of them according to your demands. It is highly recommended that you install these pluggable components to discover the full-stack features and capabilities provided by KubeSphere. Make sure your machines have sufficient CPU and memory before enabling them. See [Enable Pluggable Components](../../../pluggable-components/) for details. + +### Start installation + +After you complete the configuration, run the following command to start installation: + +```bash +./kk create cluster -f config-sample.yaml +``` + +### Verify installation + +1. Run the following command to inspect the logs of installation. + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + +2. When you see the following message, it means your HA cluster is successfully created. 
+ + ```bash + ##################################################### + ### Welcome to KubeSphere! ### + ##################################################### + + Console: http://192.168.0.3:30880 + Account: admin + Password: P@88w0rd + + NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. + + ##################################################### + https://kubesphere.io 2020-xx-xx xx:xx:xx + ##################################################### + ``` diff --git a/content/en/docs/v3.4/installing-on-linux/high-availability-configurations/set-up-ha-cluster-using-keepalived-haproxy.md b/content/en/docs/v3.4/installing-on-linux/high-availability-configurations/set-up-ha-cluster-using-keepalived-haproxy.md new file mode 100644 index 000000000..5443ebf85 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/high-availability-configurations/set-up-ha-cluster-using-keepalived-haproxy.md @@ -0,0 +1,414 @@ +--- +title: "Set up an HA Kubernetes Cluster Using Keepalived and HAproxy" +keywords: 'Kubernetes, KubeSphere, HA, high availability, installation, configuration, Keepalived, HAproxy' +description: 'Learn how to create a highly available cluster using Keepalived and HAproxy.' +linkTitle: "Set up an HA Cluster Using Keepalived and HAproxy" +weight: 3230 +showSubscribe: true +--- + +A highly available Kubernetes cluster ensures your applications run without outages which is required for production. In this connection, there are plenty of ways for you to choose from to achieve high availability. + +This tutorial demonstrates how to configure Keepalived and HAproxy for load balancing and achieve high availability. The steps are listed as below: + +1. Prepare hosts. +2. Configure Keepalived and HAproxy. +3. Use KubeKey to set up a Kubernetes cluster and install KubeSphere. + +## Cluster Architecture + +The example cluster has three master nodes, three worker nodes, two nodes for load balancing and one virtual IP address. The virtual IP address in this example may also be called "a floating IP address". That means in the event of node failures, the IP address can be passed between nodes allowing for failover, thus achieving high availability. + +![architecture-ha-k8s-cluster](/images/docs/v3.3/installing-on-linux/high-availability-configurations/set-up-ha-cluster-using-keepalived-haproxy/architecture-ha-k8s-cluster.png) + +Notice that in this example, Keepalived and HAproxy are not installed on any of the master nodes. Admittedly, you can do that and high availability can also be achieved. That said, configuring two specific nodes for load balancing (You can add more nodes of this kind as needed) is more secure. Only Keepalived and HAproxy will be installed on these two nodes, avoiding any potential conflicts with any Kubernetes components and services. 
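+
+Before configuring the two load-balancer nodes, it can be worth a quick sanity check that they are ready for this role. The snippet below is only an optional sketch: it assumes the `ss` and `ping` utilities are available on `lb1` and `lb2`, and it uses the control plane IP addresses from the host plan in the next section.
+
+```bash
+# HAProxy will listen on port 6443 on the load-balancer nodes, so verify that
+# nothing else is already bound to it.
+ss -lntp | grep ':6443' || echo "port 6443 is free on this node"
+
+# Confirm basic reachability of the control plane nodes that HAProxy will proxy to
+# (addresses follow the example host plan in the next section).
+for ip in 172.16.0.4 172.16.0.5 172.16.0.6; do
+  ping -c 1 -W 2 "$ip" >/dev/null && echo "$ip reachable" || echo "$ip unreachable"
+done
+```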
+ +## Prepare Hosts + +| IP Address | Hostname | Role | +| ----------- | -------- | -------------------- | +| 172.16.0.2 | lb1 | Keepalived & HAproxy | +| 172.16.0.3 | lb2 | Keepalived & HAproxy | +| 172.16.0.4 | master1 | master, etcd | +| 172.16.0.5 | master2 | master, etcd | +| 172.16.0.6 | master3 | master, etcd | +| 172.16.0.7 | worker1 | worker | +| 172.16.0.8 | worker2 | worker | +| 172.16.0.9 | worker3 | worker | +| 172.16.0.10 | | Virtual IP address | + +For more information about requirements for nodes, network, and dependencies, see [Multi-node Installation](../../../installing-on-linux/introduction/multioverview/#step-1-prepare-linux-hosts). + +## Configure Load Balancing + +[Keepalived](https://www.keepalived.org/) provides a VRPP implementation and allows you to configure Linux machines for load balancing, preventing single points of failure. [HAProxy](https://www.haproxy.org/), providing reliable, high performance load balancing, works perfectly with Keepalived. + +As Keepalived and HAproxy are installed on `lb1` and `lb2`, if either one goes down, the virtual IP address (i.e. the floating IP address) will be automatically associated with another node so that the cluster is still functioning well, thus achieving high availability. If you want, you can add more nodes all with Keepalived and HAproxy installed for that purpose. + +Run the following command to install Keepalived and HAproxy first. + +```bash +yum install keepalived haproxy psmisc -y +``` + +### HAproxy Configuration + +1. The configuration of HAproxy is exactly the same on the two machines for load balancing. Run the following command to configure HAproxy. + + ```bash + vi /etc/haproxy/haproxy.cfg + ``` + +2. Here is an example configuration for your reference (Pay attention to the `server` field. Note that `6443` is the `apiserver` port): + + ```bash + global + log /dev/log local0 warning + chroot /var/lib/haproxy + pidfile /var/run/haproxy.pid + maxconn 4000 + user haproxy + group haproxy + daemon + + stats socket /var/lib/haproxy/stats + + defaults + log global + option httplog + option dontlognull + timeout connect 5000 + timeout client 50000 + timeout server 50000 + + frontend kube-apiserver + bind *:6443 + mode tcp + option tcplog + default_backend kube-apiserver + + backend kube-apiserver + mode tcp + option tcplog + option tcp-check + balance roundrobin + default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100 + server kube-apiserver-1 172.16.0.4:6443 check # Replace the IP address with your own. + server kube-apiserver-2 172.16.0.5:6443 check # Replace the IP address with your own. + server kube-apiserver-3 172.16.0.6:6443 check # Replace the IP address with your own. + ``` + +3. Save the file and run the following command to restart HAproxy. + + ```bash + systemctl restart haproxy + ``` + +4. Make it persist through reboots: + + ```bash + systemctl enable haproxy + ``` + +5. Make sure you configure HAproxy on the other machine (`lb2`) as well. + +### Keepalived Configuration + +Keepalived must be installed on both machines while the configuration of them is slightly different. + +1. Run the following command to configure Keepalived. + + ```bash + vi /etc/keepalived/keepalived.conf + ``` + +2. 
Here is an example configuration (`lb1`) for your reference: + + ```bash + global_defs { + notification_email { + } + router_id LVS_DEVEL + vrrp_skip_check_adv_addr + vrrp_garp_interval 0 + vrrp_gna_interval 0 + } + + vrrp_script chk_haproxy { + script "killall -0 haproxy" + interval 2 + weight 2 + } + + vrrp_instance haproxy-vip { + state BACKUP + priority 100 + interface eth0 # Network card + virtual_router_id 60 + advert_int 1 + authentication { + auth_type PASS + auth_pass 1111 + } + unicast_src_ip 172.16.0.2 # The IP address of this machine + unicast_peer { + 172.16.0.3 # The IP address of peer machines + } + + virtual_ipaddress { + 172.16.0.10/24 # The VIP address + } + + track_script { + chk_haproxy + } + } + ``` + + {{< notice note >}} + + - For the `interface` field, you must provide your own network card information. You can run `ifconfig` on your machine to get the value. + + - The IP address provided for `unicast_src_ip` is the IP address of your current machine. For other machines where HAproxy and Keepalived are also installed for load balancing, their IP address must be provided for the field `unicast_peer`. + + {{}} + +3. Save the file and run the following command to restart Keepalived. + + ```bash + systemctl restart keepalived + ``` + +4. Make it persist through reboots: + + ```bash + systemctl enable keepalived + ``` + +5. Make sure you configure Keepalived on the other machine (`lb2`) as well. + +## Verify High Availability + +Before you start to create your Kubernetes cluster, make sure you have tested the high availability. + +1. On the machine `lb1`, run the following command: + + ```bash + [root@lb1 ~]# ip a s + 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever + 2: eth0: mtu 1500 qdisc mq state UP group default qlen 1000 + link/ether 52:54:9e:27:38:c8 brd ff:ff:ff:ff:ff:ff + inet 172.16.0.2/24 brd 172.16.0.255 scope global noprefixroute dynamic eth0 + valid_lft 73334sec preferred_lft 73334sec + inet 172.16.0.10/24 scope global secondary eth0 # The VIP address + valid_lft forever preferred_lft forever + inet6 fe80::510e:f96:98b2:af40/64 scope link noprefixroute + valid_lft forever preferred_lft forever + ``` + +2. As you can see above, the virtual IP address is successfully added. Simulate a failure on this node: + + ```bash + systemctl stop haproxy + ``` + +3. Check the floating IP address again and you can see it disappear on `lb1`. + + ```bash + [root@lb1 ~]# ip a s + 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever + 2: eth0: mtu 1500 qdisc mq state UP group default qlen 1000 + link/ether 52:54:9e:27:38:c8 brd ff:ff:ff:ff:ff:ff + inet 172.16.0.2/24 brd 172.16.0.255 scope global noprefixroute dynamic eth0 + valid_lft 72802sec preferred_lft 72802sec + inet6 fe80::510e:f96:98b2:af40/64 scope link noprefixroute + valid_lft forever preferred_lft forever + ``` + +4. Theoretically, the virtual IP will be failed over to the other machine (`lb2`) if the configuration is successful. 
On `lb2`, run the following command and here is the expected output: + + ```bash + [root@lb2 ~]# ip a s + 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever + 2: eth0: mtu 1500 qdisc mq state UP group default qlen 1000 + link/ether 52:54:9e:3f:51:ba brd ff:ff:ff:ff:ff:ff + inet 172.16.0.3/24 brd 172.16.0.255 scope global noprefixroute dynamic eth0 + valid_lft 72690sec preferred_lft 72690sec + inet 172.16.0.10/24 scope global secondary eth0 # The VIP address + valid_lft forever preferred_lft forever + inet6 fe80::f67c:bd4f:d6d5:1d9b/64 scope link noprefixroute + valid_lft forever preferred_lft forever + ``` + +5. As you can see above, high availability is successfully configured. + +## Use KubeKey to Create a Kubernetes Cluster + +[KubeKey](https://github.com/kubesphere/kubekey) is an efficient and convenient tool to create a Kubernetes cluster. Follow the steps below to download KubeKey. + +{{< tabs >}} + +{{< tab "Good network connections to GitHub/Googleapis" >}} + +Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly. + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "Poor network connections to GitHub/Googleapis" >}} + +Run the following command first to make sure you download KubeKey from the correct zone. + +```bash +export KKZONE=cn +``` + +Run the following command to download KubeKey: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +After you download KubeKey, if you transfer it to a new machine also with poor network connections to Googleapis, you must run `export KKZONE=cn` again before you proceed with the steps below. + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +The commands above download the latest release of KubeKey. You can change the version number in the command to download a specific version. + +{{}} + +Make `kk` executable: + +```bash +chmod +x kk +``` + +Create an example configuration file with default configurations. Here Kubernetes v1.22.12 is used as an example. + +```bash +./kk create config --with-kubesphere v3.3.2 --with-kubernetes v1.22.12 +``` + +{{< notice note >}} + +- Recommended Kubernetes versions for KubeSphere 3.3: v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and * v1.24.x. For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatability. Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x. If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.23.10 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../../installing-on-linux/introduction/kubekey/#support-matrix). + +- If you do not add the flag `--with-kubesphere` in the command in this step, KubeSphere will not be deployed unless you install it using the `addons` field in the configuration file or add this flag again when you use `./kk create cluster` later. +- If you add the flag `--with-kubesphere` without specifying a KubeSphere version, the latest version of KubeSphere will be installed. + +{{}} + +## Deploy KubeSphere and Kubernetes + +After you run the commands above, a configuration file `config-sample.yaml` will be created. 
Edit the file to add machine information, configure the load balancer and more. + +{{< notice note >}} + +The file name may be different if you customize it. + +{{}} + +### config-sample.yaml example + +```yaml +... +spec: + hosts: + - {name: master1, address: 172.16.0.4, internalAddress: 172.16.0.4, user: root, password: Testing123} + - {name: master2, address: 172.16.0.5, internalAddress: 172.16.0.5, user: root, password: Testing123} + - {name: master3, address: 172.16.0.6, internalAddress: 172.16.0.6, user: root, password: Testing123} + - {name: worker1, address: 172.16.0.7, internalAddress: 172.16.0.7, user: root, password: Testing123} + - {name: worker2, address: 172.16.0.8, internalAddress: 172.16.0.8, user: root, password: Testing123} + - {name: worker3, address: 172.16.0.9, internalAddress: 172.16.0.9, user: root, password: Testing123} + roleGroups: + etcd: + - master1 + - master2 + - master3 + control-plane: + - master1 + - master2 + - master3 + worker: + - worker1 + - worker2 + - worker3 + controlPlaneEndpoint: + domain: lb.kubesphere.local + address: 172.16.0.10 # The VIP address + port: 6443 +... +``` + +{{< notice note >}} + +- Replace the value of `controlPlaneEndpoint.address` with your own VIP address. +- For more information about different parameters in this configuration file, see [Multi-node Installation](../../../installing-on-linux/introduction/multioverview/#2-edit-the-configuration-file). + +{{}} + +### Start installation + +After you complete the configuration, you can execute the following command to start the installation: + +```bash +./kk create cluster -f config-sample.yaml +``` + +### Verify installation + +1. Run the following command to inspect the logs of installation. + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + +2. When you see the following message, it means your HA cluster is successfully created. + + ```bash + ##################################################### + ### Welcome to KubeSphere! ### + ##################################################### + + Console: http://172.16.0.4:30880 + Account: admin + Password: P@88w0rd + + NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. 
+ + ##################################################### + https://kubesphere.io 2020-xx-xx xx:xx:xx + ##################################################### + ``` diff --git a/content/en/docs/v3.4/installing-on-linux/introduction/_index.md b/content/en/docs/v3.4/installing-on-linux/introduction/_index.md new file mode 100644 index 000000000..05f734ddb --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/introduction/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Introduction" +weight: 3100 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/installing-on-linux/introduction/air-gapped-installation.md b/content/en/docs/v3.4/installing-on-linux/introduction/air-gapped-installation.md new file mode 100644 index 000000000..fffcd507b --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/introduction/air-gapped-installation.md @@ -0,0 +1,590 @@ +--- +title: "Air-gapped Installation" +keywords: 'Air-gapped, installation, KubeSphere' +description: 'Understand how to install KubeSphere and Kubernetes in the air-gapped environment.' + +linkTitle: "Air-gapped Installation" +weight: 3140 +--- + +KubeKey is an open-source, lightweight tool for deploying Kubernetes clusters. It allows you to install Kubernetes/K3s only, both Kubernetes/K3s and KubeSphere, and other cloud-native plugins in a flexible, fast, and convenient way. Additionally, it is an effective tool for scaling and upgrading clusters. + +In KubeKey v2.1.0, we bring in concepts of manifest and artifact, which provides a solution for air-gapped installation of Kubernetes clusters. A manifest file describes information of the current Kubernetes cluster and defines content in an artifact. Previously, users had to prepare deployment tools, image (.tar) file, and other binaries as the Kubernetes version and image to deploy are different. Now, with KubeKey, air-gapped installation can never be so easy. You simply use a manifest file to define what you need for your cluster in air-gapped environments, and then export the artifact file to quickly and easily deploy image registries and Kubernetes cluster. + +## Prerequisites + +|Host IP| Host Name | Usage | +| ---------------- | ---- | ---------------- | +|192.168.0.2 | node1 | Online host for packaging the source cluster | +|192.168.0.3 | node2 | Control plane node of the air-gapped environment | +|192.168.0.4 | node3 | Image registry node of the air-gapped environment | +## Preparations + +1. Run the following commands to download KubeKey. + {{< tabs >}} + + {{< tab "Good network connections to GitHub/Googleapis" >}} + + Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly. + + ```bash + curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - + ``` + + {{}} + + {{< tab "Poor network connections to GitHub/Googleapis" >}} + + Run the following command first to make sure you download KubeKey from the correct zone. + + ```bash + export KKZONE=cn + ``` + + Run the following command to download KubeKey: + + ```bash + curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - + ``` + {{}} + + {{}} + +2. On the online host, run the following command and copy content in the [manifest-example](https://github.com/kubesphere/kubekey/blob/master/docs/manifest-example.md). 
+ + ```bash + vim manifest.yaml + ``` + + ```yaml + --- + apiVersion: kubekey.kubesphere.io/v1alpha2 + kind: Manifest + metadata: + name: sample + spec: + arches: + - amd64 + operatingSystems: + - arch: amd64 + type: linux + id: centos + version: "7" + repository: + iso: + localPath: + url: https://github.com/kubesphere/kubekey/releases/download/v3.0.7/centos7-rpms-amd64.iso + - arch: amd64 + type: linux + id: ubuntu + version: "20.04" + repository: + iso: + localPath: + url: https://github.com/kubesphere/kubekey/releases/download/v3.0.7/ubuntu-20.04-debs-amd64.iso + kubernetesDistributions: + - type: kubernetes + version: v1.22.12 + components: + helm: + version: v3.9.0 + cni: + version: v0.9.1 + etcd: + version: v3.4.13 + ## For now, if your cluster container runtime is containerd, KubeKey will add a docker 20.10.8 container runtime in the below list. + ## The reason is KubeKey creates a cluster with containerd by installing a docker first and making kubelet connect the socket file of containerd which docker contained. + containerRuntimes: + - type: docker + version: 20.10.8 + crictl: + version: v1.24.0 + docker-registry: + version: "2" + harbor: + version: v2.5.3 + docker-compose: + version: v2.2.2 + images: + - docker.io/kubesphere/kube-apiserver:v1.22.12 + - docker.io/kubesphere/kube-controller-manager:v1.22.12 + - docker.io/kubesphere/kube-proxy:v1.22.12 + - docker.io/kubesphere/kube-scheduler:v1.22.12 + - docker.io/kubesphere/pause:3.5 + - docker.io/coredns/coredns:1.8.0 + - docker.io/calico/cni:v3.23.2 + - docker.io/calico/kube-controllers:v3.23.2 + - docker.io/calico/node:v3.23.2 + - docker.io/calico/pod2daemon-flexvol:v3.23.2 + - docker.io/calico/typha:v3.23.2 + - docker.io/kubesphere/flannel:v0.12.0 + - docker.io/openebs/provisioner-localpv:3.3.0 + - docker.io/openebs/linux-utils:3.3.0 + - docker.io/library/haproxy:2.3 + - docker.io/kubesphere/nfs-subdir-external-provisioner:v4.0.2 + - docker.io/kubesphere/k8s-dns-node-cache:1.15.12 + - docker.io/kubesphere/ks-installer:v3.3.2 + - docker.io/kubesphere/ks-apiserver:v3.3.2 + - docker.io/kubesphere/ks-console:v3.3.2 + - docker.io/kubesphere/ks-controller-manager:v3.3.2 + - docker.io/kubesphere/ks-upgrade:v3.3.2 + - docker.io/kubesphere/kubectl:v1.22.0 + - docker.io/kubesphere/kubectl:v1.21.0 + - docker.io/kubesphere/kubectl:v1.20.0 + - docker.io/kubesphere/kubefed:v0.8.1 + - docker.io/kubesphere/tower:v0.2.0 + - docker.io/minio/minio:RELEASE.2019-08-07T01-59-21Z + - docker.io/minio/mc:RELEASE.2019-08-07T23-14-43Z + - docker.io/csiplugin/snapshot-controller:v4.0.0 + - docker.io/kubesphere/nginx-ingress-controller:v1.1.0 + - docker.io/mirrorgooglecontainers/defaultbackend-amd64:1.4 + - docker.io/kubesphere/metrics-server:v0.4.2 + - docker.io/library/redis:5.0.14-alpine + - docker.io/library/haproxy:2.0.25-alpine + - docker.io/library/alpine:3.14 + - docker.io/osixia/openldap:1.3.0 + - docker.io/kubesphere/netshoot:v1.0 + - docker.io/kubeedge/cloudcore:v1.9.2 + - docker.io/kubeedge/iptables-manager:v1.9.2 + - docker.io/kubesphere/edgeservice:v0.2.0 + - docker.io/openpolicyagent/gatekeeper:v3.5.2 + - docker.io/kubesphere/openpitrix-jobs:v3.3.2 + - docker.io/kubesphere/devops-apiserver:ks-v3.3.2 + - docker.io/kubesphere/devops-controller:ks-v3.3.2 + - docker.io/kubesphere/devops-tools:ks-v3.3.2 + - docker.io/kubesphere/ks-jenkins:v3.3.0-2.319.1 + - docker.io/jenkins/inbound-agent:4.10-2 + - docker.io/kubesphere/builder-base:v3.2.2 + - docker.io/kubesphere/builder-nodejs:v3.2.0 + - docker.io/kubesphere/builder-maven:v3.2.0 + - 
docker.io/kubesphere/builder-maven:v3.2.1-jdk11 + - docker.io/kubesphere/builder-python:v3.2.0 + - docker.io/kubesphere/builder-go:v3.2.0 + - docker.io/kubesphere/builder-go:v3.2.2-1.16 + - docker.io/kubesphere/builder-go:v3.2.2-1.17 + - docker.io/kubesphere/builder-go:v3.2.2-1.18 + - docker.io/kubesphere/builder-base:v3.2.2-podman + - docker.io/kubesphere/builder-nodejs:v3.2.0-podman + - docker.io/kubesphere/builder-maven:v3.2.0-podman + - docker.io/kubesphere/builder-maven:v3.2.1-jdk11-podman + - docker.io/kubesphere/builder-python:v3.2.0-podman + - docker.io/kubesphere/builder-go:v3.2.0-podman + - docker.io/kubesphere/builder-go:v3.2.2-1.16-podman + - docker.io/kubesphere/builder-go:v3.2.2-1.17-podman + - docker.io/kubesphere/builder-go:v3.2.2-1.18-podman + - docker.io/kubesphere/s2ioperator:v3.2.1 + - docker.io/kubesphere/s2irun:v3.2.0 + - docker.io/kubesphere/s2i-binary:v3.2.0 + - docker.io/kubesphere/tomcat85-java11-centos7:v3.2.0 + - docker.io/kubesphere/tomcat85-java11-runtime:v3.2.0 + - docker.io/kubesphere/tomcat85-java8-centos7:v3.2.0 + - docker.io/kubesphere/tomcat85-java8-runtime:v3.2.0 + - docker.io/kubesphere/java-11-centos7:v3.2.0 + - docker.io/kubesphere/java-8-centos7:v3.2.0 + - docker.io/kubesphere/java-8-runtime:v3.2.0 + - docker.io/kubesphere/java-11-runtime:v3.2.0 + - docker.io/kubesphere/nodejs-8-centos7:v3.2.0 + - docker.io/kubesphere/nodejs-6-centos7:v3.2.0 + - docker.io/kubesphere/nodejs-4-centos7:v3.2.0 + - docker.io/kubesphere/python-36-centos7:v3.2.0 + - docker.io/kubesphere/python-35-centos7:v3.2.0 + - docker.io/kubesphere/python-34-centos7:v3.2.0 + - docker.io/kubesphere/python-27-centos7:v3.2.0 + - quay.io/argoproj/argocd:v2.3.3 + - quay.io/argoproj/argocd-applicationset:v0.4.1 + - ghcr.io/dexidp/dex:v2.30.2 + - docker.io/library/redis:6.2.6-alpine + - docker.io/jimmidyson/configmap-reload:v0.5.0 + - docker.io/prom/prometheus:v2.34.0 + - docker.io/kubesphere/prometheus-config-reloader:v0.55.1 + - docker.io/kubesphere/prometheus-operator:v0.55.1 + - docker.io/kubesphere/kube-rbac-proxy:v0.11.0 + - docker.io/kubesphere/kube-state-metrics:v2.5.0 + - docker.io/prom/node-exporter:v1.3.1 + - docker.io/prom/alertmanager:v0.23.0 + - docker.io/thanosio/thanos:v0.25.2 + - docker.io/grafana/grafana:8.3.3 + - docker.io/kubesphere/kube-rbac-proxy:v0.8.0 + - docker.io/kubesphere/notification-manager-operator:v1.4.0 + - docker.io/kubesphere/notification-manager:v1.4.0 + - docker.io/kubesphere/notification-tenant-sidecar:v3.2.0 + - docker.io/kubesphere/elasticsearch-curator:v5.7.6 + - docker.io/kubesphere/elasticsearch-oss:6.8.22 + - docker.io/kubesphere/fluentbit-operator:v0.13.0 + - docker.io/library/docker:19.03 + - docker.io/kubesphere/fluent-bit:v1.8.11 + - docker.io/kubesphere/log-sidecar-injector:1.1 + - docker.io/elastic/filebeat:6.7.0 + - docker.io/kubesphere/kube-events-operator:v0.4.0 + - docker.io/kubesphere/kube-events-exporter:v0.4.0 + - docker.io/kubesphere/kube-events-ruler:v0.4.0 + - docker.io/kubesphere/kube-auditing-operator:v0.2.0 + - docker.io/kubesphere/kube-auditing-webhook:v0.2.0 + - docker.io/istio/pilot:1.11.1 + - docker.io/istio/proxyv2:1.11.1 + - docker.io/jaegertracing/jaeger-operator:1.27 + - docker.io/jaegertracing/jaeger-agent:1.27 + - docker.io/jaegertracing/jaeger-collector:1.27 + - docker.io/jaegertracing/jaeger-query:1.27 + - docker.io/jaegertracing/jaeger-es-index-cleaner:1.27 + - docker.io/kubesphere/kiali-operator:v1.38.1 + - docker.io/kubesphere/kiali:v1.38 + - docker.io/library/busybox:1.31.1 + - docker.io/library/nginx:1.14-alpine + 
- docker.io/joosthofman/wget:1.0 + - docker.io/nginxdemos/hello:plain-text + - docker.io/library/wordpress:4.8-apache + - docker.io/mirrorgooglecontainers/hpa-example:latest + - docker.io/fluent/fluentd:v1.4.2-2.0 + - docker.io/library/perl:latest + - docker.io/kubesphere/examples-bookinfo-productpage-v1:1.16.2 + - docker.io/kubesphere/examples-bookinfo-reviews-v1:1.16.2 + - docker.io/kubesphere/examples-bookinfo-reviews-v2:1.16.2 + - docker.io/kubesphere/examples-bookinfo-details-v1:1.16.2 + - docker.io/kubesphere/examples-bookinfo-ratings-v1:1.16.3 + - docker.io/weaveworks/scope:1.13.0 + ``` + + {{< notice note >}} + + - If the artifact file to export contains ISO dependencies, such as conntarck and chrony, set the IP address for downloading the ISO dependencies in **.repostiory.iso.url** of **operationSystem**. Alternatively, you can download the ISO package in advance and fill in the local path in **localPath** and delete the `url` configuration item. + + - You need to enable **harbor** and **docker-compose** configuration items, which will be used when you use KubeKey to build a Harbor registry for pushing images. + + - By default, the list of images in the created manifest is obtained from **docker.io**. + + - You can customize the **manifest-sample.yaml** file to export the desired artifact file. + + - You can download the ISO files at https://github.com/kubesphere/kubekey/releases/tag/v3.0.7. + + {{}} + +3. If you already deployed a cluster, you can run the following command in the cluster to create a manifest file and configure the file according to the sample in Step 2. + + ```bash + ./kk create manifest + ``` + +4. Export the artifact. + + {{< tabs >}} + + {{< tab "Good network connections to GitHub/Googleapis" >}} + + Run the following command directly: + + ```bash + ./kk artifact export -m manifest-sample.yaml -o kubesphere.tar.gz + ``` + + {{}} + + {{< tab "Poor network connections to GitHub/Googleapis" >}} + + Run the following commands: + + ```bash + export KKZONE=cn + + ./kk artifact export -m manifest-sample.yaml -o kubesphere.tar.gz + ``` + {{}} + + {{}} + + {{< notice note >}} + + An artifact is a .tgz package containing the image package (.tar) and associated binaries exported from the specified manifest file. You can specify an artifact in the KubeKey commands for initializing the image registry, creating clusters, adding nodes, and upgrading clusters, and then KubeKey will automatically unpack the artifact and use the unpacked file when running the command. + + - Make sure the network connection is working. + + - KubeKey will resolve image names in the image list. If the image registry requires authentication, you can configure it in **.registry.auths** in the manifest file. + {{}} +## Install Clusters in the Air-gapped Environment + +1. Copy the downloaded KubeKey and artifact to nodes in the air-gapped environment using a USB device. + +2. Run the following command to create a configuration file for the air-gapped cluster: + + ```bash + ./kk create config --with-kubesphere v3.3.2 --with-kubernetes v1.22.12 -f config-sample.yaml + ``` + +3. Run the following command to modify the configuration file: + + ```bash + vim config-sample.yaml + ``` + + {{< notice note >}} + + - Modify the node information according to the actual configuration of the air-gapped environment. + - You must specify the node where the `registry` to deploy (for KubeKey deployment of self-built Harbor registries). + - In `registry`, the value of `type` must be specified as that of `harbor`. 
Otherwise, the docker registry is installed by default. + + {{}} + + ```yaml + apiVersion: kubekey.kubesphere.io/v1alpha2 + kind: Cluster + metadata: + name: sample + spec: + hosts: + - {name: master, address: 192.168.0.3, internalAddress: 192.168.0.3, user: root, password: ""} + - {name: node1, address: 192.168.0.4, internalAddress: 192.168.0.4, user: root, password: ""} + + roleGroups: + etcd: + - master + control-plane: + - master + worker: + - node1 + # If you want to use KubeKey to automatically deploy the image registry, set this value. You are advised to separately deploy the registry and the cluster. + registry: + - node1 + controlPlaneEndpoint: + ## Internal loadbalancer for apiservers + # internalLoadbalancer: haproxy + + domain: lb.kubesphere.local + address: "" + port: 6443 + kubernetes: + version: v1.22.12 + clusterName: cluster.local + network: + plugin: calico + kubePodsCIDR: 10.233.64.0/18 + kubeServiceCIDR: 10.233.0.0/18 + ## multus support. https://github.com/k8snetworkplumbingwg/multus-cni + multusCNI: + enabled: false + registry: + # To use KubeKey to deploy Harbor, set the value of this parameter to harbor. If you do not set this parameter and still use KubeKey to create an container image registry, the docker registry is used by default. + type: harbor + # If Harbor or other registries deployed by using KubeKey requires login, you can set the auths parameter of the registry. However, if you create a docker registry using KubeKey, you do not need to set the auths parameter. + # Note: If you use KubeKey to deploy Harbor, do not set this parameter until Harbor is started. + #auths: + # "dockerhub.kubekey.local": + # username: admin + # password: Harbor12345 + # Set the private registry to use during cluster deployment. + privateRegistry: "" + namespaceOverride: "" + registryMirrors: [] + insecureRegistries: [] + addons: [] + ``` + + +4. Run the following command to install an image registry: + + ```bash + ./kk init registry -f config-sample.yaml -a kubesphere.tar.gz + ``` + {{< notice note >}} + + The parameters in the command are explained as follows: + + - **config-sample.yaml**: Specifies the configuration file of the cluster in the air-gapped environment. + + - **kubesphere.tar.gz**: Specifies the image package of the source cluster. + + {{}} + +5. Create a Harbor project. + + {{< notice note >}} + + As Harbor adopts the Role-based Access Control (RBAC) mechanism, which means that only specified users can perform certain operations. Therefore, you must create a project before pushing images to Harbor. Harbor supports two types of projects: + + - **Public**: All users can pull images from the project. + + - **Private**: Only project members can pull images from the project. + + The username and password for logging in to Harbor is **admin** and **Harbor12345** by default. The installation file of Harbor is located in **/opt/harbor**, where you can perform O&M of Harbor. + + + + {{}} + + Method 1: Run the following commands to create a Harbor project. + + a. Run the following command to download the specified script to initialize the Harbor registry: + + ```bash + curl -O https://raw.githubusercontent.com/kubesphere/ks-installer/master/scripts/create_project_harbor.sh + ``` + + b. Run the following command to modify the script configuration file: + + ```bash + vim create_project_harbor.sh + ``` + + ```yaml + #!/usr/bin/env bash + + # Copyright 2018 The KubeSphere Authors. 
+ # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + url="https://dockerhub.kubekey.local" #Change the value of url to https://dockerhub.kubekey.local. + user="admin" + passwd="Harbor12345" + + harbor_projects=(library + kubesphereio + kubesphere + calico + coredns + openebs + csiplugin + minio + mirrorgooglecontainers + osixia + prom + thanosio + jimmidyson + grafana + elastic + istio + jaegertracing + jenkins + weaveworks + openpitrix + joosthofman + nginxdemos + fluent + kubeedge + ) + + for project in "${harbor_projects[@]}"; do + echo "creating $project" + curl -u "${user}:${passwd}" -X POST -H "Content-Type: application/json" "${url}/api/v2.0/projects" -d "{ \"project_name\": \"${project}\", \"public\": true}" -k #Add -k at the end of the curl command. + done + + ``` + + {{< notice note >}} + + - Change the value of **url** to **https://dockerhub.kubekey.local**. + + - The project name of the registry must be the same as that of the image list. + + - Add **-k** at the end of the **curl** command. + + {{}} + + c. Run the following commands to create a Harbor project: + + ```bash + chmod +x create_project_harbor.sh + ``` + + ```bash + ./create_project_harbor.sh + ``` + + Method 2: Log in to Harbor and create a project. Set the project to **Public**, so that any user can pull images from this project. For more information, please refer to [Create Projects]( https://goharbor.io/docs/1.10/working-with-projects/create-projects/). + + ![harbor-login](/images/docs/v3.3/appstore/built-in-apps/harbor-app/harbor-login.jpg) + +6. Run the following command again to modify the cluster configuration file: + + ```bash + vim config-sample.yaml + ``` + + {{< notice note >}} + + - In **auths**, add **dockerhub.kubekey.local** and the username and password. + - In **privateRegistry**, add **dockerhub.kubekey.local**. + + {{}} + + ```yaml + ... + registry: + type: harbor + auths: + "dockerhub.kubekey.local": + username: admin + password: Harbor12345 + privateRegistry: "dockerhub.kubekey.local" + namespaceOverride: "kubesphereio" + registryMirrors: [] + insecureRegistries: [] + addons: [] + ``` + {{< notice note >}} + + - In **auths**, enter **dockerhub.kubekey.local**, username (**admin**) and password (**Harbor12345**). + - In **privateRegistry**, enter **dockerhub.kubekey.local**. + - In **namespaceOverride**, enter **kubesphereio**. + + {{}} +7. Run the following command to install a KubeSphere cluster: + + ```bash + ./kk create cluster -f config-sample1.yaml -a kubesphere.tar.gz --with-packages + ``` + + The parameters are explained as follows: + + - **config-sample.yaml**: Specifies the configuration file for the cluster in the air-gapped environment. + - **kubesphere.tar.gz**: Specifies the tarball image from which the source cluster is packaged. + - **--with-packages**: This parameter is required if you want to install the ISO dependencies. + +8. 
Run the following command to view the cluster status: + + ```bash + $ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + After the installation is completed, the following information is displayed: + + ```bash + ************************************************** + ##################################################### + ### Welcome to KubeSphere! ### + ##################################################### + + Console: http://192.168.0.3:30880 + Account: admin + Password: P@88w0rd + + NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 1. Please change the default password after login. + + ##################################################### + https://kubesphere.io 2022-02-28 23:30:06 + ##################################################### + ``` + +9. Access KubeSphere's web console at `http://{IP}:30880` using the default account and password `admin/P@88w0rd`. + + ![login](/images/docs/v3.3/installing-on-kubernetes/introduction/overview/login.png) + + {{< notice note >}} + + To access the console, make sure that port 30880 is enabled in your security group. + + {{}} \ No newline at end of file diff --git a/content/en/docs/v3.4/installing-on-linux/introduction/intro.md b/content/en/docs/v3.4/installing-on-linux/introduction/intro.md new file mode 100644 index 000000000..c34eebcb8 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/introduction/intro.md @@ -0,0 +1,70 @@ +--- +title: "Installing on Linux — Overview" +keywords: 'Kubernetes, KubeSphere, Linux, Installation' +description: 'Explore the general content in this chapter, including installation preparation, installation tool and method, and storage configurations.' +linkTitle: "Overview" +weight: 3110 +--- + +As an open-source project on [GitHub](https://github.com/kubesphere), KubeSphere is home to a community with thousands of users. Many of them are running KubeSphere for their production workloads. For the installation on Linux, KubeSphere can be deployed both in clouds and in on-premises environments, such as AWS EC2, Azure VM and bare metal. + +The installation process is easy and friendly as KubeSphere provides users with [KubeKey](https://github.com/kubesphere/kubekey), a lightweight installer that supports the installation of Kubernetes, KubeSphere and related add-ons. KubeKey not only helps users to create clusters online but also serves as an air-gapped installation solution. + +Here is a list of available installation options. + +- [All-in-one installation](../../../quick-start/all-in-one-on-linux/): Install KubeSphere on a single node. It is only for users to quickly get familiar with KubeSphere. +- [Multi-node installation](../multioverview/): Install KubeSphere on multiple nodes. It is for testing or development. +- [Air-gapped installation on Linux](../air-gapped-installation/): All images of KubeSphere have been encapsulated into a package. It is convenient for air-gapped installation on Linux machines. +- [High availability installation](../../../installing-on-linux/high-availability-configurations/ha-configuration/): Install a highly-available KubeSphere cluster with multiple nodes which is used for production. +- Minimal Packages: Only install the minimum required system components of KubeSphere. 
Here is the minimum resource requirement: + - 2 CPUs + - 4 GB RAM + - 40 GB Storage +- [Full Packages](../../../pluggable-components/): Install all available system components of KubeSphere such as DevOps, service mesh, and alerting. + +{{< notice note >}} + +Not all options are mutually exclusive. For instance, you can deploy KubeSphere with the minimal package on multiple nodes in an air-gapped environment. + +{{}} + +If you have an existing Kubernetes cluster, see [Overview of Installing on Kubernetes](../../../installing-on-kubernetes/introduction/overview/). + +## Before Installation + +- As images will be pulled from the Internet, your environment must have Internet access. Otherwise, you need to [install KubeSphere in an air-gapped environment](../air-gapped-installation/). +- For all-in-one installation, the only one node is both the control plane and the worker. +- For multi-node installation, you need to provide host information in a configuration file. +- See [Port Requirements](../port-firewall/) before installation. + +## KubeKey + +[KubeKey](https://github.com/kubesphere/kubekey) provides an efficient approach to the installation and configuration of your cluster. You can use it to create, scale, and upgrade your Kubernetes cluster. It also allows you to install cloud-native add-ons (YAML or Chart) as you set up your cluster. For more information, see [KubeKey](../kubekey). + +## Quick Installation for Development and Testing + +KubeSphere has decoupled some components since v2.1.0. KubeKey only installs necessary components by default as this way features fast installation and minimal resource consumption. If you want to enable enhanced pluggable functionalities, see [Enable Pluggable Components](../../../pluggable-components/) for details. + +The quick installation of KubeSphere is only for development or testing since it uses [Local Volume](https://kubernetes.io/docs/concepts/storage/volumes/#local) based on [openEBS](https://openebs.io/) to provide storage services by default. If you want a production installation, see [High Availability Configurations](../../../installing-on-linux/high-availability-configurations/ha-configuration/). + +## Storage Configurations + +KubeSphere allows you to configure persistent storage services both before and after installation. Meanwhile, KubeSphere supports a variety of open-source storage solutions (for example, Ceph and GlusterFS) as well as commercial storage products. Refer to [Persistent Storage Configurations](../../../installing-on-linux/persistent-storage-configurations/understand-persistent-storage/) for detailed instructions regarding how to configure the storage class before you install KubeSphere. + +For more information about how to set different storage classes for your workloads after you install KubeSphere, see [Storage Classes](../../../cluster-administration/storageclass/). + +## Cluster Operation and Maintenance + +### Add new nodes + +With KubeKey, you can increase the number of nodes to meet higher resource needs after the installation, especially in production. For more information, see [Add New Nodes](../../../installing-on-linux/cluster-operation/add-new-nodes/). + +### Remove nodes + +You need to drain a node before you remove it. For more information, see [Remove Nodes](../../../installing-on-linux/cluster-operation/remove-nodes/). + +## Uninstalling + +Uninstalling KubeSphere means it will be removed from your machine, which is irreversible. Please be cautious with the operation. 
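+
+As a rough sketch only, if the cluster was created with KubeKey and the configuration file is still available, uninstalling is typically driven by the same tool. The file name below is an assumption; use the name of your own configuration file.
+
+```bash
+# Delete the cluster with the configuration file that was used to create it
+# (assumes it is still named config-sample.yaml).
+./kk delete cluster -f config-sample.yaml
+```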
+ +For more information, see [Uninstall KubeSphere and Kubernetes](../../../installing-on-linux/uninstall-kubesphere-and-kubernetes/). diff --git a/content/en/docs/v3.4/installing-on-linux/introduction/kubekey.md b/content/en/docs/v3.4/installing-on-linux/introduction/kubekey.md new file mode 100644 index 000000000..7f721eaae --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/introduction/kubekey.md @@ -0,0 +1,89 @@ +--- +title: "KubeKey" +keywords: 'KubeKey, Installation, KubeSphere' +description: 'Understand what KubeKey is and how it works to help you create, scale and upgrade your Kubernetes cluster.' +linkTitle: "KubeKey" +weight: 3120 +--- + +Developed in Go, [KubeKey](https://github.com/kubesphere/kubekey) represents a brand-new installation tool as a replacement for the ansible-based installer used before. KubeKey provides you with flexible installation choices, as you can install Kubernetes only or install both Kubernetes and KubeSphere. + +There are several scenarios to use KubeKey: + +- Install Kubernetes only; +- Install Kubernetes and KubeSphere together in one command; +- Scale a cluster; +- Upgrade a cluster; +- Install Kubernetes-related add-ons (Chart or YAML). + +## How Does KubeKey Work + +After you download KubeKey, you use an executable called `kk` to perform different operations. No matter you use it to create, scale or upgrade a cluster, you must prepare a configuration file using `kk` beforehand. This configuration file contains basic parameters of your cluster, such as host information, network configurations (CNI plugin and Pod and Service CIDR), registry mirrors, add-ons (YAML or Chart) and pluggable component options (if you install KubeSphere). For more information, see [an example configuration file](https://github.com/kubesphere/kubekey/blob/release-2.2/docs/config-example.md). + +With the configuration file in place, you execute the `./kk` command with varied flags for different operations. After that, KubeKey automatically installs Docker and pulls all the necessary images for installation. When the installation is complete, you can inspect installation logs. + +## Why KubeKey + +- The previous ansible-based installer has a bunch of software dependencies such as Python. KubeKey is developed in Go language to get rid of the problem in a variety of environments, making sure the installation is successful. +- KubeKey supports multiple installation options, such as [all-in-one installation](../../../quick-start/all-in-one-on-linux/), [multi-node installation](../multioverview/), and [air-gapped installation](../air-gapped-installation/). +- KubeKey uses Kubeadm to install Kubernetes clusters on nodes in parallel as much as possible in order to reduce installation complexity and improve efficiency. It greatly saves installation time compared to the older installer. +- KubeKey aims to install clusters as an object, i.e., CaaO. + +## Download KubeKey + +{{< tabs >}} + +{{< tab "Good network connections to GitHub/Googleapis" >}} + +Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly. + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "Poor network connections to GitHub/Googleapis" >}} + +Run the following command first to make sure you download KubeKey from the correct zone. 
+ +```bash +export KKZONE=cn +``` + +Run the following command to download KubeKey: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +After you download KubeKey, if you transfer it to a new machine also with poor network connections to Googleapis, you must run `export KKZONE=cn` again before you proceed with the steps below. + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +The commands above download the latest release of KubeKey. You can change the version number in the command to download a specific version. + +{{}} + +## Support Matrix + +If you want to use KubeKey to install both Kubernetes and KubeSphere 3.3, see the following table of all supported Kubernetes versions. + +| KubeSphere version | Supported Kubernetes versions | +| ------------------ | ------------------------------------------------------------ | +| v3.3.2 | v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and * v1.24.x | + +{{< notice note >}} + +- You can also run `./kk version --show-supported-k8s` to see all supported Kubernetes versions that can be installed by KubeKey. +- The Kubernetes versions that can be installed using KubeKey are different from the Kubernetes versions supported by KubeSphere 3.3. If you want to [install KubeSphere 3.3 on an existing Kubernetes cluster](../../../installing-on-kubernetes/introduction/overview/), your Kubernetes version must be v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and * v1.24.x. For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatability. Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x + +{{}} \ No newline at end of file diff --git a/content/en/docs/v3.4/installing-on-linux/introduction/multioverview.md b/content/en/docs/v3.4/installing-on-linux/introduction/multioverview.md new file mode 100644 index 000000000..75780825e --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/introduction/multioverview.md @@ -0,0 +1,364 @@ +--- +title: "Install a Multi-node Kubernetes and KubeSphere Cluster" +keywords: 'Multi-node, Installation, KubeSphere' +description: 'Learn the general steps of installing KubeSphere and Kubernetes on a multi-node cluster.' +linkTitle: "Multi-node Installation" +weight: 3130 +--- + +In a production environment, a single-node cluster cannot satisfy most of the needs as the cluster has limited resources with insufficient compute capabilities. Thus, single-node clusters are not recommended for large-scale data processing. Besides, a cluster of this kind is not available with high availability as it only has one node. On the other hand, a multi-node architecture is the most common and preferred choice in terms of application deployment and distribution. + +This section gives you an overview of a single-master multi-node installation, including the concept, [KubeKey](https://github.com/kubesphere/kubekey/) and steps. For information about HA installation, refer to [High Availability Configurations](../../../installing-on-linux/high-availability-configurations/ha-configuration/), [Installing on Public Cloud](../../public-cloud/install-kubesphere-on-azure-vms/) and [Installing in On-premises Environment](../../on-premises/install-kubesphere-on-bare-metal/). + +## Video Demonstration + +{{< youtube nYOYk3VTSgo >}} + +## Concept + +A multi-node cluster is composed of at least one control plane and one worker node. You can use any node as the **taskbox** to carry out the installation task. 
You can add additional nodes based on your needs (for example, for high availability) both before and after the installation. + +- **Control plane node**. The control plane generally hosts the control plane and controls and manages the whole system. + +- **Worker node**. Worker nodes run the actual applications deployed on them. + +## Step 1: Prepare Linux Hosts + +Please see the requirements for hardware and operating system shown below. To get started with multi-node installation in this tutorial, you need to prepare at least three hosts according to the following requirements. It is possible to install the [KubeSphere Container Platform](https://kubesphere.io/) on two nodes if they have sufficient resources. + +### System requirements + +| Systems | Minimum Requirements (Each node) | +| ------------------------------------------------------ | ------------------------------------------- | +| **Ubuntu** *16.04, 18.04, 20.04* | CPU: 2 Cores, Memory: 4 G, Disk Space: 40 G | +| **Debian** *Buster, Stretch* | CPU: 2 Cores, Memory: 4 G, Disk Space: 40 G | +| **CentOS** *7*.x | CPU: 2 Cores, Memory: 4 G, Disk Space: 40 G | +| **Red Hat Enterprise Linux** *7* | CPU: 2 Cores, Memory: 4 G, Disk Space: 40 G | +| **SUSE Linux Enterprise Server** *15* **/openSUSE Leap** *15.2* | CPU: 2 Cores, Memory: 4 G, Disk Space: 40 G | + +{{< notice note >}} + +- The path `/var/lib/docker` is mainly used to store the container data, and will gradually increase in size during use and operation. In the case of a production environment, it is recommended that `/var/lib/docker` should mount a drive separately. + +- Only x86_64 CPUs are supported, and Arm CPUs are not fully supported at present. + +{{}} + +### Node requirements + +- All nodes must be accessible through `SSH`. +- Time synchronization for all nodes. +- `sudo`/`curl`/`openssl`/`tar` should be used in all nodes. + +### Container runtimes + +Your cluster must have an available container runtime. If you use KubeKey to set up a cluster, KubeKey will install the latest version of Docker by default. Alternatively, you can install Docker or other container runtimes by yourself before you create a cluster. + +| Supported Container Runtime | Version | +| --------------------------- | ------- | +| Docker | 19.3.8+ | +| containerd | Latest | +| CRI-O (experimental, not fully tested) | Latest | +| iSula (experimental, not fully tested) | Latest | + +{{< notice note >}} + +A container runtime must be installed in advance if you want to deploy KubeSphere in an offline environment. + +{{}} + +### Dependency requirements + +KubeKey can install Kubernetes and KubeSphere together. The dependency that needs to be installed may be different based on the Kubernetes version to be installed. You can refer to the list below to see if you need to install relevant dependencies on your node in advance. + +| Dependency | Kubernetes Version ≥ 1.18 | Kubernetes Version < 1.18 | +| ----------- | ------------------------- | ------------------------- | +| `socat` | Required | Optional but recommended | +| `conntrack` | Required | Optional but recommended | +| `ebtables` | Optional but recommended | Optional but recommended | +| `ipset` | Optional but recommended | Optional but recommended | + +### Network and DNS requirements + +- Make sure the DNS address in `/etc/resolv.conf` is available. Otherwise, it may cause some issues of DNS in clusters. 
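+
+  For example, you can quickly confirm that the configured nameserver actually answers queries (a minimal check; any domain reachable from your network can stand in for `kubesphere.io`):
+
+  ```bash
+  cat /etc/resolv.conf
+  getent hosts kubesphere.io   # resolves through the system resolver; replace with any reachable domain
+  ```
+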
+- If your network configuration uses firewall rules or security groups, you must ensure infrastructure components can communicate with each other through specific ports. It's recommended that you turn off the firewall or follow the guide [Port Requirements](../port-firewall/). +- Supported CNI plugins: Calico and Flannel. Others (such as Cilium and Kube-OVN) may also work but note that they have not been fully tested. + +{{< notice tip >}} + +- It's recommended that your OS be clean (without any other software installed). Otherwise, there may be conflicts. +- A registry mirror (booster) is recommended to be prepared if you have trouble downloading images from `dockerhub.io`. See [Configure a Booster for Installation](../../../faq/installation/configure-booster/) and [Configure registry mirrors for the Docker daemon](https://docs.docker.com/registry/recipes/mirror/#configure-the-docker-daemon). + +{{}} + +This example includes three hosts as below with the control plane serving as the taskbox. + +| Host IP | Host Name | Role | +| ----------- | --------- | ------------ | +| 192.168.0.2 | control plane | control plane, etcd | +| 192.168.0.3 | node1 | worker | +| 192.168.0.4 | node2 | worker | + +## Step 2: Download KubeKey + +Follow the step below to download [KubeKey](../kubekey). + +{{< tabs >}} + +{{< tab "Good network connections to GitHub/Googleapis" >}} + +Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly. + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "Poor network connections to GitHub/Googleapis" >}} + +Run the following command first to make sure you download KubeKey from the correct zone. + +```bash +export KKZONE=cn +``` + +Run the following command to download KubeKey: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +After you download KubeKey, if you transfer it to a new machine also with poor network connections to Googleapis, you must run `export KKZONE=cn` again before you proceed with the steps below. + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +The commands above download the latest release of KubeKey. You can change the version number in the command to download a specific version. + +{{}} + +Make `kk` executable: + +```bash +chmod +x kk +``` + +## Step 3: Create a Kubernetes Multi-node Cluster + +For multi-node installation, you need to create a cluster by specifying a configuration file. + +### 1. Create an example configuration file + +Command: + +```bash +./kk create config [--with-kubernetes version] [--with-kubesphere version] [(-f | --file) path] +``` + +{{< notice note >}} + +- Recommended Kubernetes versions for KubeSphere 3.3: v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and * v1.24.x. For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatability. Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x. If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.23.10 by default. For more information about supported Kubernetes versions, see [Support Matrix](../kubekey/#support-matrix). + +- If you do not add the flag `--with-kubesphere` in the command in this step, KubeSphere will not be deployed unless you install it using the `addons` field in the configuration file or add this flag again when you use `./kk create cluster` later. 
+- If you add the flag `--with-kubesphere` without specifying a KubeSphere version, the latest version of KubeSphere will be installed. + +{{}} + +Here are some examples for your reference: + +- You can create an example configuration file with default configurations. You can also specify the file with a different filename, or in a different folder. + + ```bash + ./kk create config [-f ~/myfolder/abc.yaml] + ``` + +- You can specify a KubeSphere version that you want to install (for example, `--with-kubesphere v3.3.2`). + + ```bash + ./kk create config --with-kubesphere [version] + ``` + +### 2. Edit the configuration file of a Kubernetes multi-node cluster + +A default file `config-sample.yaml` will be created if you do not change the name. Edit the file and here is an example of the configuration file of a multi-node cluster with the control plane. + +{{< notice note >}} + +To customize Kubernetes related parameters, refer to [Kubernetes Cluster Configurations](../vars/). + +{{}} + +```yaml +spec: + hosts: + - {name: master, address: 192.168.0.2, internalAddress: 192.168.0.2, user: ubuntu, password: Testing123} + - {name: node1, address: 192.168.0.3, internalAddress: 192.168.0.3, user: ubuntu, password: Testing123} + - {name: node2, address: 192.168.0.4, internalAddress: 192.168.0.4, user: ubuntu, password: Testing123} + roleGroups: + etcd: + - master + control-plane: + - master + worker: + - node1 + - node2 + controlPlaneEndpoint: + domain: lb.kubesphere.local + address: "" + port: 6443 +``` + +#### Hosts + +List all your machines under `hosts` and add their detailed information as above. + +`name`: The hostname of the instance. + +`address`: The IP address you use for the connection between the taskbox and other instances through SSH. This can be either the public IP address or the private IP address depending on your environment. For example, some cloud platforms provide every instance with a public IP address which you use to access instances through SSH. In this case, you can provide the public IP address for this field. + +`internalAddress`: The private IP address of the instance. + +At the same time, you must provide the login information used to connect to each instance. Here are some examples: + +- For password login: + + ```yaml + hosts: + - {name: master, address: 192.168.0.2, internalAddress: 192.168.0.2, port: 8022, user: ubuntu, password: Testing123} + ``` + + {{< notice note >}} + + In this tutorial, port `22` is the default port of SSH so you do not need to add it in the YAML file. Otherwise, you need to add the port number after the IP address as above. + + {{}} + +- For the default root user: + + ```yaml + hosts: + - {name: master, address: 192.168.0.2, internalAddress: 192.168.0.2, password: Testing123} + ``` + +- For passwordless login with SSH keys: + + ```yaml + hosts: + - {name: master, address: 192.168.0.2, internalAddress: 192.168.0.2, privateKeyPath: "~/.ssh/id_rsa"} + ``` + +{{< notice tip >}} + +- Before you install KubeSphere, you can use the information provided under `hosts` (for example, IP addresses and passwords) to test the network connection between the taskbox and other instances using SSH. +- Make sure port `6443` is not being used by other services before the installation. Otherwise, it may cause conflicts as the default port of the API server is `6443`. 
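+
+A quick way to run both checks from the taskbox might look like this (a sketch; substitute your own user names and node IPs):
+
+```bash
+ssh ubuntu@192.168.0.3 'echo connection ok'        # repeat for every host listed under hosts
+ss -tlnp | grep 6443 || echo "port 6443 is free"   # run on the control plane node
+```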
+ +{{}} + +#### roleGroups + +- `etcd`: etcd node names +- `control-plane`: Control plane node names +- `worker`: Worker node names + +#### controlPlaneEndpoint (for HA installation only) + +The `controlPlaneEndpoint` is where you provide your external load balancer information for an HA cluster. You need to prepare and configure the external load balancer if and only if you need to install multiple control plane nodes. Please note that the address and port should be indented by two spaces in `config-sample.yaml`, and `address` should be your load balancer's IP address. See [HA Configurations](../../../installing-on-linux/high-availability-configurations/ha-configuration/) for details. + +#### addons + +You can customize persistent storage plugins (for example, NFS Client, Ceph RBD, and GlusterFS) by specifying storage under the field `addons` in `config-sample.yaml`. For more information, see [Persistent Storage Configurations](../../../installing-on-linux/persistent-storage-configurations/understand-persistent-storage/). + +KubeKey will install [OpenEBS](https://openebs.io/) to provision [LocalPV](https://kubernetes.io/docs/concepts/storage/volumes/#local) for development and testing environment by default, which is convenient for new users. In this example of multi-node installation, the default storage class (local volume) is used. For production, you can use Ceph/GlusterFS/CSI or commercial products as persistent storage solutions. + +{{< notice tip >}} + +- You can enable the multi-cluster feature by editing the configuration file. For more information, see [Multi-cluster Management](../../../multicluster-management/). +- You can also select the components you want to install. For more information, see [Enable Pluggable Components](../../../pluggable-components/). For an example of a complete `config-sample.yaml` file, see [this file](https://github.com/kubesphere/kubekey/blob/release-2.2/docs/config-example.md). + +{{}} + +When you finish editing, save the file. + +### 3. Create a cluster using the configuration file + +```bash +./kk create cluster -f config-sample.yaml +``` + +{{< notice note >}} + +You need to change `config-sample.yaml` above to your own file if you use a different name. + +{{}} + +The whole installation process may take 10-20 minutes, depending on your machine and network. + +### 4. Verify the installation + +When the installation finishes, you can see the content as follows: + +```bash +##################################################### +### Welcome to KubeSphere! ### +##################################################### + +Console: http://192.168.0.2:30880 +Account: admin +Password: P@88w0rd + +NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. + +##################################################### +https://kubesphere.io 20xx-xx-xx xx:xx:xx +##################################################### +``` + +Now, you will be able to access the web console of KubeSphere at `:30880` with the default account and password (`admin/P@88w0rd`). + +{{< notice note >}} + +To access the console, you may need to configure port forwarding rules depending on your environment. Please also make sure port `30880` is opened in your security group. 
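+
+For example, if a node is reachable over SSH but the port is not yet exposed externally, an SSH tunnel from your workstation is a quick, temporary workaround (a sketch; replace the user and IP with your own):
+
+```bash
+ssh -N -L 30880:192.168.0.2:30880 ubuntu@192.168.0.2   # then open http://localhost:30880 locally
+```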
+ +{{}} + +![login](/images/docs/v3.3/installing-on-linux/introduction/multi-node-installation/login.png) + +## Enable kubectl Autocompletion + +KubeKey doesn't enable kubectl autocompletion. See the content below and turn it on: + +{{< notice note >}} + +Make sure bash-autocompletion is installed and works. + +{{}} + +```bash +# Install bash-completion +apt-get install bash-completion + +# Source the completion script in your ~/.bashrc file +echo 'source <(kubectl completion bash)' >>~/.bashrc + +# Add the completion script to the /etc/bash_completion.d directory +kubectl completion bash >/etc/bash_completion.d/kubectl +``` + +Detailed information can be found [here](https://kubernetes.io/docs/tasks/tools/install-kubectl/#enabling-shell-autocompletion). + +## Code Demonstration + diff --git a/content/en/docs/v3.4/installing-on-linux/introduction/port-firewall.md b/content/en/docs/v3.4/installing-on-linux/introduction/port-firewall.md new file mode 100644 index 000000000..c6928f823 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/introduction/port-firewall.md @@ -0,0 +1,33 @@ +--- +title: "Port Requirements" +keywords: 'Kubernetes, KubeSphere, port-requirements, firewall-rules' +description: 'Understand the specific port requirements for different services in KubeSphere.' + +linkTitle: "Port Requirements" +weight: 3150 +--- + + +KubeSphere requires certain ports for the communications among services. If your network is configured with firewall rules, you need to ensure infrastructure components can communicate with each other through specific ports that act as communication endpoints for certain processes or services. + +|Service|Protocol|Action|Start Port|End Port|Notes +|---|---|---|---|---|---| +|ssh|TCP|allow|22| +|etcd|TCP|allow|2379|2380| +|apiserver|TCP|allow|6443| +|calico|TCP|allow|9099|9100| +|bgp|TCP|allow|179|| +|nodeport|TCP|allow|30000|32767| +|master|TCP|allow|10250|10258| +|dns|TCP|allow|53| +|dns|UDP|allow|53| +|local-registry|TCP|allow|5000||For the offline environment| +|local-apt|TCP|allow|5080||For the offline environment| +|rpcbind|TCP|allow|111|| Required if NFS is used| +|ipip| IPENCAP / IPIP|allow| | |Calico needs to allow the ipip protocol | +|metrics-server| TCP|allow| 8443 | + + +{{< notice note >}} +When you use the Calico network plugin and run your cluster in a classic network on cloud, you need to enable both IPENCAP and IPIP protocol for the source IP. +{{}} \ No newline at end of file diff --git a/content/en/docs/v3.4/installing-on-linux/introduction/vars.md b/content/en/docs/v3.4/installing-on-linux/introduction/vars.md new file mode 100644 index 000000000..3c1b5ae43 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/introduction/vars.md @@ -0,0 +1,126 @@ +--- +title: "Kubernetes Cluster Configurations" +keywords: 'Kubernetes, cluster, configuration, KubeKey' +description: 'Customize your Kubernetes settings in the configuration file for your cluster.' +linkTitle: "Kubernetes Cluster Configurations" +weight: 3160 +--- + +When creating a Kubernetes cluster, you can use [KubeKey](../kubekey/) to define a configuration file (`config-sample.yaml`) which contains basic information of your cluster. Refer to the following example for Kubernetes-related parameters in the configuration file. 
+
+```yaml
+  kubernetes:
+    version: v1.22.12
+    imageRepo: kubesphere
+    clusterName: cluster.local
+    masqueradeAll: false
+    maxPods: 110
+    nodeCidrMaskSize: 24
+    proxyMode: ipvs
+  network:
+    plugin: calico
+    calico:
+      ipipMode: Always
+      vxlanMode: Never
+      vethMTU: 1440
+    kubePodsCIDR: 10.233.64.0/18
+    kubeServiceCIDR: 10.233.0.0/18
+  registry:
+    registryMirrors: []
+    insecureRegistries: []
+    privateRegistry: ""
+  addons: []
+```
+
+The table below describes the above parameters in detail.
+
+| Parameter | Description |
+| --- | --- |
+| **kubernetes** | |
+| version | The Kubernetes version to be installed. If you do not specify a Kubernetes version, {{< contentLink "docs/installing-on-linux/introduction/kubekey" "KubeKey" >}} v3.0.7 will install Kubernetes v1.23.10 by default. For more information, see {{< contentLink "docs/installing-on-linux/introduction/kubekey/#support-matrix" "Support Matrix" >}}. |
+| imageRepo | The Docker Hub repository where images will be downloaded. |
+| clusterName | The Kubernetes cluster name. |
+| masqueradeAll* | Tells kube-proxy to SNAT everything if using the pure iptables proxy mode. It defaults to false. |
+| maxPods* | The maximum number of Pods that can run on this kubelet. It defaults to 110. |
+| nodeCidrMaskSize* | The mask size for node CIDR in your cluster. It defaults to 24. |
+| proxyMode* | The proxy mode to use. It defaults to ipvs. |
+| **network** | |
+| plugin | The CNI plugin to use. KubeKey installs Calico by default, while you can also specify Flannel. Note that some features, such as Pod IP Pools, can only be used when Calico is adopted as the CNI plugin. |
+| calico.ipipMode* | The IPIP mode to use for the IPv4 pool created at startup. If it is set to a value other than Never, vxlanMode should be set to Never. Allowed values are Always, CrossSubnet, and Never. It defaults to Always. |
+| calico.vxlanMode* | The VXLAN mode to use for the IPv4 pool created at startup. If it is set to a value other than Never, ipipMode should be set to Never. Allowed values are Always, CrossSubnet, and Never. It defaults to Never. |
+| calico.vethMTU* | The maximum transmission unit (MTU) setting determines the largest packet size that can be transmitted through your network. It defaults to 1440. |
+| kubePodsCIDR | A valid CIDR block for your Kubernetes Pod subnet. It should not overlap with your node subnet and your Kubernetes Services subnet. |
+| kubeServiceCIDR | A valid CIDR block for your Kubernetes Services. It should not overlap with your node subnet and your Kubernetes Pod subnet. |
+| **registry** | |
+| registryMirrors | Configure a Docker registry mirror to speed up downloads. For more information, see {{< contentLink "https://docs.docker.com/registry/recipes/mirror/#configure-the-docker-daemon" "Configure the Docker daemon" >}}. |
+| insecureRegistries | Set the address of an insecure image registry. For more information, see {{< contentLink "https://docs.docker.com/registry/insecure/" "Test an insecure registry" >}}. |
+| privateRegistry* | Configure a private image registry for air-gapped installation (for example, a Docker local registry or Harbor). For more information, see {{< contentLink "docs/v3.3/installing-on-linux/introduction/air-gapped-installation/" "Air-gapped Installation on Linux" >}}. |
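+
+If you prefer Flannel to the default Calico, the `network` section might look like this instead (a sketch; the Calico-specific fields are simply omitted, and Calico-only features such as Pod IP Pools will not be available):
+
+```yaml
+  network:
+    plugin: flannel
+    kubePodsCIDR: 10.233.64.0/18
+    kubeServiceCIDR: 10.233.0.0/18
+```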
+ + +{{< notice note >}} + +- \* By default, KubeKey does not define these parameters in the configuration file while you can manually add them and customize their values. +- `addons` is used to install cloud-native add-ons (YAML or Chart). For more information, see [this file](https://github.com/kubesphere/kubekey/blob/release-2.2/docs/addons.md). +- This page only lists part of the parameters in the configuration file created by KubeKey. For more information about other parameters, see [this example file](https://github.com/kubesphere/kubekey/blob/release-2.2/docs/config-example.md). + +{{}} diff --git a/content/en/docs/v3.4/installing-on-linux/on-premises/_index.md b/content/en/docs/v3.4/installing-on-linux/on-premises/_index.md new file mode 100644 index 000000000..41d96d7f1 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/on-premises/_index.md @@ -0,0 +1,9 @@ +--- +linkTitle: "Installing in On-premises Environments" +weight: 3500 + +_build: + render: false +--- + +In this chapter, we will demonstrate how to use KubeKey or Kubeadm to provision a new Kubernetes and KubeSphere cluster on some on on-premises environments, such as VMware vSphere, OpenStack, Bare Metal, etc. You just need prepare the machines with supported operating system before you start installation. The air-gapped installation guide is also included in this chapter. diff --git a/content/en/docs/v3.4/installing-on-linux/on-premises/install-kubesphere-and-k3s.md b/content/en/docs/v3.4/installing-on-linux/on-premises/install-kubesphere-and-k3s.md new file mode 100644 index 000000000..6d392ec00 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/on-premises/install-kubesphere-and-k3s.md @@ -0,0 +1,181 @@ +--- +title: "Deploy K3s and KubeSphere" +keywords: 'Kubernetes, KubeSphere, K3s' +description: 'Learn how to use KubeKey to install K3s and KubeSphere.' +linkTitle: "Deploy K3s and KubeSphere" +weight: 3530 +--- + +[K3s](https://k3s.io/) is a lightweight Kubernetes distribution built for IoT and edge computing with external dependencies minimized. It is packaged as a single binary that reduces the dependencies and steps that are required to set up a Kubernetes cluster. + +You can use KubeKey to install both K3s and KubeSphere while KubeSphere can also be deployed on an existing K3s cluster. + +{{< notice note >}} + +Currently, KubeSphere on K3s is only for testing and development as some features have not been fully tested. + +{{}} + +## Prerequisites + +- For information about the prerequisites for K3s installation, see [the K3s documentation](https://rancher.com/docs/k3s/latest/en/installation/installation-requirements/). +- You may need to create necessary firewall rules or port forwarding rules depending on your environment. For more information, see [Port Requirements](../../../installing-on-linux/introduction/port-firewall/). + +## Step 1: Download KubeKey + +Follow the step below to download [KubeKey](../../../installing-on-linux/introduction/kubekey/). + +{{< tabs >}} + +{{< tab "Good network connections to GitHub/Googleapis" >}} + +Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly. + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "Poor network connections to GitHub/Googleapis" >}} + +Run the following command first to make sure you download KubeKey from the correct zone. 
+ +```bash +export KKZONE=cn +``` + +Run the following command to download KubeKey: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +After you download KubeKey, if you transfer it to a new machine also with poor network connections to Googleapis, you must run `export KKZONE=cn` again before you proceed with the steps below. + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +The commands above download the latest release of KubeKey. Note that an earlier version of KubeKey cannot be used to install K3s. + +{{}} + +Make `kk` executable: + +```bash +chmod +x kk +``` + +## Step 2: Create a Cluster + +1. Create a configuration file of your cluster by running the following command: + + ```bash + ./kk create config --with-kubernetes v1.21.4-k3s --with-kubesphere v3.3.2 + ``` + + {{< notice note >}} + + KubeKey supports the installation of K3s v1.21.4. + + {{}} + +2. A default file `config-sample.yaml` will be created if you do not customize the name. Edit the file. + + ```bash + vi config-sample.yaml + ``` + + ```yaml + ... + metadata: + name: sample + spec: + hosts: + - {name: master, address: 192.168.0.2, internalAddress: 192.168.0.2, user: ubuntu, password: Testing123} + - {name: node1, address: 192.168.0.3, internalAddress: 192.168.0.3, user: ubuntu, password: Testing123} + - {name: node2, address: 192.168.0.4, internalAddress: 192.168.0.4, user: ubuntu, password: Testing123} + roleGroups: + etcd: + - master + control-plane: + - master + worker: + - node1 + - node2 + controlPlaneEndpoint: + domain: lb.kubesphere.local + address: "" + port: 6443 + kubernetes: + version: v1.21.4-k3s + imageRepo: kubesphere + clusterName: cluster.local + network: + plugin: calico + kubePodsCIDR: 10.233.64.0/18 + kubeServiceCIDR: 10.233.0.0/18 + registry: + registryMirrors: [] + insecureRegistries: [] + addons: [] + ... + ``` + + {{< notice note >}} + + For more information about each field in the configuration file, see [an example file](https://github.com/kubesphere/kubekey/blob/release-2.2/docs/config-example.md). + + {{}} + +3. Save the file and execute the following command to install K3s and KubeSphere: + + ``` + ./kk create cluster -f config-sample.yaml + ``` + +4. When the installation finishes, you can inspect installation logs with the following command: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + Expected output: + + ```bash + ##################################################### + ### Welcome to KubeSphere! ### + ##################################################### + + Console: http://192.168.0.2:30880 + Account: admin + Password: P@88w0rd + + NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. + + ##################################################### + https://kubesphere.io 20xx-xx-xx xx:xx:xx + ##################################################### + ``` + + +5. Access the KubeSphere console at `:30880` with the default account and password (`admin/P@88W0rd`). + +{{< notice note >}} + +You can enable pluggable components of KubeSphere after the installation while some features may not be compatible as KubeSphere on K3s is only experimental currently. 
+ +{{}} \ No newline at end of file diff --git a/content/en/docs/v3.4/installing-on-linux/on-premises/install-kubesphere-on-bare-metal.md b/content/en/docs/v3.4/installing-on-linux/on-premises/install-kubesphere-on-bare-metal.md new file mode 100644 index 000000000..355d0751c --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/on-premises/install-kubesphere-on-bare-metal.md @@ -0,0 +1,397 @@ +--- +title: "Deploy Kubernetes and Kubesphere on Bare Metal" +keywords: 'Kubernetes, KubeSphere, bare-metal' +description: 'Learn how to create a multi-node cluster with one master on bare metal.' +linkTitle: "Deploy KubeSphere on Bare Metal" +weight: 3520 +--- + +## Introduction + +In addition to the deployment on cloud, KubeSphere can also be installed on bare metal. As the virtualization layer is removed, the infrastructure overhead is drastically reduced, which brings more compute and storage resources to app deployments. As a result, hardware efficiency is improved. Refer to the example below to deploy KubeSphere on bare metal. + +## Prerequisites + +- Make sure you already know how to install KubeSphere on a multi-node cluster based on the tutorial [Multi-node Installation](../../../installing-on-linux/introduction/multioverview/). +- Server and network redundancy in your environment. +- For a production environment, it is recommended that you prepare persistent storage and create a StorageClass in advance. For development and testing, you can use the integrated OpenEBS to provision LocalPV as the storage service directly. + +## Prepare Linux Hosts + +This tutorial uses 3 physical machines of **DELL 620 Intel (R) Xeon (R) CPU E5-2640 v2 @ 2.00GHz (32G memory)**, on which **CentOS Linux release 7.6.1810 (Core)** will be installed for the minimal deployment of KubeSphere. + +### Install CentOS + +Download and install the [image](https://www.centos.org/download/) first, and CentOS Linux release 7.6.1810 (Core) is recommended. Make sure you allocate at least 200 GB to the root directory where it stores docker images (you can skip this if you are installing KubeSphere for testing). + +For more information about the supported systems, see [System Requirements](../../../installing-on-linux/introduction/multioverview/). + +Here is a list of the three hosts for your reference. + + +| Host IP | Host Name | Role | +| --- | --- | --- | +|192.168.60.152|master1|master1, etcd| +|192.168.60.153|worker1|worker| +|192.168.60.154|worker2|worker| + +### NIC settings + +1. Clear NIC configurations. + + ```bash + ifdown em1 + ``` + + ```bash + ifdown em2 + ``` + + ```bash + rm -rf /etc/sysconfig/network-scripts/ifcfg-em1 + ``` + + ```bash + rm -rf /etc/sysconfig/network-scripts/ifcfg-em2 + ``` + +2. Create the NIC bonding. + + ```bash + nmcli con add type bond con-name bond0 ifname bond0 mode 802.3ad ip4 192.168.60.152/24 gw4 192.168.60.254 + ``` + +3. Set the bonding mode. + + ```bash + nmcli con mod id bond0 bond.options mode=802.3ad,miimon=100,lacp_rate=fast,xmit_hash_policy=layer2+3 + ``` + +4. Bind the physical NIC. + + ```bash + nmcli con add type bond-slave ifname em1 con-name em1 master bond0 + ``` + + ```bash + nmcli con add type bond-slave ifname em2 con-name em2 master bond0 + ``` + +5. Change the NIC mode. + + ```bash + vi /etc/sysconfig/network-scripts/ifcfg-bond0 + BOOTPROTO=static + ``` + +6. Restart Network Manager. + + ```bash + systemctl restart NetworkManager + ``` + + ```bash + nmcli con # Display NIC information + ``` + +7. Change the host name and DNS. 
+ + ```bash + hostnamectl set-hostname worker-1 + ``` + + ```bash + vim /etc/resolv.conf + ``` + +### Time settings + +1. Synchronize time. + + ```bash + yum install -y chrony + ``` + + ```bash + systemctl enable chronyd + ``` + + ```bash + systemctl start chronyd + ``` + + ```bash + timedatectl set-ntp true + ``` + +2. Set the time zone. + + ```bash + timedatectl set-timezone Asia/Shanghai + ``` + +3. Check if the ntp-server is available. + + ```bash + chronyc activity -v + ``` + +### Firewall settings + +Execute the following commands to stop and disable the FirewallD service: + +```bash +iptables -F +``` + +```bash +systemctl status firewalld +``` + +```bash +systemctl stop firewalld +``` + +```bash +systemctl disable firewalld +``` + +### Package updates and dependencies + +Execute the following commands to update system packages and install dependencies. + +```bash +yum update +``` + +```bash +yum install openssl openssl-devel +``` + +```bash +yum install socat +``` + +```bash +yum install epel-release +``` + +```bash +yum install conntrack-tools +``` + +{{< notice note >}} + +You may not need to install all the dependencies depending on the Kubernetes version to be installed. For more information, see [Dependency Requirements](../../../installing-on-linux/introduction/multioverview/). + +{{}} + +## Download KubeKey + +[Kubekey](https://github.com/kubesphere/kubekey) is the next-gen installer which provides an easy, fast and flexible way to install Kubernetes and KubeSphere. + +Follow the step below to download KubeKey. + +{{< tabs >}} + +{{< tab "Good network connections to GitHub/Googleapis" >}} + +Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly. + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "Poor network connections to GitHub/Googleapis" >}} + +Run the following command first to make sure you download KubeKey from the correct zone. + +```bash +export KKZONE=cn +``` + +Run the following command to download KubeKey: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +After you download KubeKey, if you transfer it to a new machine also with poor network connections to Googleapis, you must run `export KKZONE=cn` again before you proceed with the steps below. + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +The commands above download the latest release of KubeKey. You can change the version number in the command to download a specific version. + +{{}} + +Make `kk` executable: + +```bash +chmod +x kk +``` + +## Create a Multi-node Cluster + +With KubeKey, you can install Kubernetes and KubeSphere together. You have the option to create a multi-node cluster by customizing parameters in the configuration file. + +Create a Kubernetes cluster with KubeSphere installed (for example, `--with-kubesphere v3.3.2`): + +```bash +./kk create config --with-kubernetes v1.22.12 --with-kubesphere v3.3.2 +``` + +{{< notice note >}} + +- Recommended Kubernetes versions for KubeSphere 3.3: v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and * v1.24.x. For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatability. Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x. If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.23.10 by default. 
For more information about supported Kubernetes versions, see [Support Matrix](../../../installing-on-linux/introduction/kubekey/#support-matrix). + +- If you do not add the flag `--with-kubesphere` in the command above, KubeSphere will not be deployed unless you install it using the `addons` field in the configuration file or add this flag again when you use `./kk create cluster` later. +- If you add the flag `--with-kubesphere` without specifying a KubeSphere version, the latest version of KubeSphere will be installed. + +{{}} + +A default file `config-sample.yaml` will be created. Modify it according to your environment. + +```bash +vi config-sample.yaml +``` + +```yaml +apiVersion: kubekey.kubesphere.io/v1alpha1 +kind: Cluster +metadata: + name: config-sample +spec: + hosts: + - {name: master1, address: 192.168.60.152, internalAddress: 192.168.60.152, user: root, password: P@ssw0rd} + - {name: worker1, address: 192.168.60.153, internalAddress: 192.168.60.153, user: root, password: P@ssw0rd} + - {name: worker2, address: 192.168.60.154, internalAddress: 192.168.60.154, user: root, password: P@ssw0rd} + roleGroups: + etcd: + - master1 + control-plane: + - master1 + worker: + - worker1 + - worker2 + controlPlaneEndpoint: + domain: lb.kubesphere.local + address: "" + port: 6443 +``` +Create a cluster using the configuration file you customized above: + +```bash +./kk create cluster -f config-sample.yaml +``` + +#### Verify the installation + +After the installation finishes, you can inspect the logs of installation by executing the command below: + +```bash +kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f +``` + +If you can see the welcome log return, it means the installation is successful. + +```bash +************************************************** +##################################################### +### Welcome to KubeSphere! ### +##################################################### +Console: http://192.168.60.152:30880 +Account: admin +Password: P@88w0rd +NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. +##################################################### +https://kubesphere.io 20xx-xx-xx xx:xx:xx +##################################################### +``` + +#### Log in to the console + +You will be able to use default account and password `admin/P@88w0rd` to log in to the console `http://{$IP}:30880` to take a tour of KubeSphere. Please change the default password after login. + +#### Enable pluggable components (Optional) +The example above demonstrates the process of a default minimal installation. To enable other components in KubeSphere, see [Enable Pluggable Components](../../../pluggable-components/) for more details. + +## System Improvements + +- Update your system. + + ```bash + yum update + ``` + +- Add the required options to the kernel boot arguments: + + ```bash + sudo /sbin/grubby --update-kernel=ALL --args='cgroup_enable=memory cgroup.memory=nokmem swapaccount=1' + ``` + +- Enable the `overlay2` kernel module. + + ```bash + echo "overlay2" | sudo tee -a /etc/modules-load.d/overlay.conf + ``` + +- Refresh the dynamically generated grub2 configuration. 
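+
+  If the configuration needs to be regenerated explicitly on CentOS 7 with BIOS boot, that might look like this (a sketch; on UEFI systems the output path is typically `/boot/efi/EFI/centos/grub.cfg`):
+
+  ```bash
+  sudo grub2-mkconfig -o /boot/grub2/grub.cfg
+  ```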
+ + ```bash + sudo grub2-set-default 0 + ``` + +- Adjust kernel parameters and make the change effective. + + ```bash + cat <}} +You do not need to create a virtual machine for `vip` (i.e. Virtual IP) above, so only 8 virtual machines need to be created. +{{}} + +You can follow the New Virtual Machine wizard to create a virtual machine to place in the VMware Host Client inventory. + +![create](/images/docs/v3.3/vsphere/kubesphereOnVsphere-en-0-1-create.png) + +1. In the first step **Select a creation type**, you can deploy a virtual machine from an OVF or OVA file, or register an existing virtual machine directly. + + ![kubesphereOnVsphere-en-0-1-1-create-type](/images/docs/v3.3/vsphere/kubesphereOnVsphere-en-0-1-1-create-type.png) + +2. When you create a new virtual machine, provide a unique name for the virtual machine to distinguish it from existing virtual machines on the host you are managing. + + ![kubesphereOnVsphere-en-0-1-2-name](/images/docs/v3.3/vsphere/kubesphereOnVsphere-en-0-1-2-name.png) + +3. Select a compute resource and storage (datastore) for the configuration and disk files. You can select the datastore that has the most suitable properties, such as size, speed, and availability, for your virtual machine storage. + + ![kubesphereOnVsphere-en-0-1-3-resource](/images/docs/v3.3/vsphere/kubesphereOnVsphere-en-0-1-3-resource.png) + + ![kubesphereOnVsphere-en-0-1-4-storage](/images/docs/v3.3/vsphere/kubesphereOnVsphere-en-0-1-4-storage.png) + + ![kubesphereOnVsphere-en-0-1-5-compatibility](/images/docs/v3.3/vsphere/kubesphereOnVsphere-en-0-1-5-compatibility.png) + +4. Select a guest operating system. The wizard will provide the appropriate defaults for the operating system installation. + + ![kubesphereOnVsphere-en-0-1-6-system](/images/docs/v3.3/vsphere/kubesphereOnVsphere-en-0-1-6-system.png) + +5. Before you finish deploying a new virtual machine, you have the option to set **Virtual Hardware** and **VM Options**. You can refer to the images below for part of the fields. + + ![kubesphereOnVsphere-en-0-1-7-hardware-1](/images/docs/v3.3/vsphere/kubesphereOnVsphere-en-0-1-7-hardware-1.png) + + ![kubesphereOnVsphere-en-0-1-7-hardware-2](/images/docs/v3.3/vsphere/kubesphereOnVsphere-en-0-1-7-hardware-2.png) + + ![kubesphereOnVsphere-en-0-1-7-hardware-3](/images/docs/v3.3/vsphere/kubesphereOnVsphere-en-0-1-7-hardware-3.png) + + ![kubesphereOnVsphere-en-0-1-7-hardware-4](/images/docs/v3.3/vsphere/kubesphereOnVsphere-en-0-1-7-hardware-4.png) + +6. In **Ready to complete** page, you review the configuration selections that you have made for the virtual machine. Click **Finish** at the bottom-right corner to continue. + + ![kubesphereOnVsphere-en-0-1-8](/images/docs/v3.3/vsphere/kubesphereOnVsphere-en-0-1-8.png) + +## Install a Load Balancer using Keepalived and HAProxy + +For a production environment, you have to prepare an external load balancer for your cluster with multiple control plane nodes. If you do not have a load balancer, you can install it using Keepalived and HAProxy. If you are provisioning a development or testing environment by installing a cluster with a control plane node, please skip this section. + +### Yum Install + +host lb-0 (`10.10.71.77`) and host lb-1 (`10.10.71.66`). + +```bash +yum install keepalived haproxy psmisc -y +``` + +### Configure HAProxy + +On the servers with IP `10.10.71.77` and `10.10.71.66`, configure HAProxy as follows. + +{{< notice note >}} + +The configuration of the two lb machines is the same. 
Please pay attention to the backend service address. + +{{}} + +```yaml +# HAProxy Configure /etc/haproxy/haproxy.cfg +global + log 127.0.0.1 local2 + chroot /var/lib/haproxy + pidfile /var/run/haproxy.pid + maxconn 4000 + user haproxy + group haproxy + daemon + # turn on stats unix socket + stats socket /var/lib/haproxy/stats +#--------------------------------------------------------------------- +# common defaults that all the 'listen' and 'backend' sections will +# use if not designated in their block +#--------------------------------------------------------------------- +defaults + log global + option httplog + option dontlognull + timeout connect 5000 + timeout client 5000 + timeout server 5000 +#--------------------------------------------------------------------- +# main frontend which proxys to the backends +#--------------------------------------------------------------------- +frontend kube-apiserver + bind *:6443 + mode tcp + option tcplog + default_backend kube-apiserver +#--------------------------------------------------------------------- +# round robin balancing between the various backends +#--------------------------------------------------------------------- +backend kube-apiserver + mode tcp + option tcplog + balance roundrobin + default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100 + server kube-apiserver-1 10.10.71.214:6443 check + server kube-apiserver-2 10.10.71.73:6443 check + server kube-apiserver-3 10.10.71.62:6443 check +``` + +Check grammar first before you start it. + +```bash +haproxy -f /etc/haproxy/haproxy.cfg -c +``` + +Restart HAProxy and execute the command below to enable HAProxy. + +```bash +systemctl restart haproxy && systemctl enable haproxy +``` + +Stop HAProxy. + +```bash +systemctl stop haproxy +``` + +### Configure Keepalived + +Main HAProxy 77 lb-0-10.10.71.77 (/etc/keepalived/keepalived.conf). + +```bash +global_defs { + notification_email { + } + smtp_connect_timeout 30 + router_id LVS_DEVEL01 + vrrp_skip_check_adv_addr + vrrp_garp_interval 0 + vrrp_gna_interval 0 +} +vrrp_script chk_haproxy { + script "killall -0 haproxy" + interval 2 + weight 20 +} +vrrp_instance haproxy-vip { + state MASTER + priority 100 + interface ens192 + virtual_router_id 60 + advert_int 1 + authentication { + auth_type PASS + auth_pass 1111 + } + unicast_src_ip 10.10.71.77 + unicast_peer { + 10.10.71.66 + } + virtual_ipaddress { + #vip + 10.10.71.67/24 + } + track_script { + chk_haproxy + } +} +``` + +Remark HAProxy 66 lb-1-10.10.71.66 (/etc/keepalived/keepalived.conf). + +```bash +global_defs { + notification_email { + } + router_id LVS_DEVEL02 + vrrp_skip_check_adv_addr + vrrp_garp_interval 0 + vrrp_gna_interval 0 +} +vrrp_script chk_haproxy { + script "killall -0 haproxy" + interval 2 + weight 20 +} +vrrp_instance haproxy-vip { + state BACKUP + priority 90 + interface ens192 + virtual_router_id 60 + advert_int 1 + authentication { + auth_type PASS + auth_pass 1111 + } + unicast_src_ip 10.10.71.66 + unicast_peer { + 10.10.71.77 + } + virtual_ipaddress { + 10.10.71.67/24 + } + track_script { + chk_haproxy + } +} +``` + +Start keepalived and enable keepalived. 
+ +```bash +systemctl restart keepalived && systemctl enable keepalived +``` + +```bash +systemctl stop keepalived +``` + +```bash +systemctl start keepalived +``` + +### Verify Availability + +Use `ip a s` to view the vip binding status of each lb node: + +```bash +ip a s +``` + +Pause VIP node HAProxy through the following command: + +```bash +systemctl stop haproxy +``` + +Use `ip a s` again to check the vip binding of each lb node, and check whether vip drifts: + +```bash +ip a s +``` + +Alternatively, use the command below: + +```bash +systemctl status -l keepalived +``` + +## Download KubeKey + +[Kubekey](https://github.com/kubesphere/kubekey) is the brand-new installer which provides an easy, fast and flexible way to install Kubernetes and KubeSphere 3.3. + +Follow the step below to download KubeKey. + +{{< tabs >}} + +{{< tab "Good network connections to GitHub/Googleapis" >}} + +Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly. + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "Poor network connections to GitHub/Googleapis" >}} + +Run the following command first to make sure you download KubeKey from the correct zone. + +```bash +export KKZONE=cn +``` + +Run the following command to download KubeKey: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +After you download KubeKey, if you transfer it to a new machine also with poor network connections to Googleapis, you must run `export KKZONE=cn` again before you proceed with the steps below. + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +The commands above download the latest release of KubeKey. You can change the version number in the command to download a specific version. + +{{}} + +Make `kk` executable: + +```bash +chmod +x kk +``` + +## Create a High Availability Cluster + +With KubeKey, you can install Kubernetes and KubeSphere together. You have the option to create a multi-node cluster by customizing parameters in the configuration file. + +Create a Kubernetes cluster with KubeSphere installed (for example, `--with-kubesphere v3.3.2`): + +```bash +./kk create config --with-kubernetes v1.22.12 --with-kubesphere v3.3.2 +``` + +{{< notice note >}} + +- Recommended Kubernetes versions for KubeSphere 3.3: v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and * v1.24.x. For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatability. Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x. If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.23.10 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../../installing-on-linux/introduction/kubekey/#support-matrix). + +- If you do not add the flag `--with-kubesphere` in the command in this step, KubeSphere will not be deployed unless you install it using the `addons` field in the configuration file or add this flag again when you use `./kk create cluster` later. +- If you add the flag `--with-kubesphere` without specifying a KubeSphere version, the latest version of KubeSphere will be installed. + +{{}} + +A default file `config-sample.yaml` will be created. Modify it according to your environment. 
+ +```bash +vi config-sample.yaml +``` + +```yaml +apiVersion: kubekey.kubesphere.io/v1alpha1 +kind: Cluster +metadata: + name: config-sample +spec: + hosts: + - {name: master1, address: 10.10.71.214, internalAddress: 10.10.71.214, password: P@ssw0rd!} + - {name: master2, address: 10.10.71.73, internalAddress: 10.10.71.73, password: P@ssw0rd!} + - {name: master3, address: 10.10.71.62, internalAddress: 10.10.71.62, password: P@ssw0rd!} + - {name: node1, address: 10.10.71.75, internalAddress: 10.10.71.75, password: P@ssw0rd!} + - {name: node2, address: 10.10.71.76, internalAddress: 10.10.71.76, password: P@ssw0rd!} + - {name: node3, address: 10.10.71.79, internalAddress: 10.10.71.79, password: P@ssw0rd!} + roleGroups: + etcd: + - master1 + - master2 + - master3 + control-plane: + - master1 + - master2 + - master3 + worker: + - node1 + - node2 + - node3 + controlPlaneEndpoint: + domain: lb.kubesphere.local + # vip + address: "10.10.71.67" + port: 6443 + kubernetes: + version: v1.22.12 + imageRepo: kubesphere + clusterName: cluster.local + masqueradeAll: false # masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode. [Default: false] + maxPods: 110 # maxPods is the number of pods that can run on this Kubelet. [Default: 110] + nodeCidrMaskSize: 24 # internal network node size allocation. This is the size allocated to each node on your network. [Default: 24] + proxyMode: ipvs # mode specifies which proxy mode to use. [Default: ipvs] + network: + plugin: calico + calico: + ipipMode: Always # IPIP Mode to use for the IPv4 POOL created at start up. If set to a value other than Never, vxlanMode should be set to "Never". [Always | CrossSubnet | Never] [Default: Always] + vxlanMode: Never # VXLAN Mode to use for the IPv4 POOL created at start up. If set to a value other than Never, ipipMode should be set to "Never". [Always | CrossSubnet | Never] [Default: Never] + vethMTU: 1440 # The maximum transmission unit (MTU) setting determines the largest packet size that can be transmitted through your network. [Default: 1440] + kubePodsCIDR: 10.233.64.0/18 + kubeServiceCIDR: 10.233.0.0/18 + registry: + registryMirrors: [] + insecureRegistries: [] + privateRegistry: "" + storage: + defaultStorageClass: localVolume + localVolume: + storageClassName: local + +--- +apiVersion: installer.kubesphere.io/v1alpha1 +kind: ClusterConfiguration +metadata: + name: ks-installer + namespace: kubesphere-system + labels: + version: v3.3.2 +spec: + local_registry: "" + persistence: + storageClass: "" + authentication: + jwtSecret: "" + etcd: + monitoring: true # Whether to install etcd monitoring dashboard + endpointIps: 192.168.0.7,192.168.0.8,192.168.0.9 # etcd cluster endpointIps + port: 2379 # etcd port + tlsEnable: true + common: + mysqlVolumeSize: 20Gi # MySQL PVC size + minioVolumeSize: 20Gi # Minio PVC size + etcdVolumeSize: 20Gi # etcd PVC size + openldapVolumeSize: 2Gi # openldap PVC size + redisVolumSize: 2Gi # Redis PVC size + es: # Storage backend for logging, tracing, events and auditing. + elasticsearchMasterReplicas: 1 # total number of master nodes, it's not allowed to use even number + elasticsearchDataReplicas: 1 # total number of data nodes + elasticsearchMasterVolumeSize: 4Gi # Volume size of Elasticsearch master nodes + elasticsearchDataVolumeSize: 20Gi # Volume size of Elasticsearch data nodes + logMaxAge: 7 # Log retention time in built-in Elasticsearch, it is 7 days by default. + elkPrefix: logstash # The string making up index names. 
The index name will be formatted as ks--log + # externalElasticsearchHost: + # externalElasticsearchPort: + console: + enableMultiLogin: false # enable/disable multiple sing on, it allows a user can be used by different users at the same time. + port: 30880 + alerting: # Whether to install KubeSphere alerting system. It enables Users to customize alerting policies to send messages to receivers in time with different time intervals and alerting levels to choose from. + enabled: false + auditing: # Whether to install KubeSphere audit log system. It provides a security-relevant chronological set of records,recording the sequence of activities happened in platform, initiated by different tenants. + enabled: false + devops: # Whether to install KubeSphere DevOps System. It provides out-of-box CI/CD system based on Jenkins, and automated workflow tools including Source-to-Image & Binary-to-Image + enabled: false + jenkinsMemoryLim: 2Gi # Jenkins memory limit + jenkinsMemoryReq: 1500Mi # Jenkins memory request + jenkinsVolumeSize: 8Gi # Jenkins volume size + jenkinsJavaOpts_Xms: 512m # The following three fields are JVM parameters + jenkinsJavaOpts_Xmx: 512m + jenkinsJavaOpts_MaxRAM: 2g + events: # Whether to install KubeSphere events system. It provides a graphical web console for Kubernetes Events exporting, filtering and alerting in multi-tenant Kubernetes clusters. + enabled: false + logging: # Whether to install KubeSphere logging system. Flexible logging functions are provided for log query, collection and management in a unified console. Additional log collectors can be added, such as Elasticsearch, Kafka and Fluentd. + enabled: false + logsidecarReplicas: 2 + metrics_server: # Whether to install metrics-server. IT enables HPA (Horizontal Pod Autoscaler). + enabled: true + monitoring: # + prometheusReplicas: 1 # Prometheus replicas are responsible for monitoring different segments of data source and provide high availability as well. + prometheusMemoryRequest: 400Mi # Prometheus request memory + prometheusVolumeSize: 20Gi # Prometheus PVC size + alertmanagerReplicas: 1 # AlertManager Replicas + multicluster: + clusterRole: none # host | member | none # You can install a solo cluster, or specify it as the role of host or member cluster + networkpolicy: # Network policies allow network isolation within the same cluster, which means firewalls can be set up between certain instances (Pods). + enabled: false + notification: # It supports notification management in multi-tenant Kubernetes clusters. It allows you to set AlertManager as its sender, and receivers include Email, Wechat Work, and Slack. + enabled: false + openpitrix: # Whether to install KubeSphere App Store. It provides an application store for Helm-based applications, and offer application lifecycle management + enabled: false + servicemesh: # (0.3 Core, 300 MiB) Provide fine-grained traffic management, observability and tracing, and visualized traffic topology + enabled: false +``` + +Create a cluster using the configuration file you customized above: + +```bash +./kk create cluster -f config-sample.yaml +``` + +## Verify the Multi-node Installation + +Inspect the logs of installation by executing the command below: + +```bash +kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f +``` + +If you can see the welcome log return, it means the installation is successful. Your cluster is up and running. 
+ +```yaml +************************************************** +##################################################### +### Welcome to KubeSphere! ### +##################################################### +Console: http://10.10.71.214:30880 +Account: admin +Password: P@88w0rd +NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. +##################################################### +https://kubesphere.io 2020-08-15 23:32:12 +##################################################### +``` + +### Log in to the Console + +You will be able to use default account and password `admin/P@88w0rd` to log in to the console `http://{$IP}:30880` to take a tour of KubeSphere. Please change the default password after login. + +## Enable Pluggable Components (Optional) + +The example above demonstrates the process of a default minimal installation. To enable other components in KubeSphere, see [Enable Pluggable Components](../../../pluggable-components/) for more details. diff --git a/content/en/docs/v3.4/installing-on-linux/on-premises/installing-kubesphere-on-minikube.md b/content/en/docs/v3.4/installing-on-linux/on-premises/installing-kubesphere-on-minikube.md new file mode 100644 index 000000000..7c02e6564 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/on-premises/installing-kubesphere-on-minikube.md @@ -0,0 +1,148 @@ +--- +title: "Installing KubeSphere on Minikube" +keywords: 'KubeSphere, Minikube, Minimal, Installation' +description: 'Install KubeSphere on an existing Minikube cluster with a minimal installation package.' +linkTitle: "Installing KubeSphere on Minikube" +weight: 2200, +showSubscribe: true +--- + +In addition to installing KubeSphere on a Linux machine, you can also deploy it on minikube cluster. This tutorial demonstrates the general steps of completing a minimal KubeSphere installation on Minikube. + +## Prerequisites + +- To install KubeSphere 3.2.1 on Minikube, your Minikube version must be v1.23.x, v1.24.x, v1.25.x. +- Make sure your machine meets the minimal hardware requirement: CPU > 2 Core, Memory > 2 GB, 20GB free disk space, Container or virtual machine manager, such as: Docker, Hyperkit, Hyper-V, KVM, Parallels, Podman, VirtualBox, or VMware Fusion/Workstation. +- A **default** Storage Class in your Minikube cluster needs to be configured before the installation. + +## Pre-checks + +1. Make sure your minikube version is compatible by running `minikube version` in your terminal. The output may look as below: + + ```bash + ❯ minikube version + minikube version: v1.24.0 + commit: 76b94fb3c4e8ac5062daf70d60cf03ddcc0a741b + + ``` + +2. Check if the available resources in your cluster meet the minimum requirements. + + ```bash + ❯ free -g + total used free shared buff/cache available + Mem: 6 2 2 0 1 3 + Swap: 0 0 0 + ## Memory > 2GB + ❯ lscpu + Architecture: x86_64 + CPU op-mode(s): 32-bit, 64-bit + Byte Order: Little Endian + Address sizes: 40 bits physical, 48 bits virtual + CPU(s): 4 + ## More than 2 CPUs + ❯ df -h + Filesystem Size Used Avail Use% Mounted on + udev 3.4G 0 3.4G 0% /dev + tmpfs 694M 2.6M 692M 1% /run + /dev/sda3 198G 116G 73G 62% / ## Available more than 20GB free disk space + + + ``` + +3. Make sure your Kubectl version is compatible by running `kubectl version` in your minikube cluster node. 
The output may look as below: + + ```bash + ❯ kubectl version + Client Version: version.Info{Major:"1", Minor:"23", GitVersion:"v1.23.1", GitCommit:"86ec240af8cbd1b60bcc4c03c20da9b98005b92e", GitTreeState:"clean", BuildDate:"2021-12-16T11:41:01Z", GoVersion:"go1.17.5", Compiler:"gc", Platform:"linux/amd64"} + Server Version: version.Info{Major:"1", Minor:"22", GitVersion:"v1.22.3", GitCommit:"c92036820499fedefec0f847e2054d824aea6cd1", GitTreeState:"clean", BuildDate:"2021-10-27T18:35:25Z", GoVersion:"go1.16.9", Compiler:"gc", Platform:"linux/amd64"} + + ``` + + {{< notice note >}} +Pay attention to the `Server Version` line. If `GitVersion` shows an older one, you need to upgrade Kubectl first. + {{}} +4. Check if there is a **default** StorageClass in your cluster. An existing default StorageClass is a prerequisite for KubeSphere installation. + + ```bash + $ kubectl get sc + NAME PROVISIONER AGE + glusterfs (default) kubernetes.io/glusterfs 3d4h + ``` + +If your Minikube cluster environment meets all the requirements above, then you are ready to deploy KubeSphere on your Minikube. + +{{< notice note >}} + +- The CSR signing feature is activated in `kube-apiserver` when it is started with the `--cluster-signing-cert-file` and `--cluster-signing-key-file` parameters. See [RKE installation issue](https://github.com/kubesphere/kubesphere/issues/1925#issuecomment-591698309). + +{{}} + +## Video Demonstration + +{{< youtube gROOqfupRII>}} + +## Deploy KubeSphere + +After you make sure your machine meets the conditions, perform the following steps to install KubeSphere. + +1. Start minikube. + + ``` bash + ❯ minikube start + 😄 minikube v1.24.0 on Debian 10.1 + 🎉 minikube 1.25.2 is available! Download it: https://github.com/kubernetes/minikube/releases/tag/v1.25.2 + 💡 To disable this notice, run: 'minikube config set WantUpdateNotification false' + + ✨ Using the docker driver based on existing profile + 👍 Starting control plane node minikube in cluster minikube + 🚜 Pulling base image ... + 🔄 Restarting existing docker container for "minikube" ... + 🐳 Preparing Kubernetes v1.22.3 on Docker 20.10.8 ... + 🔎 Verifying Kubernetes components... + ▪ Using image gcr.io/k8s-minikube/storage-provisioner:v5 + 🌟 Enabled addons: storage-provisioner, default-storageclass + 🏄 Done! kubectl is now configured to use "minikube" cluster and "default" namespace by default + ``` + +2. Run the following commands to start installation: + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml + + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml + ``` + +3. After KubeSphere is successfully installed, you can run the following command to view the installation logs: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + +4. Use `kubectl get pod --all-namespaces` to see whether all Pods are running normally in relevant namespaces of KubeSphere. If they are, check the port (`30880` by default) of the console by running the following command: + + ```bash + kubectl get svc/ks-console -n kubesphere-system + ``` + +5. Make sure port `30880` is opened in your security group and access the web console through the NodePort (`IP:30880`) with the default account and password (`admin/P@88w0rd`). + +6. 
After logging in to the console, you can check the status of different components in **System Components**. You may need to wait for some components to be up and running if you want to use related services. + +## Enable Pluggable Components (Optional) + +This guide is used only for the minimal installation by default. For more information about how to enable other components in KubeSphere, see [Enable Pluggable Components](../../../pluggable-components/). + +## Code Demonstration + + + +## Uninstall KubeSphere from Minikube + +You can uninstall KubeSphere from your existing Minikube cluster by using [kubesphere-delete.sh](https://github.com/kubesphere/ks-installer/blob/release-3.1/scripts/kubesphere-delete.sh). Copy it from the [GitHub source file](https://raw.githubusercontent.com/kubesphere/ks-installer/release-3.1/scripts/kubesphere-delete.sh) and execute this script on your local machine. + +{{< notice warning >}} + +Uninstalling will remove KubeSphere from your Minikube cluster. This operation is irreversible and does not have any backup. Please be cautious with this operation. + +{{}} diff --git a/content/en/docs/v3.4/installing-on-linux/persistent-storage-configurations/_index.md b/content/en/docs/v3.4/installing-on-linux/persistent-storage-configurations/_index.md new file mode 100644 index 000000000..5324b6146 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/persistent-storage-configurations/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Persistent Storage Configurations" +weight: 3300 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-ceph-csi-rbd.md b/content/en/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-ceph-csi-rbd.md new file mode 100644 index 000000000..4bc325712 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-ceph-csi-rbd.md @@ -0,0 +1,128 @@ +--- +title: "Install Ceph" +keywords: 'KubeSphere, Kubernetes, Ceph, installation, configurations, storage' +description: 'Use KubeKey to create a cluster with Ceph providing storage services.' +linkTitle: "Install Ceph" +weight: 3350 +--- + +With a Ceph server, you can choose [Ceph RBD](https://kubernetes.io/docs/concepts/storage/storage-classes/#ceph-rbd) or [Ceph CSI](https://github.com/ceph/ceph-csi) as the underlying storage plugin. Ceph RBD is an in-tree storage plugin on Kubernetes, and Ceph CSI is a Container Storage Interface (CSI) driver for RBD, CephFS. + +### Which plugin to select for Ceph + +Ceph CSI RBD is the preferred choice if you work with **14.0.0 (Nautilus)+** Ceph cluster. Here are some reasons: + +- The in-tree plugin will be deprecated in the future. +- Ceph RBD only works on Kubernetes with **hyperkube** images, and **hyperkube** images were + [deprecated since Kubernetes 1.17](https://github.com/kubernetes/kubernetes/pull/85094). +- Ceph CSI has more features such as cloning, expanding and snapshots. + +### Ceph CSI RBD + +Ceph-CSI needs to be installed on v1.14.0+ Kubernetes, and work with 14.0.0 (Nautilus)+ Ceph Cluster. +For details about compatibility, see [Ceph CSI Support Matrix](https://github.com/ceph/ceph-csi#support-matrix). + +The following is an example of KubeKey add-on configurations for Ceph CSI RBD installed by **Helm Charts**. +As the StorageClass is not included in the chart, a StorageClass needs to be configured in the add-on config. 
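+
+Before preparing the add-on, it may be worth confirming the Ceph side of the version requirements above and collecting the values you will need for the chart. A small sketch, assuming the `ceph` CLI is available on a monitor node:
+
+```bash
+# Run on a Ceph monitor node
+ceph --version   # should report 14.x (Nautilus) or later for Ceph CSI
+ceph fsid        # the cluster ID; commonly used as the clusterID value in the chart
+ceph mon dump    # lists the monitor addresses to fill in under "monitors"
+```
+
+The chart values below simply use `cluster1` as the `clusterID`; any string works as long as it matches the `clusterID` in the StorageClass.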
+ +#### Chart configurations + +```yaml +csiConfig: + - clusterID: "cluster1" + monitors: + - "192.168.0.8:6789" # <--TobeReplaced--> + - "192.168.0.9:6789" # <--TobeReplaced--> + - "192.168.0.10:6789" # <--TobeReplaced--> +``` + +If you want to configure more values, see [chart configuration for ceph-csi-rbd](https://github.com/ceph/ceph-csi/tree/master/charts/ceph-csi-rbd). + +#### StorageClass (including secret) + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: csi-rbd-secret + namespace: kube-system +stringData: + userID: admin + userKey: "AQDoECFfYD3DGBAAm6CPhFS8TQ0Hn0aslTlovw==" # <--ToBeReplaced--> +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: csi-rbd-sc + annotations: + storageclass.beta.kubernetes.io/is-default-class: "true" + storageclass.kubesphere.io/supported-access-modes: '["ReadWriteOnce","ReadOnlyMany","ReadWriteMany"]' +provisioner: rbd.csi.ceph.com +parameters: + clusterID: "cluster1" + pool: "rbd" # <--ToBeReplaced--> + imageFeatures: layering + csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret + csi.storage.k8s.io/provisioner-secret-namespace: kube-system + csi.storage.k8s.io/controller-expand-secret-name: csi-rbd-secret + csi.storage.k8s.io/controller-expand-secret-namespace: kube-system + csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret + csi.storage.k8s.io/node-stage-secret-namespace: kube-system + csi.storage.k8s.io/fstype: ext4 +reclaimPolicy: Delete +allowVolumeExpansion: true +mountOptions: + - discard +``` + +#### Add-on configurations + +Save the above chart config and StorageClass locally (for example, `/root/ceph-csi-rbd.yaml` and `/root/ceph-csi-rbd-sc.yaml`). The add-on configuration can be set like: + +```yaml +addons: +- name: ceph-csi-rbd + namespace: kube-system + sources: + chart: + name: ceph-csi-rbd + repo: https://ceph.github.io/csi-charts + valuesFile: /root/ceph-csi-rbd.yaml +- name: ceph-csi-rbd-sc + sources: + yaml: + path: + - /root/ceph-csi-rbd-sc.yaml +``` + +### Ceph RBD + +KubeKey will never use **hyperkube** images. Hence, in-tree Ceph RBD may not work on Kubernetes installed by KubeKey. However, if your Ceph cluster is lower than 14.0.0 which means Ceph CSI can't be used, [rbd provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/ceph/rbd) can be used as a substitute for Ceph RBD. Its format is the same with [in-tree Ceph RBD](https://kubernetes.io/docs/concepts/storage/storage-classes/#ceph-rbd). +The following is an example of KubeKey add-on configurations for rbd provisioner installed by **Helm Charts including a StorageClass**. + +#### Chart configurations + +```yaml +ceph: + mon: "192.168.0.12:6789" # <--ToBeReplaced--> + adminKey: "QVFBS1JkdGRvV0lySUJBQW5LaVpSKzBRY2tjWmd6UzRJdndmQ2c9PQ==" # <--ToBeReplaced--> + userKey: "QVFBS1JkdGRvV0lySUJBQW5LaVpSKzBRY2tjWmd6UzRJdndmQ2c9PQ==" # <--ToBeReplaced--> +sc: + isDefault: false +``` + +If you want to configure more values, see [chart configuration for rbd-provisioner](https://github.com/kubesphere/helm-charts/tree/master/src/test/rbd-provisioner#configuration). + +#### Add-on configurations + +Save the above chart config locally (for example, `/root/rbd-provisioner.yaml`). 
The add-on config for rbd provisioner could be like: + +```yaml +- name: rbd-provisioner + namespace: kube-system + sources: + chart: + name: rbd-provisioner + repo: https://charts.kubesphere.io/test + valuesFile: /root/rbd-provisioner.yaml +``` diff --git a/content/en/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-glusterfs.md b/content/en/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-glusterfs.md new file mode 100644 index 000000000..1c2302bb7 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-glusterfs.md @@ -0,0 +1,297 @@ +--- +title: "Install GlusterFS" +keywords: 'KubeSphere, Kubernetes, GlusterFS, installation, configurations, storage' +description: 'Use KubeKey to create a cluster with GlusterFS providing storage services.' +linkTitle: "Install GlusterFS" +weight: 3340 +--- + +[GlusterFS](https://kubernetes.io/docs/concepts/storage/storage-classes/#glusterfs) is an in-tree storage plugin in Kubernetes. Hence, you only need to install the storage class. + +This tutorial demonstrates how to use KubeKey to set up a KubeSphere cluster and configure GlusterFS to provide storage services. + +{{< notice note >}} + +Ubuntu 16.04 is used as an example in this tutorial. + +{{}} + +## Prerequisites + +You have set up your GlusterFS cluster and configured Heketi. For more information, see [Set up a GlusterFS Server](../../../reference/storage-system-installation/glusterfs-server/). + +## Step 1: Configure the Client Machine + +You need to install the GlusterFS client package on all your client machines. + +1. Install `software-properties-common`. + + ```bash + apt-get install software-properties-common + ``` + +2. Add the community GlusterFS PPA. + + ```bash + add-apt-repository ppa:gluster/glusterfs-7 + ``` + +3. Make sure you are using the latest package. + + ```bash + apt-get update + ``` + +4. Install the GlusterFS client. + + ```bash + apt-get install glusterfs-server -y + ``` + +5. Verify your GlusterFS version. + + ```bash + glusterfs -V + ``` + +## Step 2: Create a Configuration File for GlusterFS + +The separate configuration file contains all parameters of GlusterFS storage which will be used by KubeKey during installation. + +1. Go to one of the nodes (taskbox) where you want to download KubeKey later and run the following command to create a configuration file. + + ``` + vi glusterfs-sc.yaml + ``` + + An example configuration file (include a Heketi Secret): + + ```yaml + apiVersion: v1 + kind: Secret + metadata: + name: heketi-secret + namespace: kube-system + type: kubernetes.io/glusterfs + data: + key: "MTIzNDU2" # Replace it with your own key. Base64 coding. + --- + apiVersion: storage.k8s.io/v1 + kind: StorageClass + metadata: + annotations: + storageclass.beta.kubernetes.io/is-default-class: "true" + storageclass.kubesphere.io/supported-access-modes: '["ReadWriteOnce","ReadOnlyMany","ReadWriteMany"]' + name: glusterfs + parameters: + clusterid: "21240a91145aee4d801661689383dcd1" # Replace it with your own GlusterFS cluster ID. + gidMax: "50000" + gidMin: "40000" + restauthenabled: "true" + resturl: "http://192.168.0.2:8080" # The Gluster REST service/Heketi service url which provision gluster volumes on demand. Replace it with your own. + restuser: admin + secretName: heketi-secret + secretNamespace: kube-system + volumetype: "replicate:3" # Replace it with your own volume type. 
+ provisioner: kubernetes.io/glusterfs + reclaimPolicy: Delete + volumeBindingMode: Immediate + allowVolumeExpansion: true + ``` + + {{< notice note >}} + + - Use the field `storageclass.beta.kubernetes.io/is-default-class` to set `glusterfs` as your default storage class. If it is `false`, KubeKey will install OpenEBS as the default storage class. + - For more information about parameters in the storage class manifest, see [the Kubernetes documentation](https://kubernetes.io/docs/concepts/storage/storage-classes/#glusterfs). + + {{}} + +2. Save the file. + +## Step 3: Download KubeKey + +Follow the steps below to download [KubeKey](../../../installing-on-linux/introduction/kubekey/) on the taskbox. + +{{< tabs >}} + +{{< tab "Good network connections to GitHub/Googleapis" >}} + +Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly. + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "Poor network connections to GitHub/Googleapis" >}} + +Run the following command first to make sure you download KubeKey from the correct zone. + +```bash +export KKZONE=cn +``` + +Run the following command to download KubeKey: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +After you download KubeKey, if you transfer it to a new machine also with poor network connections to Googleapis, you must run `export KKZONE=cn` again before you proceed with the steps below. + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +The commands above download the latest release of KubeKey. You can change the version number in the command to download a specific version. + +{{}} + +Make `kk` executable: + +```bash +chmod +x kk +``` + +## Step 4: Create a Cluster + +1. Specify a Kubernetes version and a KubeSphere version that you want to install. For example: + + ```bash + ./kk create config --with-kubernetes v1.22.12 --with-kubesphere v3.3.2 + ``` + + {{< notice note >}} + + - Recommended Kubernetes versions for KubeSphere 3.3: v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and * v1.24.x. For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatability. Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x. If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.23.10 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../../installing-on-linux/introduction/kubekey/#support-matrix). + + - If you do not add the flag `--with-kubesphere` in the command in this step, KubeSphere will not be deployed unless you install it using the `addons` field in the configuration file or add this flag again when you use `./kk create cluster` later. + - If you add the flag `--with-kubesphere` without specifying a KubeSphere version, the latest version of KubeSphere will be installed. + + {{}} + +2. A default file `config-sample.yaml` will be created if you do not customize the name. Edit the file. + + ```bash + vi config-sample.yaml + ``` + + ```yaml + ... 
+ metadata: + name: sample + spec: + hosts: + - {name: client1, address: 192.168.0.5, internalAddress: 192.168.0.5, user: ubuntu, password: Testing123} + - {name: client2, address: 192.168.0.6, internalAddress: 192.168.0.6, user: ubuntu, password: Testing123} + - {name: client3, address: 192.168.0.7, internalAddress: 192.168.0.7, user: ubuntu, password: Testing123} + roleGroups: + etcd: + - client1 + control-plane: + - client1 + worker: + - client2 + - client3 + controlPlaneEndpoint: + domain: lb.kubesphere.local + address: "" + port: 6443 + kubernetes: + version: v1.22.12 + imageRepo: kubesphere + clusterName: cluster.local + network: + plugin: calico + kubePodsCIDR: 10.233.64.0/18 + kubeServiceCIDR: 10.233.0.0/18 + registry: + registryMirrors: [] + insecureRegistries: [] + addons: + - name: glusterfs + namespace: kube-system + sources: + yaml: + path: + - /root/glusterfs-sc.yaml + ... + ``` + +3. Pay special attention to the field of `addons`, under which you must provide the information of the storage class to be created as well as the Heketi Secret. For more information about each parameter in this file, see [Multi-node Installation](../../../installing-on-linux/introduction/multioverview/#2-edit-the-configuration-file). + +4. Save the file and execute the following command to install Kubernetes and KubeSphere: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +5. When the installation finishes, you can inspect installation logs with the following command: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + Expected output: + + ```bash + ##################################################### + ### Welcome to KubeSphere! ### + ##################################################### + + Console: http://192.168.0.4:30880 + Account: admin + Password: P@88w0rd + + NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. + + ##################################################### + https://kubesphere.io 20xx-xx-xx xx:xx:xx + ##################################################### + ``` + +## Step 5: Verify Installation + +You can verify that GlusterFS has been successfully installed either from the command line or from the KubeSphere web console. + +### Command line + +Run the following command to check your storage class. + +```bash +kubectl get sc +``` + +Expected output: + +```bash +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +glusterfs (default) kubernetes.io/glusterfs Delete Immediate true 104m +``` + +### KubeSphere console + +1. Log in to the web console with the default account and password (`admin/P@88w0rd`) at `:30880`. Click **Platform** in the upper-left corner and select **Cluster Management**. + +3. Go to **Volumes** under **Storage**, and you can see PVCs in use. + + {{< notice note >}} + + For more information about how to create volumes on the KubeSphere console, see [Volumes](../../../project-user-guide/storage/volumes/). + + {{}} + +4. On the **Storage Classes** page, you can see the storage class available in your cluster. 
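+
+Optionally, you can confirm that dynamic provisioning works end to end by creating a small test PVC against the `glusterfs` class and checking that it becomes `Bound`. A minimal sketch (the claim name and size are arbitrary):
+
+```bash
+cat <<EOF | kubectl apply -f -
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: glusterfs-test-pvc
+spec:
+  storageClassName: glusterfs
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+EOF
+
+# The claim should reach the Bound status shortly
+kubectl get pvc glusterfs-test-pvc
+
+# Clean up the test claim afterwards
+kubectl delete pvc glusterfs-test-pvc
+```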
\ No newline at end of file diff --git a/content/en/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-nfs-client.md b/content/en/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-nfs-client.md new file mode 100644 index 000000000..e610656a6 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-nfs-client.md @@ -0,0 +1,270 @@ +--- +title: "Install NFS Client" +keywords: 'KubeSphere, Kubernetes, storage, installation, configurations, NFS' +description: 'Use KubeKey to create a cluster with NFS Client providing storage services.' +linkTitle: "Install NFS Client" +weight: 3330 +--- + +This tutorial demonstrates how to set up a KubeSphere cluster and configure NFS storage. + +{{< notice note >}} + +- Ubuntu 16.04 is used as an example in this tutorial. +- NFS is incompatible with some applications, for example, Prometheus, which may result in pod creation failures. If you need to use NFS in the production environment, ensure that you have understood the risks. For more information, contact support@kubesphere.cloud. + +{{}} + +## Prerequisites + +You must have an NFS server ready providing external storage services. Make sure you have created and exported a directory on the NFS server which your permitted client machines can access. For more information, see [Set up an NFS Server](../../../reference/storage-system-installation/nfs-server/). + +## Step 1: Configure the Client Machine + +Install `nfs-common` on all of the clients. It provides necessary NFS functions while you do not need to install any server components. + +1. Execute the following command to make sure you are using the latest package. + + ```bash + sudo apt-get update + ``` + +2. Install `nfs-common` on all the clients. + + ```bash + sudo apt-get install nfs-common + ``` + +3. Go to one of the client machines (taskbox) where you want to download KubeKey later. Create a configuration file that contains all the necessary parameters of your NFS server which will be referenced by KubeKey during installation. + + ```bash + vi nfs-client.yaml + ``` + + An example configuration file: + + ```yaml + nfs: + server: "192.168.0.2" # This is the server IP address. Replace it with your own. + path: "/mnt/demo" # Replace the exported directory with your own. + storageClass: + defaultClass: false + ``` + + {{< notice note >}} + + - If you want to configure more values, see [chart configurations for NFS-client](https://github.com/kubesphere/helm-charts/tree/master/src/main/nfs-client-provisioner#configuration). + - The `storageClass.defaultClass` field controls whether you want to set the storage class of NFS-client Provisioner as the default one. If you enter `false` for it, KubeKey will install [OpenEBS](https://github.com/openebs/openebs) to provide local volumes, while they are not provisioned dynamically as you create workloads on your cluster. After you install KubeSphere, you can change the default storage class on the console directly. + + {{}} + +4. Save the file. + +## Step 2: Download KubeKey + +Follow the steps below to download [KubeKey](../../../installing-on-linux/introduction/kubekey/) on the taskbox. + +{{< tabs >}} + +{{< tab "Good network connections to GitHub/Googleapis" >}} + +Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly. 
+ +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "Poor network connections to GitHub/Googleapis" >}} + +Run the following command first to make sure you download KubeKey from the correct zone. + +```bash +export KKZONE=cn +``` + +Run the following command to download KubeKey: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +After you download KubeKey, if you transfer it to a new machine also with poor network connections to Googleapis, you must run `export KKZONE=cn` again before you proceed with the steps below. + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +The commands above download the latest release of KubeKey. You can change the version number in the command to download a specific version. + +{{}} + +Make `kk` executable: + +```bash +chmod +x kk +``` + +## Step 3: Create a Cluster + +1. Specify a Kubernetes version and a KubeSphere version that you want to install. For example: + + ```bash + ./kk create config --with-kubernetes v1.22.12 --with-kubesphere v3.3.2 + ``` + + {{< notice note >}} + + - Recommended Kubernetes versions for KubeSphere 3.3: v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and * v1.24.x. For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatability. Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x. If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.23.10 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../../installing-on-linux/introduction/kubekey/#support-matrix). + + - If you do not add the flag `--with-kubesphere` in the command in this step, KubeSphere will not be deployed unless you install it using the `addons` field in the configuration file or add this flag again when you use `./kk create cluster` later. + - If you add the flag `--with-kubesphere` without specifying a KubeSphere version, the latest version of KubeSphere will be installed. + + {{}} + +2. A default file `config-sample.yaml` will be created if you do not customize the name. Edit the file. + + ```bash + vi config-sample.yaml + ``` + + ```yaml + ... + metadata: + name: sample + spec: + hosts: + - {name: client1, address: 192.168.0.3, internalAddress: 192.168.0.3, user: ubuntu, password: Testing123} + - {name: client2, address: 192.168.0.4, internalAddress: 192.168.0.4, user: ubuntu, password: Testing123} + - {name: client3, address: 192.168.0.5, internalAddress: 192.168.0.5, user: ubuntu, password: Testing123} + roleGroups: + etcd: + - client1 + control-plane: + - client1 + worker: + - client2 + - client3 + controlPlaneEndpoint: + domain: lb.kubesphere.local + address: "" + port: 6443 + kubernetes: + version: v1.22.12 + imageRepo: kubesphere + clusterName: cluster.local + network: + plugin: calico + kubePodsCIDR: 10.233.64.0/18 + kubeServiceCIDR: 10.233.0.0/18 + registry: + registryMirrors: [] + insecureRegistries: [] + addons: + - name: nfs-client + namespace: kube-system + sources: + chart: + name: nfs-client-provisioner + repo: https://charts.kubesphere.io/main + valuesFile: /home/ubuntu/nfs-client.yaml # Use the path of your own NFS-client configuration file. + ... + ``` + +3. Pay special attention to the field of `addons`, under which you must provide the information of NFS-client. 
For more information about each parameter in this file, see [Multi-node Installation](../../../installing-on-linux/introduction/multioverview/#2-edit-the-configuration-file). + +4. Save the file and execute the following command to install Kubernetes and KubeSphere: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +5. When the installation finishes, you can inspect installation logs with the following command: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + Expected output: + + ```bash + ##################################################### + ### Welcome to KubeSphere! ### + ##################################################### + + Console: http://192.168.0.3:30880 + Account: admin + Password: P@88w0rd + + NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. + + ##################################################### + https://kubesphere.io 20xx-xx-xx xx:xx:xx + ##################################################### + ``` + +## Step 4: Verify Installation + +You can verify that NFS-client has been successfully installed either from the command line or from the KubeSphere web console. + +### Command line + +1. Run the following command to check your storage class. + + ```bash + kubectl get sc + ``` + + Expected output: + + ```bash + NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE + local (default) openebs.io/local Delete WaitForFirstConsumer false 16m + nfs-client cluster.local/nfs-client-nfs-client-provisioner Delete Immediate true 16m + ``` + + {{< notice note >}} + + If you set `nfs-client` as the default storage class, OpenEBS will not be installed by KubeKey. + + {{}} + +2. Run the following command to check the statuses of Pods. + + ```bash + kubectl get pod -n kube-system + ``` + + Note that `nfs-client` is installed in the namespace `kube-system`. Expected output (exclude irrelevant Pods): + + ```bash + NAME READY STATUS RESTARTS AGE + nfs-client-nfs-client-provisioner-6fc95f4f79-92lsh 1/1 Running 0 16m + ``` + +### KubeSphere console + +1. Log in to the web console as `admin` with the default account and password at `:30880`. Click **Platform** in the upper-left corner and select **Cluster Management**. + +2. Go to **Pods** in **Application Workloads** and select `kube-system` from the project drop-down list. You can see that the Pod of `nfs-client` is up and running. + +3. Go to **Storage Classes** under **Storage**, and you can see available storage classes in your cluster. + + {{< notice note >}} + + For more information about how to create volumes on the KubeSphere console, see [Volumes](../../../project-user-guide/storage/volumes/). 
+ + {{}} \ No newline at end of file diff --git a/content/en/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-qingcloud-csi.md b/content/en/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-qingcloud-csi.md new file mode 100644 index 000000000..1c096476b --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-qingcloud-csi.md @@ -0,0 +1,274 @@ +--- +title: "Install QingCloud CSI" +keywords: 'KubeSphere, Kubernetes, QingCloud CSI, installation, configurations, storage' +description: 'Use KubeKey to create a cluster with QingCloud CSI providing storage services.' +linkTitle: "Install QingCloud CSI" +weight: 3320 +--- + +If you plan to install KubeSphere on [QingCloud](https://www.qingcloud.com/), [QingCloud CSI](https://github.com/yunify/qingcloud-csi) can be chosen as the underlying storage plugin. + +This tutorial demonstrates how to use KubeKey to set up a KubeSphere cluster and configure QingCloud CSI to provide storage services. + +## Prerequisites + +Your cluster nodes are created on [QingCloud Platform](https://intl.qingcloud.com/). + +## Step 1: Create Access Keys on QingCloud Platform + +To make sure the platform can create cloud disks for your cluster, you need to provide the access key (`qy_access_key_id` and `qy_secret_access_key`) in a separate configuration file of QingCloud CSI. + +1. Log in to the web console of [QingCloud](https://console.qingcloud.com/login) and select **Access Key** from the drop-down list in the top-right corner. + + ![access-key](/images/docs/v3.3/installing-on-linux/introduction/persistent-storage-configuration/access-key.jpg) + +2. Click **Create** to generate keys. Download the key after it is created, which is stored in a csv file. + +## Step 2: Create a Configuration File for QingCloud CSI + +The separate configuration file contains all parameters of QingCloud CSI which will be used by KubeKey during installation. + +1. Go to one of the nodes (taskbox) where you want to download KubeKey later and run the following command to create a configuration file. + + ``` + vi csi-qingcloud.yaml + ``` + + An example configuration file: + + ```yaml + config: + qy_access_key_id: "MBKTPXWCIRIEDQYQKXYL" # Replace it with your own key id. + qy_secret_access_key: "cqEnHYZhdVCVif9qCUge3LNUXG1Cb9VzKY2RnBdX" # Replace it with your own access key. + zone: "pek3a" # Lowercase letters only. + sc: + isDefaultClass: true # Set it as the default storage class. + ``` + +2. The field `zone` specifies where your cloud disks are created. On QingCloud Platform, you must select a zone before you create them. + + ![storage-zone](/images/docs/v3.3/installing-on-linux/introduction/persistent-storage-configuration/storage-zone.jpg) + + Make sure the value you specify for `zone` matches the region ID below: + + | Zone | Region ID | + | ------------------------------------------- | ----------------------- | + | Shanghai1-A/Shanghai1-B | sh1a/sh1b | + | Beijing3-A/Beijing3-B/Beijing3-C/Beijing3-D | pek3a/pek3b/pek3c/pek3d | + | Guangdong2-A/Guangdong2-B | gd2a/gd2b | + | Asia-Pacific 2-A | ap2a | + + If you want to configure more values, see [chart configuration for QingCloud CSI](https://github.com/kubesphere/helm-charts/tree/master/src/test/csi-qingcloud#configuration). + +3. Save the file. + +## Step 3: Download KubeKey + +Follow the steps below to download [KubeKey](../../../installing-on-linux/introduction/kubekey/) on the taskbox. 
+ +{{< tabs >}} + +{{< tab "Good network connections to GitHub/Googleapis" >}} + +Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly. + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "Poor network connections to GitHub/Googleapis" >}} + +Run the following command first to make sure you download KubeKey from the correct zone. + +```bash +export KKZONE=cn +``` + +Run the following command to download KubeKey: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +After you download KubeKey, if you transfer it to a new machine also with poor network connections to Googleapis, you must run `export KKZONE=cn` again before you proceed with the steps below. + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +The commands above download the latest release of KubeKey. You can change the version number in the command to download a specific version. + +{{}} + +Make `kk` executable: + +```bash +chmod +x kk +``` + +## Step 4: Create a Cluster + +1. Specify a Kubernetes version and a KubeSphere version that you want to install. For example: + + ```bash + ./kk create config --with-kubernetes v1.22.12 --with-kubesphere v3.3.2 + ``` + + {{< notice note >}} + + - Recommended Kubernetes versions for KubeSphere 3.3: v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and * v1.24.x. For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatability. Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x. If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.23.10 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../../installing-on-linux/introduction/kubekey/#support-matrix). + + - If you do not add the flag `--with-kubesphere` in the command in this step, KubeSphere will not be deployed unless you install it using the `addons` field in the configuration file or add this flag again when you use `./kk create cluster` later. + - If you add the flag `--with-kubesphere` without specifying a KubeSphere version, the latest version of KubeSphere will be installed. + + {{}} + +2. A default file `config-sample.yaml` will be created if you do not customize the name. Edit the file. + + ```bash + vi config-sample.yaml + ``` + + ```yaml + ... + metadata: + name: sample + spec: + hosts: + - {name: master, address: 192.168.0.2, internalAddress: 192.168.0.2, user: root, password: Testing123} + - {name: node1, address: 192.168.0.3, internalAddress: 192.168.0.3, user: root, password: Testing123} + - {name: node2, address: 192.168.0.4, internalAddress: 192.168.0.4, user: root, password: Testing123} + roleGroups: + etcd: + - master + control-plane: + - master + worker: + - node1 + - node2 + controlPlaneEndpoint: + domain: lb.kubesphere.local + address: "" + port: 6443 + kubernetes: + version: v1.22.12 + imageRepo: kubesphere + clusterName: cluster.local + network: + plugin: calico + kubePodsCIDR: 10.233.64.0/18 + kubeServiceCIDR: 10.233.0.0/18 + registry: + registryMirrors: [] + insecureRegistries: [] + addons: + - name: csi-qingcloud + namespace: kube-system + sources: + chart: + name: csi-qingcloud + repo: https://charts.kubesphere.io/test + valuesFile: /root/csi-qingcloud.yaml + ... + ``` + +3. Pay special attention to the field of `addons`, under which you must provide the information of QingCloud CSI. 
For more information about each parameter in this file, see [Multi-node Installation](../../../installing-on-linux/introduction/multioverview/#2-edit-the-configuration-file). + + {{< notice note >}} + + KubeKey will install QingCloud CSI by Helm charts together with its StorageClass. + + {{}} + +4. Save the file and execute the following command to install Kubernetes and KubeSphere: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +5. When the installation finishes, you can inspect installation logs with the following command: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + Expected output: + + ```bash + ##################################################### + ### Welcome to KubeSphere! ### + ##################################################### + + Console: http://192.168.0.3:30880 + Account: admin + Password: P@88w0rd + + NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. + + ##################################################### + https://kubesphere.io 20xx-xx-xx xx:xx:xx + ##################################################### + ``` + +## Step 5: Verify Installation + +You can verify that QingCloud CSI has been successfully installed either from the command line or from the KubeSphere web console. + +### Command line + +1. Run the following command to check your storage class. + + ```bash + kubectl get sc + ``` + + Expected output: + + ```bash + NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE + csi-qingcloud (default) disk.csi.qingcloud.com Delete WaitForFirstConsumer true 28m + ``` + +2. Run the following command to check the statuses of Pods. + + ```bash + kubectl get pod -n kube-system + ``` + + Note that `csi-qingcloud` is installed in the namespace `kube-system`. Expected output (exclude other irrelevant Pods): + + ```bash + NAME READY STATUS RESTARTS AGE + csi-qingcloud-controller-f95dcddfb-2gfck 5/5 Running 0 28m + csi-qingcloud-node-7dzz8 2/2 Running 0 28m + csi-qingcloud-node-k4hsj 2/2 Running 0 28m + csi-qingcloud-node-sptdb 2/2 Running 0 28m + ``` + +### KubeSphere console + +1. Log in to the web console with the default account and password (`admin/P@88w0rd`) at `:30880`. Click **Platform** in the upper-left corner and select **Cluster Management**. + +2. Go to **Pods** in **Application Workloads** and select `kube-system` from the project drop-down list. You can see that the Pods of `csi-qingcloud` are up and running. + +3. Go to **Storage Classes** under **Storage**, and you can see available storage classes in your cluster. + + {{< notice note >}} + + For more information about how to create volumes on the KubeSphere console, see [Volumes](../../../project-user-guide/storage/volumes/). 
+ + {{}} \ No newline at end of file diff --git a/content/en/docs/v3.4/installing-on-linux/persistent-storage-configurations/understand-persistent-storage.md b/content/en/docs/v3.4/installing-on-linux/persistent-storage-configurations/understand-persistent-storage.md new file mode 100644 index 000000000..58814eed4 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/persistent-storage-configurations/understand-persistent-storage.md @@ -0,0 +1,55 @@ +--- +title: "Understand Persistent Storage Installation" +keywords: 'KubeSphere, Kubernetes, storage, installation, configuration' +description: 'Understand how to use KubeKey to install different storage systems.' +linkTitle: "Understand Persistent Storage Installation" +weight: 3310 +--- + +Persistent volumes are a **must** for installing KubeSphere. When you use [KubeKey](../../../installing-on-linux/introduction/kubekey/) to set up a KubeSphere cluster, you can install different storage systems as [add-ons](https://github.com/kubesphere/kubekey/blob/master/docs/addons.md). The general steps of installing KubeSphere by KubeKey on Linux are: + +1. Install Kubernetes. +2. Install any provided add-ons. +3. Install KubeSphere by [ks-installer](https://github.com/kubesphere/ks-installer). + +In the second step, an available StorageClass **must** be installed. It includes: + +- The StorageClass itself +- The storage plugin for the StorageClass if necessary + +{{< notice note >}} + +Some storage systems require you to prepare a storage server in advance to provide external storage services. + +{{}} + +## How Does KubeKey Install Different Storage Systems + +KubeKey creates [a configuration file](../../../installing-on-linux/introduction/multioverview/#2-edit-the-configuration-file) (`config-sample.yaml` by default) for your cluster, which contains all the necessary parameters you can define for different resources, including various add-ons. Different storage systems, such as QingCloud CSI, can also be installed as add-ons by Helm charts or YAML. To let KubeKey install them in the desired way, you must provide KubeKey with the necessary configurations of these storage systems. + +There are generally two ways to let KubeKey apply the configurations of the storage system to be installed: + +1. Enter the necessary parameters under the `addons` field directly in `config-sample.yaml`. +2. Create a separate configuration file for your add-on that lists all the necessary parameters, and provide the path of the file in `config-sample.yaml` so that KubeKey can reference it during installation. + +For more information, see [add-ons](https://github.com/kubesphere/kubekey/blob/master/docs/addons.md). + +## Default Storage Class + +KubeKey supports the installation of different storage plugins and storage classes. No matter which storage system you install, you can specify in its configuration file whether it is the default storage class. If KubeKey detects that no default storage class is specified, it will install [OpenEBS](https://github.com/openebs/openebs) by default. + +The OpenEBS Dynamic Local PV provisioner creates Kubernetes Local Persistent Volumes that persist data in a unique HostPath (directory) on the node, which makes it easy to get started when you have no specific storage system. + +## Multi-storage Solutions + +If you intend to install more than one storage plugin, only one of them can be set as the default storage class. Otherwise, KubeKey cannot determine which storage class to use.
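+
+After installation, you can check which class currently holds the default flag and, if necessary, move it by editing the well-known annotation. A short sketch (the storage class names are placeholders; the examples in this section use the beta form of the annotation, which is also honored):
+
+```bash
+# The default class is marked "(default)" in the NAME column
+kubectl get sc
+
+# Unset the old default and set a new one
+kubectl patch storageclass <old-default> -p \
+  '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'
+kubectl patch storageclass <new-default> -p \
+  '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
+```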
+ +## Supported CSI Plugins + +Kubernetes has announced that in-tree volume plugins will be removed from Kubernetes in version 1.21. For more information, see [Kubernetes In-Tree to CSI Volume Migration Moves to Beta](https://kubernetes.io/blog/2019/12/09/kubernetes-1-17-feature-csi-migration-beta/). Therefore, it is recommended that you install CSI plugins instead. + +Supported CSI plugins: + +- [neonsan-csi](https://github.com/yunify/qingstor-csi) +- [qingcloud-csi](../install-qingcloud-csi/) +- [ceph-csi](../install-ceph-csi-rbd/) diff --git a/content/en/docs/v3.4/installing-on-linux/public-cloud/_index.md b/content/en/docs/v3.4/installing-on-linux/public-cloud/_index.md new file mode 100644 index 000000000..c6c002204 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/public-cloud/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Installing on Public Cloud" +weight: 3400 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/installing-on-linux/public-cloud/install-kubesphere-on-azure-vms.md b/content/en/docs/v3.4/installing-on-linux/public-cloud/install-kubesphere-on-azure-vms.md new file mode 100644 index 000000000..3f0a729d9 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/public-cloud/install-kubesphere-on-azure-vms.md @@ -0,0 +1,264 @@ +--- +title: "Deploy KubeSphere on Azure VM Instances" +keywords: "KubeSphere, Installation, HA, high availability, load balancer, Azure" +description: "Learn how to create a high-availability cluster on Azure virtual machines." +linkTitle: "Deploy KubeSphere on Azure VM Instances" +weight: 3410 +--- + +Using the [Azure cloud platform](https://azure.microsoft.com/en-us/overview/what-is-azure/), you can either install and manage Kubernetes by yourself or adopt a managed Kubernetes solution. If you want to use a fully-managed platform solution, see [Deploy KubeSphere on AKS](../../../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks/) for more details. + +Alternatively, you can set up a highly-available cluster on Azure instances. This tutorial demonstrates how to create a production-ready Kubernetes and KubeSphere cluster. + +## Introduction + +This tutorial uses two key features of Azure virtual machines (VMs): + +- [Virtual Machine Scale Sets (VMSS)](https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/overview): Azure VMSS let you create and manage a group of load balanced VMs. The number of VM instances can automatically increase or decrease in response to demand or a defined schedule (Kubernetes Autoscaler is available, but not covered in this tutorial. See [autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/azure) for more details), which perfectly fits Worker nodes. +- Availability Sets: An availability set is a logical grouping of VMs within a datacenter that are automatically distributed across fault domains. This approach limits the impact of potential physical hardware failures, network outages, or power interruptions. All the Master and etcd VMs will be placed in an availability set to achieve high availability. + +Besides these VMs, other resources like Load Balancer, Virtual Network and Network Security Group will also be used. + +## Prerequisites + +- You need an [Azure](https://portal.azure.com) account to create all the resources. 
+- Basic knowledge of [Azure Resource Manager](https://docs.microsoft.com/en-us/azure/azure-resource-manager/templates/) (ARM) templates, which are files that define the infrastructure and configuration for your project. +- For a production environment, it is recommended that you prepare persistent storage and create a StorageClass in advance. For development and testing, you can use [OpenEBS](https://openebs.io/), which is installed by KubeKey by default, to provision LocalPV directly. + +## Architecture + +Six machines of **Ubuntu 18.04** will be deployed in an Azure Resource Group. Three of them are grouped into an availability set, serving as both the control plane and etcd nodes. The other three VMs will be defined as a VMSS where Worker nodes will be running. + +![Architecture](/images/docs/v3.3/aks/Azure-architecture.png) + +These VMs will be attached to a load balancer. There are two predefined rules in the load balancer: + +- **Inbound NAT**: The SSH port will be mapped for each machine so that you can easily manage VMs. +- **Load Balancing**: The http and https ports will be mapped to Node pools by default. Other ports can be added on demand. + +| Service | Protocol | Rule | Backend Port | Frontend Port/Ports | Pools | +|---|---|---|---|---|---| +| ssh | TCP | Inbound NAT | 22 |50200, 50201, 50202, 50100~50199| Master, Node | +| apiserver | TCP | Load Balancing | 6443 | 6443 | Master | +| ks-console | TCP | Load Balancing | 30880 | 30880 | Master | +| http | TCP | Load Balancing | 80 | 80 | Node | +| https | TCP | Load Balancing | 443 | 443 | Node | + +## Create HA Cluster Infrastructrue + +You don't have to create these resources one by one. According to the best practice of **infrastructure as code** on Azure, all resources in the architecture are already defined as ARM templates. + +### Prepare machines + +1. Click the **Deploy** button below, and you will be redirected to Azure and asked to fill in deployment parameters. + + Deploy to Azure Visualize + +2. On the page that appears, only few parameters need to be changed. Click **Create new** under **Resource group** and enter a name such as `KubeSphereVMRG`. + +3. Enter **Admin Username**. + +4. Copy your public SSH key for the field **Admin Key**. Alternatively, create a new one with `ssh-keygen`. + + ![azure-template-parameters](/images/docs/v3.3/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-azure-vms/azure-template-parameters.png) + + {{< notice note >}} + +Password authentication is restricted in Linux configurations. Only SSH is acceptable. + +{{}} + +5. Click **Purchase** at the bottom to continue. + +### Review Azure resources in the Portal + +After successfully created, all the resources will display in the resource group `KubeSphereVMRG`. Record the public IP of the load balancer and the private IP addresses of the VMs. You will need them later. + +![New Created Resources](/images/docs/v3.3/aks/azure-vm-all-resources.png) + +## Deploy Kubernetes and KubeSphere + +Execute the following commands on your device or connect to one of the Master VMs through SSH. During the installation, files will be downloaded and distributed to each VM. 
+ +```bash +# copy your private ssh to master-0 +scp -P 50200 ~/.ssh/id_rsa kubesphere@40.81.5.xx:/home/kubesphere/.ssh/ + +# ssh to the master-0 +ssh -i .ssh/id_rsa2 -p50200 kubesphere@40.81.5.xx +``` + +### Download KubeKey + +[Kubekey](../../../installing-on-linux/introduction/kubekey/) is a brand-new installation tool which provides an easy, fast and flexible way to install Kubernetes and KubeSphere. + +1. Download it so that you can generate a configuration file in the next step. + + {{< tabs >}} + + {{< tab "Good network connections to GitHub/Googleapis" >}} + +Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "Poor network connections to GitHub/Googleapis" >}} + +Run the following command first to make sure you download KubeKey from the correct zone. + +```bash +export KKZONE=cn +``` + +Run the following command to download KubeKey: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +After you download KubeKey, if you transfer it to a new machine also with poor network connections to Googleapis, you must run `export KKZONE=cn` again before you proceed with the steps below. + +{{}} + +{{}} + +{{}} + + {{< notice note >}} + +The commands above download the latest release of KubeKey. You can change the version number in the command to download a specific version. + +{{}} + + Make `kk` executable: + + ```bash + chmod +x kk + ``` + +1. Create an example configuration file with default configurations. Here Kubernetes v1.22.12 is used as an example. + + ```bash + ./kk create config --with-kubesphere v3.3.2 --with-kubernetes v1.22.12 + ``` + + {{< notice note >}} + +- Recommended Kubernetes versions for KubeSphere 3.3: v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and * v1.24.x. For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatability. Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x. If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.23.10 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../../installing-on-linux/introduction/kubekey/#support-matrix). + +- If you do not add the flag `--with-kubesphere` in the command in this step, KubeSphere will not be deployed unless you install it using the `addons` field in the configuration file or add this flag again when you use `./kk create cluster` later. +- If you add the flag `--with-kubesphere` without specifying a KubeSphere version, the latest version of KubeSphere will be installed. 
+ +{{}} + +### Example configurations + +```yaml +spec: + hosts: + - {name: master-0, address: 40.81.5.xx, port: 50200, internalAddress: 10.0.1.4, user: kubesphere, privateKeyPath: "~/.ssh/id_rsa"} + - {name: master-1, address: 40.81.5.xx, port: 50201, internalAddress: 10.0.1.5, user: kubesphere, privateKeyPath: "~/.ssh/id_rsa"} + - {name: master-2, address: 40.81.5.xx, port: 50202, internalAddress: 10.0.1.6, user: kubesphere, privateKeyPath: "~/.ssh/id_rsa"} + - {name: node000000, address: 40.81.5.xx, port: 50100, internalAddress: 10.0.0.4, user: kubesphere, privateKeyPath: "~/.ssh/id_rsa"} + - {name: node000001, address: 40.81.5.xx, port: 50101, internalAddress: 10.0.0.5, user: kubesphere, privateKeyPath: "~/.ssh/id_rsa"} + - {name: node000002, address: 40.81.5.xx, port: 50102, internalAddress: 10.0.0.6, user: kubesphere, privateKeyPath: "~/.ssh/id_rsa"} + roleGroups: + etcd: + - master-0 + - master-1 + - master-2 + control-plane: + - master-0 + - master-1 + - master-2 + worker: + - node000000 + - node000001 + - node000002 +``` +For more information, see [this file](https://github.com/kubesphere/kubekey/blob/release-2.2/docs/config-example.md). + +### Configure the load balancer + +In addition to node information, you need to configure your load balancer in the same YAML file. For the IP address, you can find it in **Azure > KubeSphereVMRG > PublicLB**. Assume the IP address and listening port of the load balancer are `40.81.5.xx` and `6443` respectively, and you can refer to the following example. + +```yaml +## Public LB config example +## apiserver_loadbalancer_domain_name: "lb.kubesphere.local" + controlPlaneEndpoint: + domain: lb.kubesphere.local + address: "40.81.5.xx" + port: 6443 +``` + +{{< notice note >}} + +The public load balancer is used directly instead of an internal load balancer due to Azure [Load Balancer limits](https://docs.microsoft.com/en-us/azure/load-balancer/load-balancer-troubleshoot#cause-4-accessing-the-internal-load-balancer-frontend-from-the-participating-load-balancer-backend-pool-vm). + +{{}} + +### Persistent storage plugin configurations + +See [Persistent Storage Configurations](../../../installing-on-linux/persistent-storage-configurations/understand-persistent-storage/) for details. + +### Configure the network plugin + +Azure Virtual Network doesn't support the IPIP mode used by [Calico](https://docs.projectcalico.org/reference/public-cloud/azure#about-calico-on-azure). You need to change the network plugin to `flannel`. + +```yaml + network: + plugin: flannel + kubePodsCIDR: 10.233.64.0/18 + kubeServiceCIDR: 10.233.0.0/18 +``` + +### Create a cluster + +1. After you complete the configuration, you can execute the following command to start the installation: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +2. Inspect the logs of installation: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + +3. When the installation finishes, you can see the following message: + + ```bash + ##################################################### + ### Welcome to KubeSphere! ### + ##################################################### + Console: http://10.128.0.44:30880 + Account: admin + Password: P@88w0rd + NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + the "Cluster Management". 
If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. + ##################################################### + https://kubesphere.io 2020-xx-xx xx:xx:xx + ``` + +4. Access the KubeSphere console using `:30880` with the default account and password (`admin/P@88w0rd`). + +## Add Additional Ports + +As the Kubernetes cluster is set up on Azure instances directly, the load balancer is not integrated with [Kubernetes Services](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). However, you can still manually map the NodePort to the load balancer. There are 2 steps required. + +1. Create a new Load Balance Rule in the load balancer. + ![Load Balancer](/images/docs/v3.3/aks/azure-vm-loadbalancer-rule.png) +2. Create an Inbound Security rule to allow Internet access in the Network Security Group. + ![Firewall](/images/docs/v3.3/aks/azure-vm-firewall.png) diff --git a/content/en/docs/v3.4/installing-on-linux/public-cloud/install-kubesphere-on-qingcloud-vms.md b/content/en/docs/v3.4/installing-on-linux/public-cloud/install-kubesphere-on-qingcloud-vms.md new file mode 100644 index 000000000..ebb7b9877 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/public-cloud/install-kubesphere-on-qingcloud-vms.md @@ -0,0 +1,341 @@ +--- +title: "Deploy KubeSphere on QingCloud Instances" +keywords: "KubeSphere, Installation, HA, High-availability, LoadBalancer" +description: "Learn how to create a high-availability cluster on QingCloud platform." +linkTitle: "Deploy KubeSphere on QingCloud Instances" +Weight: 3420 +--- + +## Introduction + +For a production environment, you need to consider the high availability of the cluster. If key components (for example, kube-apiserver, kube-scheduler, and kube-controller-manager) are all running on the same control plane node, Kubernetes and KubeSphere will be unavailable once the control plane node goes down. Therefore, you need to set up a high-availability cluster by provisioning load balancers with multiple control plane nodes. You can use any cloud load balancer, or any hardware load balancer (for example, F5). In addition, Keepalived and [HAproxy](https://www.haproxy.com/), or Nginx is also an alternative for creating high-availability clusters. + +This tutorial walks you through an example of how to create two [QingCloud load balancers](https://docs.qingcloud.com/product/network/loadbalancer), serving as the internal load balancer and external load balancer respectively, and of how to implement high availability of control plane and etcd nodes using the load balancers. + +## Prerequisites + +- Make sure you already know how to install KubeSphere on a multi-node cluster by following the [guide](../../../installing-on-linux/introduction/multioverview/). For detailed information about the configuration file that is used for installation, see [Edit the configuration file](../../../installing-on-linux/introduction/multioverview/#2-edit-the-configuration-file). This tutorial focuses more on how to configure load balancers. +- You need a [QingCloud](https://console.qingcloud.com/login) account to create load balancers, or follow the guide of any other cloud provider to create load balancers. +- For a production environment, it is recommended that you prepare persistent storage and create a StorageClass in advance. For development and testing, you can use the integrated OpenEBS to provision LocalPV as the storage service directly. 
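+
+If you prefer the Keepalived and HAProxy alternative mentioned in the introduction instead of cloud load balancers, the core of the proxy configuration is simply forwarding TCP traffic on port 6443 to every control plane node. A hypothetical sketch (the IP addresses are placeholders and are not part of this tutorial's QingCloud setup):
+
+```bash
+# Append an api-server frontend/backend to the HAProxy configuration
+# (run on the machine hosting HAProxy; adjust the node IPs to your own)
+cat <<'EOF' | sudo tee -a /etc/haproxy/haproxy.cfg
+frontend kube-apiserver
+    bind *:6443
+    mode tcp
+    default_backend kube-apiserver
+
+backend kube-apiserver
+    mode tcp
+    balance roundrobin
+    option tcp-check
+    server master0 192.168.0.2:6443 check
+    server master1 192.168.0.3:6443 check
+    server master2 192.168.0.4:6443 check
+EOF
+
+sudo systemctl restart haproxy
+```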
+ +## Architecture + +This example prepares six machines of **Ubuntu 16.04.6**. You will create two load balancers, and deploy three control plane nodes and etcd nodes on three of the machines. You can configure these control plane and etcd nodes in `config-sample.yaml` created by KubeKey (Please note that this is the default name, which can be changed by yourself). + +![ha-architecture](/images/docs/v3.3/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/ha-architecture.png) + +{{< notice note >}} + +The Kubernetes document [Options for Highly Available topology](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/) demonstrates that there are two options for configuring the topology of a highly available (HA) Kubernetes cluster, i.e. stacked etcd topology and external etcd topology. You should carefully consider the advantages and disadvantages of each topology before setting up an HA cluster according to [this document](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/). This tutorial adopts stacked etcd topology to bootstrap an HA cluster for demonstration purposes. + +{{}} + +## Install an HA Cluster + +### Step 1: Create load balancers + +This step demonstrates how to create load balancers on the QingCloud platform. + +#### Create an internal load balancer + +1. Log in to the [QingCloud console](https://console.qingcloud.com/login). In the menu on the left, under **Network & CDN**, select **Load Balancers**. Click **Create** to create a load balancer. + + ![create-lb](/images/docs/v3.3/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/create-lb.png) + +2. In the pop-up window, set a name for the load balancer. Choose the VxNet where your machines are created from the **Network** drop-down list. Here is `pn`. Other fields can be default values as shown below. Click **Submit** to finish. + + ![qingcloud-lb](/images/docs/v3.3/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/qingcloud-lb.png) + +3. Click the load balancer. On the detail page, create a listener that listens on port `6443` with the **Listener Protocol** set to `TCP`. + + ![listener](/images/docs/v3.3/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/listener.png) + + - **Name**: Define a name for this Listener + - **Listener Protocol**: Select `TCP` protocol + - **Port**: `6443` + - **Balance mode**: `Poll` + + Click **Submit** to continue. + + {{< notice note >}} + + After you create the listener, check the firewall rules of the load balancer. Make sure that port `6443` has been added to the firewall rules and that external traffic is allowed to port `6443`. Otherwise, the installation will fail. You can find the information in **Security Groups** under **Security** on the QingCloud platform. + + {{}} + +4. Click **Add Backend**, and choose the VxNet you just selected (in this example, it is `pn`). Click **Advanced Search**, choose the three control plane nodes, and set the port to `6443` which is the default secure port of api-server. + + ![3-master](/images/docs/v3.3/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/3-master.png) + + Click **Submit** when you finish. + +5. Click **Apply Changes** to use the configurations. 
At this point, you can find the three control plane nodes have been added as the backend servers of the listener that is behind the internal load balancer. + + {{< notice note >}} + + The status of all control plane nodes might show **Not Available** after you added them as backends. This is normal since port `6443` of api-server is not active on control plane nodes yet. The status will change to **Active** and the port of api-server will be exposed after the installation finishes, which means the internal load balancer you configured works as expected. + + {{}} + + ![apply-change](/images/docs/v3.3/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/apply-change.png) + + Record the Intranet VIP shown under **Networks**. The IP address will be added later to the configuration file. + +#### Create an external load balancer + +You need to create an EIP in advance. To create an EIP, go to **Elastic IPs** under **Networks & CDN**. + +{{< notice note >}} + +Two elastic IPs are needed for this tutorial, one for the VPC network and the other for the external load balancer created in this step. You cannot associate the same EIP to the VPC network and the load balancer at the same time. + +{{}} + +1. Similarly, create an external load balancer while don't select VxNet for the **Network** field. Bind the EIP that you created to this load balancer by clicking **Add IPv4**. + + ![bind-eip](/images/docs/v3.3/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/bind-eip.png) + +2. On the load balancer's detail page, create a listener that listens on port `30880` (NodePort of KubeSphere console) with **Listener Protocol** set to `HTTP`. + + {{< notice note >}} + + After you create the listener, check the firewall rules of the load balancer. Make sure that port `30880` has been added to the firewall rules and that external traffic is allowed to port `30880`. Otherwise, the installation will fail. You can find the information in **Security Groups** under **Security** on the QingCloud platform. + + {{}} + + ![listener2](/images/docs/v3.3/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/listener2.png) + +3. Click **Add Backend**. In **Advanced Search**, choose the `six` machines on which you are going to install KubeSphere within the VxNet `pn`, and set the port to `30880`. + + ![six-instances](/images/docs/v3.3/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/six-instances.png) + + Click **Submit** when you finish. + +4. Click **Apply Changes** to use the configurations. At this point, you can find the six machines have been added as the backend servers of the listener that is behind the external load balancer. + +### Step 2: Download KubeKey + +[Kubekey](https://github.com/kubesphere/kubekey) is the next-gen installer which provides an easy, fast and flexible way to install Kubernetes and KubeSphere. + +Follow the step below to download KubeKey. + +{{< tabs >}} + +{{< tab "Good network connections to GitHub/Googleapis" >}} + +Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly. + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "Poor network connections to GitHub/Googleapis" >}} + +Run the following command first to make sure you download KubeKey from the correct zone. 
+
+```bash
+export KKZONE=cn
+```
+
+Run the following command to download KubeKey:
+
+```bash
+curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh -
+```
+
+{{< notice note >}}
+
+After you download KubeKey, if you transfer it to a new machine also with poor network connections to Googleapis, you must run `export KKZONE=cn` again before you proceed with the steps below.
+
+{{</ notice >}}
+
+{{</ tab >}}
+
+{{</ tabs >}}
+
+{{< notice note >}}
+
+The commands above download KubeKey v3.0.7. You can change the version number in the command to download a specific version.
+
+{{</ notice >}}
+
+Make `kk` executable:
+
+```bash
+chmod +x kk
+```
+
+Create an example configuration file with default configurations. Here Kubernetes v1.22.12 is used as an example.
+
+```bash
+./kk create config --with-kubesphere v3.3.2 --with-kubernetes v1.22.12
+```
+
+{{< notice note >}}
+
+- Recommended Kubernetes versions for KubeSphere 3.3: v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and * v1.24.x. For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatibility. Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x. If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.23.10 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../../installing-on-linux/introduction/kubekey/#support-matrix).
+
+- If you do not add the flag `--with-kubesphere` in the command in this step, KubeSphere will not be deployed unless you install it using the `addons` field in the configuration file or add this flag again when you use `./kk create cluster` later.
+
+- If you add the flag `--with-kubesphere` without specifying a KubeSphere version, the latest version of KubeSphere will be installed.
+
+{{</ notice >}}
+
+### Step 3: Set cluster nodes
+
+As you adopt the HA topology with stacked control plane nodes, the control plane nodes and etcd nodes run on the same three machines.
+
+| **Property** | **Description** |
+| :----------- | :-------------------------------- |
+| `hosts` | Detailed information of all nodes |
+| `etcd` | etcd node names |
+| `control-plane` | Control plane node names |
+| `worker` | Worker node names |
+
+Put the control plane nodes (`master1`, `master2` and `master3`) under both `etcd` and `control-plane` as below, which means these three machines serve as both control plane and etcd nodes. Note that the number of etcd nodes must be odd. Meanwhile, it is not recommended that you install etcd on worker nodes because etcd's memory consumption is very high.
+
+#### config-sample.yaml Example
+
+```yaml
+spec:
+  hosts:
+  - {name: master1, address: 192.168.0.2, internalAddress: 192.168.0.2, user: ubuntu, password: Testing123}
+  - {name: master2, address: 192.168.0.3, internalAddress: 192.168.0.3, user: ubuntu, password: Testing123}
+  - {name: master3, address: 192.168.0.4, internalAddress: 192.168.0.4, user: ubuntu, password: Testing123}
+  - {name: node1, address: 192.168.0.5, internalAddress: 192.168.0.5, user: ubuntu, password: Testing123}
+  - {name: node2, address: 192.168.0.6, internalAddress: 192.168.0.6, user: ubuntu, password: Testing123}
+  - {name: node3, address: 192.168.0.7, internalAddress: 192.168.0.7, user: ubuntu, password: Testing123}
+  roleGroups:
+    etcd:
+    - master1
+    - master2
+    - master3
+    control-plane:
+    - master1
+    - master2
+    - master3
+    worker:
+    - node1
+    - node2
+    - node3
+```
+
+For a complete configuration sample explanation, see [this file](https://github.com/kubesphere/kubekey/blob/release-2.2/docs/config-example.md).
+
+### Step 4: Configure the load balancer
+
+In addition to the node information, you need to provide the load balancer information in the same YAML file. You can find the Intranet VIP address in the last part of creating [an internal load balancer](#step-1-create-load-balancers). Assume the VIP address and listening port of the **internal load balancer** are `192.168.0.253` and `6443` respectively; you can refer to the following example.
+
+#### The configuration example in config-sample.yaml
+
+```yaml
+## Internal LB config example
+## apiserver_loadbalancer_domain_name: "lb.kubesphere.local"
+  controlPlaneEndpoint:
+    domain: lb.kubesphere.local
+    address: "192.168.0.253"
+    port: 6443
+```
+
+{{< notice note >}}
+
+- The address and port should be indented by two spaces in `config-sample.yaml`, and the address should be the VIP.
+- The domain name of the load balancer is `lb.kubesphere.local` by default for internal access. If you need to change the domain name, uncomment and modify it.
+
+{{</ notice >}}
+
+### Step 5: Kubernetes cluster configurations (Optional)
+
+KubeKey provides fields and parameters that allow the cluster administrator to customize the Kubernetes installation, including the Kubernetes version, network plugin and image registry. There are some default values provided in `config-sample.yaml`. You can modify Kubernetes-related configurations in the file based on your needs. For more information, see [Kubernetes Cluster Configurations](../../../installing-on-linux/introduction/vars/).
+
+### Step 6: Persistent storage plugin configurations
+
+Considering data persistence in a production environment, you need to prepare persistent storage and configure the storage plugin (for example, CSI) in `config-sample.yaml` to define which storage service you want.
+
+{{< notice note >}}
+
+For testing or development, you can skip this part. KubeKey will use the integrated OpenEBS to provision LocalPV as the storage service directly.
+
+{{</ notice >}}
+
+**Available storage plugins and clients**
+
+- Ceph RBD & CephFS
+- GlusterFS
+- QingCloud CSI
+- QingStor CSI
+- More plugins will be supported in future releases
+
+Make sure you have configured the storage plugin before you get started. KubeKey will create a StorageClass and persistent volumes for related workloads during the installation. For more information, see [Persistent Storage Configurations](../../../installing-on-linux/persistent-storage-configurations/understand-persistent-storage/).
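+
+If you skip this step for a test environment, you can still confirm the storage setup once the installation finishes. The commands below are a minimal check that assumes `kubectl` access to the new cluster; the class name mentioned in the comments comes from the integrated OpenEBS LocalPV provisioner and may differ if you configured your own plugin.
+
+```bash
+# List StorageClasses after installation; with the integrated OpenEBS LocalPV
+# provisioner, a default class (typically named "local") should be present.
+kubectl get storageclass
+
+# Check that PersistentVolumeClaims created for KubeSphere workloads are bound.
+kubectl get pvc --all-namespaces
+```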
+ +### Step 7: Enable pluggable components (Optional) + +KubeSphere has decoupled some core feature components since v2.1.0. These components are designed to be pluggable which means you can enable them either before or after installation. By default, KubeSphere will be installed with the minimal package if you do not enable them. + +You can enable any of them according to your demands. It is highly recommended that you install these pluggable components to discover the full-stack features and capabilities provided by KubeSphere. Make sure your machines have sufficient CPU and memory before you enable them. See [Enable Pluggable Components](../../../pluggable-components/) for details. + +### Step 8: Start to bootstrap a cluster + +After you complete the configuration, you can execute the following command to start the installation: + +```bash +./kk create cluster -f config-sample.yaml +``` + +### Step 9: Verify the installation + +Inspect the logs of installation. When you see output logs as follows, it means KubeSphere has been successfully deployed. + +```bash +kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f +``` + +```bash +##################################################### +### Welcome to KubeSphere! ### +##################################################### + +Console: http://192.168.0.3:30880 +Account: admin +Password: P@88w0rd + +NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. + +##################################################### +https://kubesphere.io 2020-08-13 10:50:24 +##################################################### +``` + +### Step 10: Verify the HA cluster + +Now that you have finished the installation, go back to the detail page of both the internal and external load balancers to see the status. + +![active](/images/docs/v3.3/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/active.png) + +Both listeners show that the status is **Active**, meaning nodes are up and running. + +![active-listener](/images/docs/v3.3/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/active-listener.png) + +In the web console of KubeSphere, you can also see that all the nodes are functioning well. + +To verify if the cluster is highly available, you can turn off an instance on purpose. For example, the above console is accessed through the address `IP: 30880` (the EIP address here is the one bound to the external load balancer). If the cluster is highly available, the console will still work well even if you shut down a control plane node. 
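+
+You can also probe the API server through the internal load balancer endpoint directly. The commands below are a rough check that assumes the example domain and VIP configured earlier; run them from a cluster node before and after powering off one control plane node. Any HTTP response (even `401`/`403`) shows that the endpoint is still reachable through the load balancer.
+
+```bash
+# Query kube-apiserver through the internal load balancer endpoint
+# defined in config-sample.yaml (lb.kubesphere.local:6443 in this example).
+curl -k https://lb.kubesphere.local:6443/version
+
+# Confirm that the remaining nodes are still Ready.
+kubectl get nodes -o wide
+```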
+ +## See Also + +[Multi-node Installation](../../../installing-on-linux/introduction/multioverview/) + +[Kubernetes Cluster Configurations](../../../installing-on-linux/introduction/vars/) + +[Persistent Storage Configurations](../../../installing-on-linux/persistent-storage-configurations/understand-persistent-storage/) + +[Enable Pluggable Components](../../../pluggable-components/) \ No newline at end of file diff --git a/content/en/docs/v3.4/installing-on-linux/uninstall-kubesphere-and-Kubernetes.md b/content/en/docs/v3.4/installing-on-linux/uninstall-kubesphere-and-Kubernetes.md new file mode 100644 index 000000000..dec6bdf57 --- /dev/null +++ b/content/en/docs/v3.4/installing-on-linux/uninstall-kubesphere-and-Kubernetes.md @@ -0,0 +1,25 @@ +--- +title: "Uninstall KubeSphere and Kubernetes" +keywords: 'Kubernetes, KubeSphere, uninstalling, remove-cluster' +description: 'Remove KubeSphere and Kubernetes from your machines.' +linkTitle: "Uninstall KubeSphere and Kubernetes" +weight: 3700 +--- + + +Uninstalling KubeSphere and Kubernetes means they will be removed from your machine. This operation is irreversible and does not have any backup. Please be cautious with the operation. + +To delete your cluster, execute the following command. + +- If you installed KubeSphere with the quickstart ([all-in-one](../../quick-start/all-in-one-on-linux/)): + + ```bash + ./kk delete cluster + ``` + +- If you installed KubeSphere with the advanced mode ([created with a configuration file](../introduction/multioverview/#step-3-create-a-cluster)): + + ```bash + ./kk delete cluster [-f config-sample.yaml] + ``` + diff --git a/content/en/docs/v3.4/introduction/_index.md b/content/en/docs/v3.4/introduction/_index.md new file mode 100644 index 000000000..f32ccee95 --- /dev/null +++ b/content/en/docs/v3.4/introduction/_index.md @@ -0,0 +1,14 @@ +--- +title: "Introduction to KubeSphere" +description: "Help you to better understand KubeSphere with detailed graphics and contents" +layout: "second" + +linkTitle: "Introduction" + +weight: 1000 + +icon: "/images/docs/v3.3/docs.svg" + +--- + +This chapter gives you an overview of the basic concept of KubeSphere, features, advantages, uses cases and more. diff --git a/content/en/docs/v3.4/introduction/advantages.md b/content/en/docs/v3.4/introduction/advantages.md new file mode 100644 index 000000000..e0ec4d1f5 --- /dev/null +++ b/content/en/docs/v3.4/introduction/advantages.md @@ -0,0 +1,92 @@ +--- +title: "Advantages" +keywords: "KubeSphere, Kubernetes, Advantages" +description: "KubeSphere Advantages" +linkTitle: "Advantages" +weight: 1600 +--- + +## Vision + +Kubernetes has become the de facto standard for deploying containerized applications at scale in private, public and hybrid cloud environments. However, many people can easily get confused when they start to use Kubernetes as it is complicated and has many additional components to manage. Some components need to be installed and deployed by users themselves, such as storage and network services. At present, Kubernetes only provides open-source solutions or projects, which can be difficult to install, maintain and operate to some extent. For users, it is not always easy to quickly get started as they are faced with a steep learning curve. + +KubeSphere is designed to reduce or eliminate many Kubernetes headaches related to building, deployment, management, observability and so on. 
It provides comprehensive services and automates provisioning, scaling and management of applications so that you can focus on code writing. More specifically, KubeSphere boasts an extensive portfolio of features including multi-cluster management, application lifecycle management, multi-tenant management, CI/CD pipelines, service mesh, and observability (monitoring, logging, alerting, notifications, auditing and events). + +As a comprehensive open-source platform, KubeSphere strives to make the container platform more user-friendly and powerful. For example, KubeSphere provides a highly interactive web console for test and operation. For users who are accustomed to command-line tools, they can quickly get familiar with KubeSphere as kubectl is integrated in the platform. As such, users can create and modify their resources with the minimal learning curve. + +In addition, KubeSphere offers excellent solutions to storage and network. Apart from the major open-source storage solutions such as Ceph RBD and GlusterFS, users are also provided with [QingCloud Block Storage](https://docs.qingcloud.com/product/storage/volume/) and [QingStor NeonSAN](https://docs.qingcloud.com/product/storage/volume/super_high_performance_shared_volume/), developed by QingCloud for persistent storage. With the integrated QingCloud CSI and NeonSAN CSI plugins, enterprises can enjoy a more stable and secure services of their apps and data. + +## Why KubeSphere + +KubeSphere provides high-performance and scalable container service management for enterprises. It aims to help them accomplish digital transformation driven by cutting-edge technologies, and accelerate app iteration and business delivery to meet the ever-changing needs of enterprises. + +Here are the six major advantages of KubeSphere. + +### Unified management of clusters across cloud providers + +As container usage ramps up, enterprises are faced with increased complexity of cluster management as they deploy clusters across cloud and on-premises environments. To address the urgent need of users for a uniform platform to manage heterogeneous clusters, KubeSphere sees a major feature enhancement with substantial benefits. Users can leverage KubeSphere to manage, monitor, import and operate clusters across regions, clouds and environments. + +The feature can be enabled both before and after the installation. In particular, it features: + +**Unified Management**. Users can import Kubernetes clusters either through direct connection or agent connection. With simple configurations, the process can be done within minutes on the interactive web console. Once clusters are imported, users are able to monitor the status and operate on cluster resources through a central control plane. + +**High Availability**. In the multi-cluster architecture of KubeSphere, a cluster can run major services with another one serving as the backup. When the major one goes down, services can be quickly taken over by another cluster. Besides, when clusters are deployed in different regions, requests can be sent to the closest one for low latency. In this way, high availability is achieved across zones and clusters. + +For more information, see [Multi-cluster Management](../../multicluster-management/). + +### Powerful observability + +The observability feature of KubeSphere has been greatly improved with key building blocks enhanced, including monitoring, logging, auditing, events, alerting and notification. 
The highly functional system allows users to observe virtually everything that happens in the platform. It has much to offer for users with distinct advantages listed as below: + +**Customized**. Users are allowed to customize their own monitoring dashboard with multiple display forms available. They can set their own templates based on their needs, add the metric they want to monitor and even choose the display color they prefer. Alerting policies and rules can all be customized as well, including repetition interval, time and threshold. + +**Diversified**. Ops teams are freed from the complicated work of recording massive data as KubeSphere monitors resources from virtually all dimensions. It also features an efficient notification system with diversified channels for users to choose from, such as email, Slack and WeChat Work. On the back of the multi-tenant system of KubeSphere, different tenants are able to query logs, events and auditing logs which are only accessible to them. Filters, keywords, and fuzzy and exact query are supported. + +**Visualized and Interactive**. KubeSphere presents users with a graphic web console, especially for the monitoring of different resources. They are displayed in highly interactive graphs that give users a clear view of what is happening inside a cluster. Resources at different levels can also be sorted based on their usage, which is convenient for users to compare for further data analysis. + +**Accurate**. The entire monitoring system functions at second-level precision that allow users to quickly locate any component failures. In terms of events and auditing, all activities are accurately recorded for future reference. + +For more information, see related sections in [Cluster Administration](../../cluster-administration/), [Project User Guide](../../project-user-guide/) and [Toolbox](../../toolbox/). + +### Automated DevOps + +Automation represents a key part of implementing DevOps. With automatic, streamlined pipelines in place, users are better positioned to distribute apps in terms of continuous delivery and integration. + +**Jenkins-powered**. The KubeSphere DevOps system is built with Jenkins as the engine, which is abundant in plugins. On top of that, Jenkins provides an enabling environment for extension development, making it possible for the DevOps team to work smoothly across the whole process (developing, testing, building, deploying, monitoring, logging, notifying, etc.) in a unified platform. The KubeSphere account can also be used for the built-in Jenkins, meeting the demand of enterprises for multi-tenant isolation of CI/CD pipelines and unified authentication. + +**Convenient built-in tools**. Users can easily take advantage of automation tools (for example, Binary-to-Image and Source-to-Image) even without a thorough understanding of how Docker or Kubernetes works. They only need to submit a registry address or upload binary files (for example, JAR/WAR/Binary). Ultimately, services will be released to Kubernetes automatically without any coding in a Dockerfile. + +For more information, see [DevOps User Guide](../../devops-user-guide/). + +### Fine-grained access control + +KubeSphere supports fine-grained access control across different levels, including clusters, workspaces and projects. Users with specific roles can operate on different resources. + +**Self-defined**. Apart from system roles, KubeSphere empowers users to define their roles with a spectrum of operations that they can assign to tenants. 
This meets the need of enterprises for detailed task allocation as they can decide who should be responsible for what while not being affected by irrelevant resources. + +**Secure**. As tenants at different levels are completely isolated from each other, they can share resources while not affecting one another. The network can also be completely isolated to ensure data security. + +For more information, see Role and Member Management in [Workspaces](../../workspace-administration/role-and-member-management/) and [Projects](../../project-administration/role-and-member-management/) respectively. + +### Out-of-box microservices governance + +On the back of Istio, KubeSphere features multiple grayscale strategies. All these features are out of the box, which means consistent user experiences without any code hacking. Here are two major advantages of microservices governance, or service mesh in KubeSphere: + +- **Comprehensive**. KubeSphere provides users with a well-diversified portfolio of solutions to traffic management, including canary release, blue-green deployment, traffic mirroring and circuit breaking. +- **Visualized**. With a highly interactive web console, KubeSphere allows users to view how microservices interconnect with each other in a straightforward way. This helps users to monitor apps, locate failures, and improve performance. + +KubeSphere aims to make service-to-service calls within the microservices architecture reliable and fast. For more information, see [Project User Guide](../../project-user-guide/). + +### Vibrant open source community + +As an open-source project, KubeSphere represents more than just a container platform for app deployment and distribution. The KubeSphere team believes that a true open-source model focuses more on sharing, discussions and problem solving with everyone involved. Together with partners, ambassadors and contributors, and other community members, the KubeSphere team files issues, submits pull requests, participates in meetups, and exchanges ideas of innovation. + +The KubeSphere community has the capabilities and technical know-how to help you share the benefits that the open-source model can offer. More importantly, it is home to open-source enthusiasts from around the world who make everything here possible. + +**Partners**. KubeSphere partners play a critical role in KubeSphere's go-to-market strategy. They can be app developers, technology companies, cloud providers or go-to-market partners, all of whom drive the community ahead in their respective aspects. + +**Ambassadors**. As community representatives, ambassadors promote KubeSphere in a variety of ways (for example, activities, blogs and user cases) so that more people can join the community. + +**Contributors**. KubeSphere contributors help the whole community by contributing to code or documentation. You don't need to be an expert while you can still make a difference even it is a minor code fix or language improvement. + +For more information, see [Partner Program](https://kubesphere.io/partner/) and [Community Governance](https://kubesphere.io/contribution/). 
\ No newline at end of file diff --git a/content/en/docs/v3.4/introduction/architecture.md b/content/en/docs/v3.4/introduction/architecture.md new file mode 100644 index 000000000..bc38abdfc --- /dev/null +++ b/content/en/docs/v3.4/introduction/architecture.md @@ -0,0 +1,44 @@ +--- +title: "Architecture" +keywords: "kubesphere, kubernetes, docker, helm, jenkins, istio, prometheus, devops, service mesh" +description: "KubeSphere architecture" + +linkTitle: "Architecture" +weight: 1500 +--- + +## Separation of frontend and backend + +KubeSphere separates [frontend](https://github.com/kubesphere/console) from [backend](https://github.com/kubesphere/kubesphere), and it itself is a cloud native application and provides open standard REST APIs for external systems to use. Please see [API documentation](../../reference/api-docs/) for details. The following figure is the system architecture. KubeSphere can run anywhere from on-premise datacenter to any cloud to edge. In addition, it can be deployed on any Kubernetes distribution. + +![Architecture](https://pek3b.qingstor.com/kubesphere-docs/png/20190810073322.png) + +## Components List + +| Back-end component | Function description | +|---|---| +| ks-apiserver | The KubeSphere API server validates and configures data for the API objects which include Kubernetes objects. The API Server services REST operations and provides the frontend to the cluster's shared state through which all other components interact. | +| ks-console | KubeSphere console offers KubeSphere console service | +| ks-controller-manager | KubeSphere controller takes care of business logic, for example, when create a workspace, the controller will automatically create corresponding permissions and configurations for it. | +| metrics-server | Kubernetes monitoring component collects metrics from Kubelet on each node. | +| Prometheus | provides monitoring metrics and services of clusters, nodes, workloads, API objects. | +| Elasticsearch | provides log indexing, querying and data management. Besides the built-in service, KubeSphere supports the integration of external Elasticsearch service. | +| Fluent Bit | collects logs and forwarding them to ElasticSearch or Kafka. | +| Jenkins | provides CI/CD pipeline service. | +| SonarQube | is an optional component that provides code static checking and quality analysis. | +| Source-to-Image | automatically compiles and packages source code into Docker image. | +| Istio | provides microservice governance and traffic control, such as grayscale release, canary release, circuit break, traffic mirroring and so on. | +| Jaeger | collects sidecar data and provides distributed tracing service. | +| OpenPitrix | provides application lifecycle management such as template management, deployment, app store management, etc. | +| Alert | provides configurable alert service for cluster, workload, Pod, and container etc. | +| Notification | is an integrated notification service; it currently supports mail delivery method. | +| Redis | caches the data of ks-console and ks-account. | +| MySQL | is the shared database for cluster back-end components including monitoring, alarm, DevOps, OpenPitrix etc. | +| PostgreSQL | SonarQube and Harbor's back-end database | +| OpenLDAP | is responsible for centralized storage and management of user account and integrates with external LDAP server. | +| Storage | built-in CSI plug-in collecting cloud platform storage services. It supports open source NFS/Ceph/Gluster client. 
| +| Network | supports Calico/Flannel and other open source network plug-ins to integrate with cloud platform SDN. | + +## Service Components + +Each component has many services. See [Overview](../../pluggable-components/overview/) for more details. diff --git a/content/en/docs/v3.4/introduction/ecosystem.md b/content/en/docs/v3.4/introduction/ecosystem.md new file mode 100644 index 000000000..0df39cb38 --- /dev/null +++ b/content/en/docs/v3.4/introduction/ecosystem.md @@ -0,0 +1,15 @@ +--- +title: "KubeSphere Ecosystem Tools" +keywords: 'Kubernetes, KubeSphere, ecosystem tools' +description: 'KubeSphere ecosystem tools' +linkTitle: "KubeSphere Ecosystem Tools" +weight: 1200 +--- + +## Abundant Ecosystem Tools + +KubeSphere integrates **a wide breadth of major ecosystem tools related to Kubernetes**, ranging from cloud-native apps to the underlying container runtimes. These open-source projects serve as the backend components of KubeSphere, which interact with the KubeSphere console through standard APIs, thus providing consistent user experiences to reduce complexity. + +KubeSphere also features new capabilities that are not yet available in upstream Kubernetes, alleviating the pain points of Kubernetes including storage, network, security and usability. Not only does KubeSphere allow developers and DevOps teams use their favorite tools in a unified console, but, most importantly, these functionalities are loosely coupled with the platform since they are pluggable and optional. + +![kubesphere-ecosystem](/images/docs/v3.3/introduction/kubesphere-ecosystem/kubesphere-ecosystem.png) \ No newline at end of file diff --git a/content/en/docs/v3.4/introduction/features.md b/content/en/docs/v3.4/introduction/features.md new file mode 100644 index 000000000..08ec50860 --- /dev/null +++ b/content/en/docs/v3.4/introduction/features.md @@ -0,0 +1,172 @@ +--- +title: "Features" +keywords: "KubeSphere, Kubernetes, Docker, Jenkins, Istio, Features" +description: "KubeSphere Key Features" + +linkTitle: "Features" +weight: 1300 +--- + +## Overview + +As an [open source container platform](https://kubesphere.io/), KubeSphere provides enterprises with a robust, secure and feature-rich platform, boasting the most common functionalities needed for enterprises adopting Kubernetes, such as multi-cluster deployment and management, network policy configuration, Service Mesh (Istio-based), DevOps projects (CI/CD), security management, Source-to-Image and Binary-to-Image, multi-tenant management, multi-dimensional monitoring, log query and collection, alerting and notification, auditing, application management, and image registry management. + +It also supports various open source storage and network solutions, as well as cloud storage services. For example, KubeSphere presents users with a powerful cloud-native tool [OpenELB](https://openelb.github.io/), a CNCF-certified load balancer developed for bare metal Kubernetes clusters. + +With an easy-to-use web console in place, KubeSphere eases the learning curve for users and drives the adoption of Kubernetes. + +![Overview](https://pek3b.qingstor.com/kubesphere-docs/png/20200202153355.png) + +The following modules elaborate on the key features and benefits provided by KubeSphere. For detailed information, see the respective chapter in this guide. 
+ +## Provisioning and Maintaining Kubernetes + +### Provisioning Kubernetes Clusters + +[KubeKey](https://github.com/kubesphere/kubekey) allows you to deploy Kubernetes on your infrastructure out of box, provisioning Kubernetes clusters with high availability. It is recommended that at least three control plane nodes are configured behind a load balancer for production environment. + +### Kubernetes Resource Management + +KubeSphere provides a graphical web console, giving users a clear view of a variety of Kubernetes resources, including Pods and containers, clusters and nodes, workloads, secrets and ConfigMaps, services and Ingress, jobs and CronJobs, and applications. With wizard user interfaces, users can easily interact with these resources for service discovery, HPA, image management, scheduling, high availability implementation, container health check and more. + +As KubeSphere 3.3 features enhanced observability, users are able to keep track of resources from multi-tenant perspectives, such as custom monitoring, events, auditing logs, alerts and notifications. + +### Cluster Upgrade and Scaling + +The next-gen installer [KubeKey](https://github.com/kubesphere/kubekey) provides an easy way of installation, management and maintenance. Moreover, it supports rolling upgrades of Kubernetes clusters so that the cluster service is always available while being upgraded. Also, you can add new nodes to a Kubernetes cluster to include more workloads by using KubeKey. + +## Multi-cluster Management and Deployment + +As the IT world sees a growing number of cloud-native applications reshaping software portfolios for enterprises, users tend to deploy their clusters across locations, geographies, and clouds. Against this backdrop, KubeSphere has undergone a significant upgrade to address the pressing need of users with its brand-new multi-cluster feature. + +With KubeSphere, users can manage the infrastructure underneath, such as adding or deleting clusters. Heterogeneous clusters deployed on any infrastructure (for example, Amazon EKS and Google Kubernetes Engine) can be managed in a unified way. This is made possible by a central control plane of KubeSphere with two efficient management approaches available. + +- **Solo**. Independently deployed Kubernetes clusters can be maintained and managed together in KubeSphere container platform. +- **Federation**. Multiple Kubernetes clusters can be aggregated together as a Kubernetes resource pool. When users deploy applications, replicas can be deployed on different Kubernetes clusters in the pool. In this regard, high availability is achieved across zones and clusters. + +KubeSphere allows users to deploy applications across clusters. More importantly, an application can also be configured to run on a certain cluster. Besides, the multi-cluster feature, paired with [OpenPitrix](https://github.com/openpitrix/openpitrix), an industry-leading application management platform, enables users to manage apps across their whole lifecycle, including release, removal and distribution. + +For more information, see [Multi-cluster Management](../../multicluster-management/). + +## DevOps Support + +KubeSphere provides a pluggable DevOps component based on popular CI/CD tools such as Jenkins. It features automated workflows and tools including binary-to-image (B2I) and source-to-image (S2I) to package source code or binary artifacts into ready-to-run container images. 
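+
+The DevOps system is a pluggable component and is not enabled in a minimal installation. As a rough sketch (see [Enable Pluggable Components](../../pluggable-components/) for the full procedure), it is switched on by editing the `ks-installer` ClusterConfiguration and setting `devops.enabled` to `true`:
+
+```bash
+# Enable the DevOps component after installation by setting devops.enabled to true
+# in the ks-installer ClusterConfiguration, then follow the installer logs.
+kubectl -n kubesphere-system edit clusterconfiguration ks-installer
+
+kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f
+```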
+ +![DevOps](https://pek3b.qingstor.com/kubesphere-docs/png/20200202220455.png) + +### CI/CD Pipeline + +- **Automation**. CI/CD pipelines and build strategies are based on Jenkins, streamlining and automating the development, test and production process. Dependency caches are used to accelerate build and deployment. +- **Out-of-box**. Users can ship their Jenkins build strategy and client plugin to create a Jenkins pipeline based on Git repository/SVN. They can define any step and stage in the built-in Jenkinsfile. Common agent types are embedded, such as Maven, Node.js and Go. Users can customize the agent type as well. +- **Visualization**. Users can easily interact with a visualized control panel to set conditions and manage CI/CD pipelines. +- **Quality Management**. Static code analysis is supported to detect bugs, code smells and security vulnerabilities. +- **Logs**. The entire running process of CI/CD pipelines is recorded. + +### Source-to-Image + +Source-to-Image (S2I) is a toolkit and automated workflow for building reproducible container images from source code. S2I produces ready-to-run images by injecting source code into a container image and making the container ready to execute from source code. + +S2I allows you to publish your service to Kubernetes without writing a Dockerfile. You just need to provide a source code repository address, and specify the target image registry. All configurations will be stored as different resources in Kubernetes. Your service will be automatically published to Kubernetes, and the image will be pushed to the target registry as well. + +![S2I](https://pek3b.qingstor.com/kubesphere-docs/png/20200204131749.png) + +### Binary-to-Image + +Similar to S2I, Binary-to-Image (B2I) is a toolkit and automated workflow for building reproducible container images from binary (for example, Jar, War, Binary package). + +You just need to upload your application binary package, and specify the image registry to which you want to push. The rest is exactly the same as S2I. + +For more information, see [DevOps User Guide](../../devops-user-guide/). + +## Istio-based Service Mesh + +KubeSphere service mesh is composed of a set of ecosystem projects, such as Istio, Envoy and Jaeger. We design a unified user interface to use and manage these tools. Most features are out-of-box and have been designed from the developer's perspective, which means KubeSphere can help you to reduce the learning curve since you do not need to deep dive into those tools individually. + +KubeSphere service mesh provides fine-grained traffic management, observability, tracing, and service identity and security management for a distributed application. Therefore, developers can focus on core business. With service mesh management of KubeSphere, users can better track, route and optimize communications within Kubernetes for cloud-native apps. + +### Traffic Management + +- **Canary release** represents an important deployment strategy of new versions for testing purposes. Traffic is separated with a pre-configured ratio into a canary release and a production release respectively. If everything goes well, users can change the percentage and gradually replace the old version with the new one. +- **Blue-green deployment** allows users to run two versions of an application at the same time. Blue stands for the current app version and green represents the new version tested for functionality and performance. 
Once the testing results are successful, application traffic is routed from the in-production version (blue) to the new one (green). +- **Traffic mirroring** enables teams to bring changes to production with as little risk as possible. Mirroring sends a copy of live traffic to a mirrored service. +- **Circuit breaker** allows users to set limits for calls to individual hosts within a service, such as the number of concurrent connections or how many times calls to this host have failed. + +For more information, see [Grayscale Release](../../project-user-guide/grayscale-release/overview/). + +### Visualization + +KubeSphere service mesh has the ability to visualize the connections between microservices and the topology of how they interconnect. In this regard, observability is extremely useful in understanding the interconnection of cloud-native microservices. + +### Distributed Tracing + +Based on Jaeger, KubeSphere service mesh enables users to track how services interact with each other. It helps users gain a deeper understanding of request latency, bottlenecks, serialization and parallelism via visualization. + +## Multi-tenant Management + +In KubeSphere, resources (for example, clusters) can be shared between tenants. First, administrators or managers need to set different account roles with different authorizations. After that, members in the platform can be assigned with these roles to perform specific actions on varied resources. Meanwhile, as KubeSphere completely isolates tenants, they will not affect each other at all. + +- **Multi-tenancy**. It provides role-based fine-grained authentication in a unified way and a three-tier authorization system. +- **Unified authentication**. For enterprises, KubeSphere is compatible with their central authentication system that is base on LDAP or AD protocol. Single sign-on (SSO) is also supported to achieve unified authentication of tenant identity. +- **Authorization system**. It is organized into three levels: cluster, workspace and project. KubeSphere ensures resources can be shared while different roles at multiple levels are completely isolated for resource security. + +For more information, see [Role and Member Management in Workspace](../../workspace-administration/role-and-member-management/). + +## Observability + +### Multi-dimensional Monitoring + +KubeSphere features a self-updating monitoring system with graphical interfaces that streamline the whole process of operation and maintenance. It provides customized monitoring of a variety of resources and includes a set of alerts that can immediately notify users of any occurring issues. + +- **Customized monitoring dashboard**. Users can decide exactly what metics need to be monitored in what kind of form. Different templates are available in KubeSphere for users to select, such as Elasticsearch, MySQL, and Redis. Alternatively, they can also create their own monitoring templates, including charts, colors, intervals and units. +- **O&M-friendly**. The monitoring system can be operated in a visualized interface with open standard APIs for enterprises to integrate their existing systems. Therefore, they can implement operation and maintenance in a unified way. +- **Third-party compatibility**. KubeSphere is compatible with Prometheus, which is the de facto metrics collection platform for monitoring in Kubernetes environments. Monitoring data can be seamlessly displayed in the web console of KubeSphere. + +- **Multi-dimensional monitoring at second-level precision**. 
+ - For infrastructure monitoring, the system provides comprehensive metrics such as CPU utilization, memory utilization, CPU load average, disk usage, inode utilization, disk throughput, IOPS, network outbound/inbound rate, Pod status, etcd service status, and API Server status. + - For application resource monitoring, the system provides five key monitoring metrics: CPU utilization, memory consumption, Pod number, network outbound and inbound rate. Besides, users can sort data based on resource consumption and search metics by customizing the time range. In this way, occurring problems can be quickly located so that users can take necessary action. +- **Ranking**. Users can sort data by node, workspace and project, which gives them a graphical view of how their resources are running in a straightforward way. +- **Component monitoring**. It allows users to quickly locate any component failures to avoid unnecessary business downtime. + +### Alerting, Events, Auditing and Notifications + +- **Customized alerting policies and rules**. The alerting system is based on multi-tenant monitoring of multi-dimensional metrics. The system will send alerts related to a wide spectrum of resources such as pod, network and workload. In this regard, users can customize their own alerting policy by setting specific rules, such as repetition interval and time. The threshold and alerting level can also be defined by users themselves. +- **Accurate event tracking**. KubeSphere allows users to know what is happening inside a cluster, such as container running status (successful or failed), node scheduling, and image pulling result. They will be accurately recorded with the specific reason, status and message displayed in the web console. In a production environment, this will help users to respond to any issues in time. +- **Enhanced auditing security**. As KubeSphere features fine-grained management of user authorization, resources and network can be completely isolated to ensure data security. The comprehensive auditing feature allows users to search for activities related to any operation or alert. +- **Diversified notification methods**. Emails represent a key approach for users to receive notifications of relevant activities they want to know. They can be sent based on the rule set by users themselves, who are able to customize the sender email address and their receiver lists. Besides, other channels, such as Slack and WeChat, are also supported to meet the need of our users. In this connection, KubeSphere provides users with more notification preferences as they are updated on the latest development in KubeSphere no matter what channel they select. + +For more information, please see [Project User Guide](../../project-user-guide/). + +## Log Query and Collection + +- **Multi-tenant log management**. In KubeSphere log search system, different tenants can only see their own log information. Logs can be exported as records for future reference. +- **Multi-level log query**. Users can search for logs related to various resources, such as projects, workloads, and pods. Flexible and convenient log collection configuration options are available. +- **Multiple log collectors**. Users can choose log collectors such as Elasticsearch, Kafka, and Fluentd. +- **On-disk log collection**. For applications whose logs are saved in a Pod sidecar as a file, users can enable Disk Log Collection. + +## Application Management and Orchestration + +- **App Store**. 
KubeSphere provides an app store based on [OpenPitrix](https://github.com/openpitrix/openpitrix), an industry-leading open source system for app management across the whole lifecycle, including release, removal, and distribution. +- **App repository**. In KubeSphere, users can create an app repository hosted either in object storage (such as [QingStor](https://www.qingcloud.com/products/objectstorage/) or [AWS S3](https://aws.amazon.com/what-is-cloud-object-storage/)) or in [GitHub](https://github.com/). App packages submitted to the app repository are composed of Helm Chart template files of the app. +- **App template**. With app templates, KubeSphere provides a visualized way for app deployment with just one click. Internally, app templates can help different teams in the enterprise to share middleware and business systems. Externally, they can serve as an industry standard for application delivery based on different scenarios and needs. + +## Multiple Storage Solutions + +- Open source storage solutions are available such as GlusterFS, CephRBD, and NFS. +- NeonSAN CSI plugin connects to QingStor NeonSAN to meet core business requirements for low latency, high resilience, and high performance. +- QingCloud CSI plugin connects to various block storage services in QingCloud platform. + +## Multiple Network Solutions + +- Open source network solutions are available such as Calico and Flannel. + +- [OpenELB](https://github.com/kubesphere/openelb), a load balancer developed for bare metal Kubernetes clusters, is designed by KubeSphere development team. This CNCF-certified tool serves as an important solution for developers. It mainly features: + + 1. ECMP routing load balancing + 2. BGP dynamic routing configuration + 3. VIP management + 4. LoadBalancerIP assignment in Kubernetes services (v0.3.0) + 5. Installation with Helm Chart (v0.3.0) + 6. Dynamic BGP server configuration through CRD (v0.3.0) + 7. Dynamic BGP peer configuration through CRD (v0.3.0) + + For more information, please see [this article](https://kubesphere.io/conferences/porter/). diff --git a/content/en/docs/v3.4/introduction/scen b/content/en/docs/v3.4/introduction/scen new file mode 100644 index 000000000..e69de29bb diff --git a/content/en/docs/v3.4/introduction/scenarios.md b/content/en/docs/v3.4/introduction/scenarios.md new file mode 100644 index 000000000..931a39ab9 --- /dev/null +++ b/content/en/docs/v3.4/introduction/scenarios.md @@ -0,0 +1,105 @@ +--- +title: 'Use Cases' +keywords: 'KubeSphere, Kubernetes, Multi-cluster, Observability, DevOps' +description: 'Applicable in a variety of scenarios, KubeSphere provides enterprises with containerized environments with a complete set of features for management and operation.' + +weight: 1700 +--- + +KubeSphere is applicable in a variety of scenarios. For enterprises that deploy their business system on bare metal, their business modules are tightly coupled with each other. That means it is extremely difficult for resources to be horizontally scaled. In this connection, KubeSphere provides enterprises with containerized environments with a complete set of features for management and operation. It empowers enterprises to rise to the challenges in the middle of their digital transformation, including agile software development, automated operation and maintenance, microservices governance, traffic management, autoscaling, high availability, as well as DevOps and CI/CD. 
+ +At the same time, with the strong support for network and storage offered by QingCloud, KubeSphere is highly compatible with the existing monitoring and O&M system of enterprises. This is how they can upgrade their system for IT containerization. + +## Multi-cluster Deployment + +It is generally believed that using as few clusters as possible can reduce costs with less pressure for O&M. That said, both individuals and organizations tend to deploy multiple clusters for various reasons. For instance, the majority of enterprises may deploy their services across clusters as they need to be tested in non-production environments. Another typical example is that enterprises may separate their services based on regions, departments, and infrastructure providers by adopting multiple clusters. + +The main reasons for employing this method fall into the following four categories: + +### High Availability + +Users can deploy workloads on multiple clusters by using a global VIP or DNS to send requests to corresponding backend clusters. When a cluster malfunctions or fails to handle requests, the VIP or DNS records can be transferred to a healthy cluster. + +![high-availability](https://ap3.qingstor.com/kubesphere-website/docs/ha.png) + +### Low Latency + +When clusters are deployed in various regions, user requests can be forwarded to the nearest cluster, greatly reducing network latency. For example, we have three Kubernetes clusters deployed in New York, Houston and Los Angeles respectively. For users in California, their requests can be forwarded to Los Angeles. This will reduce the network latency due to geographical distance, providing the best user experience possible for users in different areas. + +### Isolation + +**Failure Isolation**. Generally, it is much easier for multiple small clusters to isolate failures than a large cluster. In case of outages, network failures, insufficient resources or other possible resulting issues, the failure can be isolated within a certain cluster without spreading to others. + +**Business Isolation**. Although Kubernetes provides namespaces as a solution to app isolation, this method only represents the isolation in logic. This is because different namespaces are connected through the network, which means the issue of resource preemption still exists. To achieve further isolation, users need to create additional network isolation policies or set resource quotas. Using multiple clusters users can achieve complete physical isolation that is more secure and reliable than the isolation through namespaces. For example, this is extremely effective when different departments within an enterprise use multiple clusters for the deployment of development, testing or production environments. + +![pipeline](https://ap3.qingstor.com/kubesphere-website/docs/pipeline.png) + +### Avoid Vendor Lock-in + +Kubernetes has become the de facto standard in container orchestration. Against this backdrop, many enterprises avoid putting all eggs in one basket as they deploy clusters by using services of different cloud providers. That means they can transfer and scale their business anytime between clusters. However, it is not that easy for them to transfer their business in terms of costs, as different cloud providers feature varied Kubernetes services, including storage and network interface. + +KubeSphere provides its unique feature as a solution to the above four cases. 
Based on the Federation pattern of KubeSphere's multi-cluster feature, multiple heterogeneous Kubernetes clusters can be aggregated within a unified Kubernetes resource pool. When users deploy applications, they can decide to which Kubernetes cluster they want app replicas to be scheduled in the pool. The whole process is managed and maintained through KubeSphere. This is how KubeSphere helps users achieve multi-site high availability (across zones and clusters). + +For more information, see [Multi-cluster Management](../../multicluster-management/). + +## Full-stack Observability with Streamlined O&M + +Observability represents an important part in the work of Ops teams. In this regard, enterprises see increasing pressure on their Ops teams as they deploy their business on Kubernetes directly or on the platform of other cloud providers. This poses considerable challenges to Ops teams since they need to cope with extensive data. + +### Multi-dimensional Cluster Monitoring + +Again, the adoption of multi-cluster deployment across clouds is on the rise both among individuals and enterprises. However, because they run different services, users need to learn, deploy and especially, monitor across different cloud environments. After all, the tool provided by one cloud vendor for observability may not be applicable to another. In short, Ops teams are in desperate need of a unified view across different clouds for cluster monitoring covering metrics across the board. + +### Log Query + +A comprehensive monitoring feature is meaningless without a flexible log query system. This is because users need to be able to track all the information related to their resources, such as alerting messages, node scheduling status, app deployment success, or network policy modification. All these records play an important role in making sure users can keep up with the latest development, which will inform policy decisions of their business. + +### Customization + +Even for resource monitoring on the same platform, the tool provided by the cloud vendor may not be a panacea. In some cases, users need to create their own standard of observability, such as the specific monitoring metrics and display form. Moreover, they need to integrate common tools to the cloud for special use, such as Prometheus, which is the de facto standard for Kubernetes monitoring. In other words, customization has become a necessity in the industry as cloud-powered applications drive business on the one hand while requiring fine-grained monitoring on the other just in case of any failure. + +KubeSphere features a unified platform for the management of clusters deployed across cloud providers. Apps can be deployed automatically, streamlining the process of operation and maintenance. At the same time, KubeSphere boasts powerful observability features (alerting, events, auditing, logging and notifications) with a comprehensive customized monitoring system for a wide range of resources. Users themselves can decide what resources they want to monitor in what kind of forms. + +With KubeSphere, enterprises can focus more on business innovation as they are freed from complicated process of data collection and analysis. + +## Implement DevOps Practices + +DevOps represents an important set of practices or methods that engage both development and Ops teams for more coordinated and efficient cooperation between them. Therefore, development, test and release can be faster, more efficient and more reliable. 
CI/CD pipelines in KubeSphere provide enterprises with agile development and automated O&M. Besides, the microservices feature (service mesh) in KubeSphere enables enterprises to develop, test and release services in a fine-grained way, creating an enabling environment for their implementation of DevOps. With KubeSphere, enterprises can make full use of DevOps by: + +- Testing service robustness through fault injection without code hacking. +- Decoupling Kubernetes services with credential management and access control. +- Visualizing end-to-end monitoring process. + +## Service Mesh and Cloud-native Architecture + +Enterprises are now under increasing pressure to accelerate innovation amid their digital transformation. Specifically, they need to speed up in terms of development cycle, delivery time and deployment frequency. As application architectures evolve from monolithic to microservices, enterprises are faced with a multitude of resulting challenges. For example, microservices communicate with each other frequently, which entails smooth and stable network connectivity. Among others, latency represents a key factor that affects the entire architecture and user experience. In case of any failure, a troubleshooting and identifying system also needs to be in place to respond in time. Besides, deploying distributed applications is never an easy job without highly-functional tools and infrastructure. + +KubeSphere service mesh addresses a series of microservices use cases. + +### Multi-cloud App Distribution + +As mentioned above, it is not uncommon for individuals or organizations to deploy apps across Kubernetes clusters, whether on premises, public or hybrid. This may bring out significant challenges in unified traffic management, application and service scalability, DevOps pipeline automation, monitoring and so on. + +### Visualization + +As users deploy microservices which will communicate among themselves considerably, it will help users gain a better understanding of topological relations between microservices if the connection is highly visualized. Besides, distributed tracing is also essential for each service, providing operators with a detailed understanding of call flows and service dependencies within a mesh. + +### Rolling Updates + +When enterprises introduce a new version of a service, they may adopt a canary upgrade or blue-green deployment. The new one runs side by side with the old one and a set percentage of traffic is moved to the new service for error detection and latency monitoring. If everything works fine, the traffic to the new one will gradually increase until 100% of customers are using the new version. For this type of update, KubeSphere provides three kinds of categories of grayscale release: + +**Blue-green Deployment**. The blue-green release provides a zero downtime deployment, which means the new version can be deployed with the old one preserved. It enables both versions to run at the same time. If there is a problem with running, you can quickly roll back to the old version. + +**Canary Release**. This method brings part of the actual traffic into a new version to test its performance and reliability. It can help detect potential problems in the actual environment while not affecting the overall system stability. + +**Traffic Mirroring**. Traffic mirroring provides a more accurate way to test new versions as problems can be detected in advance while not affecting the production environment. 
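
KubeSphere implements these release strategies on top of Istio traffic rules. As a rough illustration only (not the exact configuration KubeSphere generates; the `reviews` service, the `demo` namespace, and the subsets are placeholders), a canary split that shifts 10 percent of traffic to a new version can be expressed as an Istio VirtualService:

```bash
# Illustrative sketch: apply an Istio VirtualService that routes 90% of traffic
# to the stable subset (v1) and 10% to the canary subset (v2). The subsets must
# match a DestinationRule defined elsewhere; "reviews" is a placeholder service.
kubectl apply -f - <<'EOF'
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: reviews
  namespace: demo
spec:
  hosts:
    - reviews
  http:
    - route:
        - destination:
            host: reviews
            subset: v1
          weight: 90
        - destination:
            host: reviews
            subset: v2
          weight: 10
EOF
```

In KubeSphere, the same effect is achieved from the grayscale release UI without writing such YAML by hand.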
+ +With a lightweight, highly scalable microservices architecture offered by KubeSphere, enterprises are well-positioned to build their own cloud-native applications for the above scenarios. Based on Istio, a major solution to microservices, KubeSphere provides a platform for microservices governance without any hacking into code. Spring Cloud is also integrated for enterprises to build Java apps. KubeSphere also offers microservices upgrade consultations and technical support services, helping enterprises implement microservices architectures for their cloud-native transformation. + +## Bare Metal Deployment + +Sometimes, the cloud is not necessarily the ideal place for the deployment of resources. For example, physical, dedicated servers tend to function better when it comes to the cases that require considerable compute resources and high disk I/O. Besides, for some specialized workloads that are difficult to migrate to a cloud environment, certified hardware and complicated licensing and support agreements may be required. + +KubeSphere can help enterprises deploy a containerized architecture on bare metal, load balancing traffic with a physical switch. In this connection, [OpenELB](https://github.com/kubesphere/openelb), a CNCF-certified cloud-native tool is born for this end. At the same time, KubeSphere, together with QingCloud VPC and QingStor NeonSAN, provides users with a complete set of features ranging from load balancing, container platform building, network management, and storage. This means virtually all aspects of the containerized architecture can be fully controlled and uniformly managed, without sacrificing the performance in virtualization. + +For detailed information about how KubeSphere drives the development of numerous industries, please see [Case Studies](/case/). diff --git a/content/en/docs/v3.4/introduction/what's-new-in-3.3.md b/content/en/docs/v3.4/introduction/what's-new-in-3.3.md new file mode 100644 index 000000000..80c770c63 --- /dev/null +++ b/content/en/docs/v3.4/introduction/what's-new-in-3.3.md @@ -0,0 +1,13 @@ +--- +title: "What's New in 3.3" +keywords: 'Kubernetes, KubeSphere, new features' +description: "What's New in 3.3" +linkTitle: "What's New in 3.3" +weight: 1400 +--- + +In June 2022, KubeSphere 3.3 has been released with more exciting features. This release introduces GitOps-based continuous deployment and supports Git-based code repository management to further optimize the DevOps feature. Moreover, it also provides enhanced features of storage, multi-tenancy, multi-cluster, observability, app store, service mesh, and edge computing, to further perfect the interactive design for better user experience. + +If you want to know details about new feature of KubeSphere 3.3, you can read the article [KubeSphere 3.3.0: Embrace GitOps](/../../../news/kubesphere-3.3.0-ga-announcement/). + +In addition to the above highlights, this release also features other functionality upgrades and fixes the known bugs. There were some deprecated or removed features in 3.3. For more and detailed information, see the [Release Notes for 3.3.0](../../../v3.3/release/release-v330/), [Release Notes for 3.3.1](../../../v3.3/release/release-v331/), and [Release Notes for 3.3.2](../../../v3.3/release/release-v332/). 
\ No newline at end of file diff --git a/content/en/docs/v3.4/introduction/what-is-kubesphere.md b/content/en/docs/v3.4/introduction/what-is-kubesphere.md new file mode 100644 index 000000000..23438f716 --- /dev/null +++ b/content/en/docs/v3.4/introduction/what-is-kubesphere.md @@ -0,0 +1,39 @@ +--- +title: "What is KubeSphere" +keywords: 'Kubernetes, KubeSphere, Introduction' +description: 'What is KubeSphere' +linkTitle: "What is KubeSphere" +weight: 1100 +--- + +## Overview + +KubeSphere is a **distributed operating system for cloud-native application management**, using [Kubernetes](https://kubernetes.io) as its kernel. It provides a plug-and-play architecture, allowing third-party applications to be seamlessly integrated into its ecosystem. + +KubeSphere also represents a multi-tenant enterprise-grade [Kubernetes container platform](https://kubesphere.io) with full-stack automated IT operation and streamlined [DevOps workflows](https://kubesphere.io/devops/). It provides developer-friendly wizard web UI, helping enterprises to build out a robust and feature-rich platform. It boasts the most common functionalities needed for enterprise Kubernetes strategies, such as **Kubernetes resource management**, **DevOps (CI/CD)**, **application lifecycle management**, **monitoring**, **logging**, **service mesh**, **multi-tenancy**, **alerting and notification**, **auditing**, **storage and networking**, **autoscaling**, **access control**, **GPU support**, **multi-cluster deployment and management**, **network policy**, **registry management**, and **security management**. + +The KubeSphere team developed [KubeKey](https://github.com/kubesphere/kubekey), an open-source brand-new installer, to help enterprises quickly set up a Kubernetes cluster on public clouds or data centers. Users have the option to install Kubernetes only or install both KubeSphere and Kubernetes. KubeKey provides users with different installation options such as all-in-one installation and multi-node installation. It is also an efficient tool to install cloud-native add-ons, and upgrade and scale your Kubernetes cluster. + +![architecture-1](/images/docs/v3.3/introduction/what-is-kubesphere/architecture-1.png) + +## O&M Friendly + +KubeSphere hides the details of underlying infrastructure for users and helps enterprises modernize, migrate, deploy and manage existing and containerized apps seamlessly across a variety of infrastructure types. This is how KubeSphere empowers developers and Ops teams to focus on application development and accelerate DevOps automated workflows and delivery processes with enterprise-level observability and troubleshooting, unified monitoring and logging, centralized storage and networking management, easy-to-use CI/CD pipelines, and so on. + +## Run KubeSphere Everywhere + +As a lightweight platform, KubeSphere has become more friendly to different cloud ecosystems as it does not change Kubernetes itself at all. In other words, KubeSphere can be deployed **on any existing version-compatible Kubernetes cluster on any infrastructure** including virtual machine, bare metal, on-premises, public cloud and hybrid cloud. + +KubeSphere users have the choice of installing KubeSphere on clouds and container platforms, such as Alibaba Cloud, AWS, QingCloud, Tencent Cloud, Huawei Cloud and Rancher, and even importing and managing their existing Kubernetes clusters. 
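
As a quick illustration of the KubeKey installer mentioned above, an all-in-one setup typically boils down to two commands. This is only a sketch; the version numbers are examples, so check the KubeKey release notes for the versions that match your KubeSphere release:

```bash
# Download the kk binary (you can pin a release with the VERSION environment
# variable), then create a single-node cluster with both Kubernetes and
# KubeSphere. The versions below are examples only.
curl -sfL https://get-kk.kubesphere.io | sh -
./kk create cluster --with-kubernetes v1.23.10 --with-kubesphere v3.4.0
```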
+ +The seamless integration of KubeSphere into existing Kubernetes platforms means that the business of users will not be affected, without any modification to their current resources or assets. For more information, see [Installing on Linux](../../installing-on-linux/) and [Installing on Kubernetes](../../installing-on-kubernetes/). + +## Open Source + +With the open-source model, the KubeSphere community advances development in an open way. KubeSphere is **100% open source** and available on [GitHub](https://github.com/kubesphere/) where you can find all the source code, documents and discussions. It has been widely installed and used in development, testing and production environments, and a large number of services are running smoothly in KubeSphere. For more information about all major open-source projects, see [Open Source Projects](/projects/). + +## Landscape + +KubeSphere is a member of CNCF and a [Kubernetes Conformance Certified platform](https://www.cncf.io/certification/software-conformance/#logos), further enriching [CNCF CLOUD NATIVE Landscape](https://landscape.cncf.io/?landscape=observability-and-analysis&license=apache-license-2-0). + +![cncf-landscape](/images/docs/v3.3/introduction/what-is-kubesphere/cncf-landscape.png) diff --git a/content/en/docs/v3.4/multicluster-management/_index.md b/content/en/docs/v3.4/multicluster-management/_index.md new file mode 100644 index 000000000..822984fa4 --- /dev/null +++ b/content/en/docs/v3.4/multicluster-management/_index.md @@ -0,0 +1,15 @@ +--- +title: "Multi-cluster Management" +description: "Import a hosted or on-premises Kubernetes cluster into KubeSphere" +layout: "second" + +linkTitle: "Multi-cluster Management" + +weight: 5000 + +icon: "/images/docs/v3.3/docs.svg" +--- + +## Introduction + +This chapter demonstrates how to use the multi-cluster feature of KubeSphere to import heterogeneous clusters for unified management. diff --git a/content/en/docs/v3.4/multicluster-management/enable-multicluster/_index.md b/content/en/docs/v3.4/multicluster-management/enable-multicluster/_index.md new file mode 100644 index 000000000..fe9a1388b --- /dev/null +++ b/content/en/docs/v3.4/multicluster-management/enable-multicluster/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Enable Multi-cluster Management in KubeSphere" +weight: 5200 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/multicluster-management/enable-multicluster/agent-connection.md b/content/en/docs/v3.4/multicluster-management/enable-multicluster/agent-connection.md new file mode 100644 index 000000000..66ce91eeb --- /dev/null +++ b/content/en/docs/v3.4/multicluster-management/enable-multicluster/agent-connection.md @@ -0,0 +1,270 @@ +--- +title: "Agent Connection" +keywords: 'Kubernetes, KubeSphere, multicluster, agent-connection' +description: 'Understand the general steps of importing clusters through agent connection.' +titleLink: "Agent Connection" +weight: 5220 +--- + +The component [Tower](https://github.com/kubesphere/tower) of KubeSphere is used for agent connection. Tower is a tool for network connection between clusters through the agent. If the host cluster cannot access the member cluster directly, you can expose the proxy service address of the host cluster. This enables the member cluster to connect to the host cluster through the agent. This method is applicable when the member cluster is in a private environment (for example, IDC) and the host cluster is able to expose the proxy service. 
The agent connection is also applicable when your clusters are distributed across different cloud providers. + +To use the multi-cluster feature using an agent, you must have at least two clusters serving as the host cluster and the member cluster respectively. A cluster can be defined as the host cluster or the member cluster either before or after you install KubeSphere. For more information about installing KubeSphere, refer to [Installing on Linux](../../../installing-on-linux/) and [Installing on Kubernetes](../../../installing-on-kubernetes/). + +## Video Demonstration + +{{< youtube JB_tsALgjaA >}} + +## Prepare a Host Cluster + +A host cluster provides you with the central control plane and you can only define one host cluster. + +{{< tabs >}} + +{{< tab "KubeSphere has been installed" >}} + +If you already have a standalone KubeSphere cluster installed, you can set the value of `clusterRole` to `host` by editing the cluster configuration. + +- Option A - Use the web console: + + Use the `admin` account to log in to the console and go to **CRDs** on the **Cluster Management** page. Enter the keyword `ClusterConfiguration` and go to its detail page. Edit the YAML of `ks-installer`, which is similar to [Enable Pluggable Components](../../../pluggable-components/). + +- Option B - Use Kubectl: + + ```shell + kubectl edit cc ks-installer -n kubesphere-system + ``` + +In the YAML file of `ks-installer`, navigate to `multicluster`, set the value of `clusterRole` to `host`, then click **OK** (if you use the web console) to make it effective: + +```yaml +multicluster: + clusterRole: host +``` + +To set the host cluster name, add a field `hostClusterName` under `multicluster.clusterRole` in the YAML file of `ks-installer`: + +```yaml +multicluster: + clusterRole: host + hostClusterName: +``` + +{{< notice note >}} + +- It is recommended that you set the host cluster name while you are preparing your host cluster. When your host cluster is set up and running with resources deployed, it is not recommended that you set the host cluster name. +- The host cluster name can contain only lowercase letters, numbers, hyphens (-), or periods (.), and must start and end with a lowercase letter or number. + +{{}} + +You need to wait for a while so that the change can take effect. + +{{}} + +{{< tab "KubeSphere has not been installed" >}} + +You can define a host cluster before you install KubeSphere either on Linux or on an existing Kubernetes cluster. If you want to [install KubeSphere on Linux](../../../installing-on-linux/introduction/multioverview/#1-create-an-example-configuration-file), you use a `config-sample.yaml` file. If you want to [install KubeSphere on an existing Kubernetes cluster](../../../installing-on-kubernetes/introduction/overview/#deploy-kubesphere), you use two YAML files, one of which is `cluster-configuration.yaml`. + +To set a host cluster, change the value of `clusterRole` to `host` in `config-sample.yaml` or `cluster-configuration.yaml` accordingly before you install KubeSphere. + +```yaml +multicluster: + clusterRole: host +``` + +To set the host cluster name, add a field `hostClusterName` under `multicluster.clusterRole` in `config-sample.yaml` or `cluster-configuration.yaml`: + +```yaml +multicluster: + clusterRole: host + hostClusterName: +``` + +{{< notice note >}} + +- The host cluster name can contain only lowercase letters, numbers, hyphens (-), or periods (.), and must start and end with a lowercase letter or number. 
+ +{{}} + +{{< notice info >}} + +If you install KubeSphere on a single-node cluster ([All-in-One](../../../quick-start/all-in-one-on-linux/)), you do not need to create a `config-sample.yaml` file. In this case, you can set a host cluster after KubeSphere is installed. + +{{}} + +{{}} + +{{}} + +You can use **kubectl** to retrieve the installation logs to verify the status by running the following command. Wait for a while, and you will be able to see the successful log return if the host cluster is ready. + +```bash +kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f +``` + +## Set the Proxy Service Address + +After the installation of the host cluster, a proxy service called `tower` will be created in `kubesphere-system`, whose type is `LoadBalancer`. + +{{< tabs >}} + +{{< tab "A LoadBalancer available in your cluster" >}} + +If a LoadBalancer plugin is available for the cluster, you can see a corresponding address for `EXTERNAL-IP` of tower, which will be acquired by KubeSphere. In this case, the proxy service is set automatically. That means you can skip the step to set the proxy. Execute the following command to verify if you have a LoadBalancer. + +```bash +kubectl -n kubesphere-system get svc +``` + +The output is similar to this: + +```shell +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +tower LoadBalancer 10.233.63.191 139.198.110.23 8080:30721/TCP 16h +``` + +{{< notice note >}} + +Generally, there is always a LoadBalancer solution in the public cloud, and the external IP can be allocated by the load balancer automatically. If your clusters are running in an on-premises environment, especially a **bare metal environment**, you can use [OpenELB](https://github.com/kubesphere/openelb) as the LB solution. + +{{}} + +{{}} + +{{< tab "No LoadBalancer available in your cluster" >}} + + +1. Run the following command to check the service: + + ```shell + kubectl -n kubesphere-system get svc + ``` + + In this sample, `NodePort` is `30721`. + ``` + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + tower LoadBalancer 10.233.63.191 8080:30721/TCP 16h + ``` + +2. If `EXTERNAL-IP` is `pending`, you need to manually set the proxy address. For example, if your public IP address is `139.198.120.120`, you need to expose port (for example, `30721`) of this public IP address to `NodeIP`:`NodePort`. + +3. Add the value of `proxyPublishAddress` to the configuration file of `ks-installer` and provide the public IP address (`139.198.120.120` in this tutorial) and port number as follows. + + - Option A - Use the web console: + + Use the `admin` account to log in to the console and go to **CRDs** on the **Cluster Management** page. Enter the keyword `ClusterConfiguration` and go to its detail page. Edit the YAML of `ks-installer`, which is similar to [Enable Pluggable Components](../../../pluggable-components/). + + - Option B - Use Kubectl: + + ```bash + kubectl -n kubesphere-system edit clusterconfiguration ks-installer + ``` + + Navigate to `multicluster` and add a new line for `proxyPublishAddress` to define the IP address to access tower. + + ```yaml + multicluster: + clusterRole: host + proxyPublishAddress: http://139.198.120.120:{NodePort} # Add this line to set the address to access tower + ``` + + In the YAML file, you need to replace `NodePort` with the port ID you specified in Step 2. + +4. 
Save the configuration and wait for a while, or you can manually restart `ks-apiserver` to make the change effective immediately using the following command. + + ```shell + kubectl -n kubesphere-system rollout restart deployment ks-apiserver + ``` + +{{}} + +{{}} + +## Prepare a Member Cluster + +In order to manage the member cluster from the **host cluster**, you need to make `jwtSecret` the same between them. Therefore, get it first by excuting the following command on the **host cluster**. + +```bash +kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret +``` + +The output may look like this: + +```yaml +jwtSecret: "gfIwilcc0WjNGKJ5DLeksf2JKfcLgTZU" +``` + +{{< tabs >}} + +{{< tab "KubeSphere has been installed" >}} + +If you already have a standalone KubeSphere cluster installed, you can set the value of `clusterRole` to `member` by editing the cluster configuration. + +- Option A - Use the web console: + + Use the `admin` account to log in to the console and go to **CRDs** on the **Cluster Management** page. Enter the keyword `ClusterConfiguration` and go to its detail page. Edit the YAML of `ks-installer`, which is similar to [Enable Pluggable Components](../../../pluggable-components/). + +- Option B - Use Kubectl: + + ```shell + kubectl edit cc ks-installer -n kubesphere-system + ``` + +In the YAML file of `ks-installer`, enter the corresponding `jwtSecret` shown above: + +```yaml +authentication: + jwtSecret: gfIwilcc0WjNGKJ5DLeksf2JKfcLgTZU +``` + +Scroll down and set the value of `clusterRole` to `member`, then click **OK** (if you use the web console) to make it effective: + +```yaml +multicluster: + clusterRole: member +``` + +You need to wait for a while so that the change can take effect. + +{{}} + +{{< tab "KubeSphere has not been installed" >}} + +You can define a member cluster before you install KubeSphere either on Linux or on an existing Kubernetes cluster. If you want to [install KubeSphere on Linux](../../../installing-on-linux/introduction/multioverview/#1-create-an-example-configuration-file), you use a `config-sample.yaml` file. If you want to [install KubeSphere on an existing Kubernetes cluster](../../../installing-on-kubernetes/introduction/overview/#deploy-kubesphere), you use two YAML files, one of which is `cluster-configuration.yaml`. To set a member cluster, enter the value of `jwtSecret` shown above and change the value of `clusterRole` to `member` in `config-sample.yaml` or `cluster-configuration.yaml` accordingly before you install KubeSphere. + +```yaml +authentication: + jwtSecret: gfIwilcc0WjNGKJ5DLeksf2JKfcLgTZU +``` + +```yaml +multicluster: + clusterRole: member +``` + +{{< notice note >}} + +If you install KubeSphere on a single-node cluster ([All-in-One](../../../quick-start/all-in-one-on-linux/)), you do not need to create a `config-sample.yaml` file. In this case, you can set a member cluster after KubeSphere is installed. + +{{}} + +{{}} + +{{}} + +You can use **kubectl** to retrieve the installation logs to verify the status by running the following command. Wait for a while, and you will be able to see the successful log return if the member cluster is ready. + +```bash +kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f +``` + +## Import a Member Cluster + +1. Log in to the KubeSphere console as `admin` and click **Add Cluster** on the **Cluster Management** page. + +2. 
Enter the basic information of the cluster to be imported on the **Import Cluster** page. You can also click **Edit Mode** in the upper-right corner to view and edit the basic information in YAML format. After you finish editing, click **Next**. + +3. In **Connection Method**, select **Agent connection** and click **Create**. The console will then display the YAML configuration file for the agent Deployment generated by the host cluster. + +4. Create an `agent.yaml` file on the member cluster based on the instructions, then copy and paste the agent Deployment into the file. Execute `kubectl create -f agent.yaml` on the node and wait for the agent to be up and running. Please make sure the proxy address is accessible to the member cluster. + +5. You can see the cluster you have imported in the host cluster when the cluster agent is up and running. \ No newline at end of file diff --git a/content/en/docs/v3.4/multicluster-management/enable-multicluster/direct-connection.md b/content/en/docs/v3.4/multicluster-management/enable-multicluster/direct-connection.md new file mode 100644 index 000000000..56a3d47ff --- /dev/null +++ b/content/en/docs/v3.4/multicluster-management/enable-multicluster/direct-connection.md @@ -0,0 +1,196 @@ +--- +title: "Direct Connection" +keywords: 'Kubernetes, KubeSphere, multicluster, hybrid-cloud, direct-connection' +description: 'Understand the general steps of importing clusters through direct connection.' +titleLink: "Direct Connection" +weight: 5210 +--- + +If the kube-apiserver address of the member cluster is accessible on any node of the host cluster, you can adopt **Direct Connection**. This method is applicable when the kube-apiserver address of the member cluster can be exposed, or when the host cluster and the member cluster are in the same private network or subnet. + +To use the multi-cluster feature through direct connection, you must have at least two clusters serving as the host cluster and the member cluster respectively. A cluster can be defined as the host cluster or the member cluster either before or after you install KubeSphere. For more information about installing KubeSphere, refer to [Installing on Linux](../../../installing-on-linux/) and [Installing on Kubernetes](../../../installing-on-kubernetes/). + +## Video Demonstration + +{{< youtube i-yWU4izFPo >}} + +## Prepare a Host Cluster + +A host cluster provides you with the central control plane and you can only define one host cluster. + +{{< tabs >}} + +{{< tab "KubeSphere has been installed" >}} + +If you already have a standalone KubeSphere cluster installed, you can set the value of `clusterRole` to `host` by editing the cluster configuration. + +- Option A - Use the web console: + + Use the `admin` account to log in to the console and go to **CRDs** on the **Cluster Management** page. Enter the keyword `ClusterConfiguration` and go to its detail page. Edit the YAML of `ks-installer`, which is similar to [Enable Pluggable Components](../../../pluggable-components/).
+ +- Option B - Use Kubectl: + + ```shell + kubectl edit cc ks-installer -n kubesphere-system + ``` + +In the YAML file of `ks-installer`, navigate to `multicluster`, set the value of `clusterRole` to `host`, then click **OK** (if you use the web console) to make it effective: + +```yaml +multicluster: + clusterRole: host +``` + +To set the host cluster name, add a field `hostClusterName` under `multicluster.clusterRole` in the YAML file of `ks-installer`: + +```yaml +multicluster: + clusterRole: host + hostClusterName: +``` + +{{< notice note >}} + +- It is recommended that you set the host cluster name while you are preparing your host cluster. When your host cluster is set up and running with resources deployed, it is not recommended that you set the host cluster name. +- The host cluster name can contain only lowercase letters, numbers, hyphens (-), or periods (.), and must start and end with a lowercase letter or number. + +{{}} + +You need to wait for a while so that the change can take effect. + +{{}} + +{{< tab "KubeSphere has not been installed" >}} + +You can define a host cluster before you install KubeSphere either on Linux or on an existing Kubernetes cluster. If you want to [install KubeSphere on Linux](../../../installing-on-linux/introduction/multioverview/#1-create-an-example-configuration-file), you use a `config-sample.yaml` file. If you want to [install KubeSphere on an existing Kubernetes cluster](../../../installing-on-kubernetes/introduction/overview/#deploy-kubesphere), you use two YAML files, one of which is `cluster-configuration.yaml`. + +To set a host cluster, change the value of `clusterRole` to `host` in `config-sample.yaml` or `cluster-configuration.yaml` accordingly before you install KubeSphere. + +```yaml +multicluster: + clusterRole: host +``` + +To set the host cluster name, add a field `hostClusterName` under `multicluster.clusterRole` in `config-sample.yaml` or `cluster-configuration.yaml`: + +```yaml +multicluster: + clusterRole: host + hostClusterName: +``` + +{{< notice note >}} + +- The host cluster name can contain only lowercase letters, numbers, hyphens (-), or periods (.), and must start and end with a lowercase letter or number. + +{{}} + +{{< notice info >}} + +If you install KubeSphere on a single-node cluster ([All-in-One](../../../quick-start/all-in-one-on-linux/)), you do not need to create a `config-sample.yaml` file. In this case, you can set a host cluster after KubeSphere is installed. + +{{}} + +{{}} + +{{}} + +You can use **kubectl** to retrieve the installation logs to verify the status by running the following command. Wait for a while, and you will be able to see the successful log return if the host cluster is ready. + +```bash +kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f +``` + +## Prepare a Member Cluster + +In order to manage the member cluster from the **host cluster**, you need to make `jwtSecret` the same between them. Therefore, get it first by excuting the following command on the **host cluster**. 
+ +```bash +kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret +``` + +The output may look like this: + +```yaml +jwtSecret: "gfIwilcc0WjNGKJ5DLeksf2JKfcLgTZU" +``` + +{{< tabs >}} + +{{< tab "KubeSphere has been installed" >}} + +If you already have a standalone KubeSphere cluster installed, you can set the value of `clusterRole` to `member` by editing the cluster configuration. + +- Option A - Use the web console: + + Use the `admin` account to log in to the console and go to **CRDs** on the **Cluster Management** page. Enter the keyword `ClusterConfiguration` and go to its detail page. Edit the YAML of `ks-installer`, which is similar to [Enable Pluggable Components](../../../pluggable-components/). + +- Option B - Use Kubectl: + + ```shell + kubectl edit cc ks-installer -n kubesphere-system + ``` + +In the YAML file of `ks-installer`, enter the corresponding `jwtSecret` shown above: + +```yaml +authentication: + jwtSecret: gfIwilcc0WjNGKJ5DLeksf2JKfcLgTZU +``` + +Scroll down and set the value of `clusterRole` to `member`, then click **OK** (if you use the web console) to make it effective: + +```yaml +multicluster: + clusterRole: member +``` + +You need to **wait for a while** so that the change can take effect. + +{{}} + +{{< tab "KubeSphere has not been installed" >}} + +You can define a member cluster before you install KubeSphere either on Linux or on an existing Kubernetes cluster. If you want to [install KubeSphere on Linux](../../../installing-on-linux/introduction/multioverview/#1-create-an-example-configuration-file), you use a `config-sample.yaml` file. If you want to [install KubeSphere on an existing Kubernetes cluster](../../../installing-on-kubernetes/introduction/overview/#deploy-kubesphere), you use two YAML files, one of which is `cluster-configuration.yaml`. To set a member cluster, enter the value of `jwtSecret` shown above and change the value of `clusterRole` to `member` in `config-sample.yaml` or `cluster-configuration.yaml` accordingly before you install KubeSphere. + +```yaml +authentication: + jwtSecret: gfIwilcc0WjNGKJ5DLeksf2JKfcLgTZU +``` + +```yaml +multicluster: + clusterRole: member +``` + +{{< notice note >}} + +If you install KubeSphere on a single-node cluster ([All-in-One](../../../quick-start/all-in-one-on-linux/)), you do not need to create a `config-sample.yaml` file. In this case, you can set a member cluster after KubeSphere is installed. + +{{}} + +{{}} + +{{}} + +You can use **kubectl** to retrieve the installation logs to verify the status by running the following command. Wait for a while, and you will be able to see the successful log return if the member cluster is ready. + +```bash +kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f +``` + +## Import a Member Cluster + +1. Log in to the KubeSphere console as `admin` and click **Add Cluster** on the **Cluster Management** page. + +2. Enter the basic information of the cluster to be imported on the **Import Cluster** page. You can also click **Edit Mode** in the upper-right corner to view and edit the basic information in YAML format. After you finish editing, click **Next**. + +3. In **Connection Method**, select **Direct connection**, and copy the kubeconfig of the member cluster and paste it into the box. You can also click **Edit Mode** in the upper-right corner to edit the kubeconfig of the member cluster in YAML format. 
+ + {{< notice note >}} + +Make sure the `server` address in KubeConfig is accessible on any node of the host cluster. + + {{}} + +4. Click **Create** and wait for cluster initialization to finish. \ No newline at end of file diff --git a/content/en/docs/v3.4/multicluster-management/enable-multicluster/retrieve-kubeconfig.md b/content/en/docs/v3.4/multicluster-management/enable-multicluster/retrieve-kubeconfig.md new file mode 100644 index 000000000..ffda0b635 --- /dev/null +++ b/content/en/docs/v3.4/multicluster-management/enable-multicluster/retrieve-kubeconfig.md @@ -0,0 +1,43 @@ +--- +title: "Retrieve Kubeconfig" +keywords: 'Kubernetes, KubeSphere, multicluster, hybrid-cloud, kubeconfig' +description: 'Retrieve the Kubeconfig which is needed for cluster importing through direct connection.' +titleLink: "Retrieve KubeConfig" +weight: 5230 +--- + +You need to provide the kubeconfig of a member cluster if you import it using [direct connection](../direct-connection/). + +## Prerequisites + +You have a Kubernetes cluster. + +## Get KubeConfig + +Go to `$HOME/.kube`, and check the file in the directory where, normally, a file named `config` exists. Use the following command to retrieve the KubeConfig file: + +```bash +cat $HOME/.kube/config +``` + +```yaml +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJd01EZ3dPREE1hqaVE3NXhwbGFQNUgwSm5ySk5peTBacFh6QWxjYzZlV2JlaXJ1VgpUbmZUVjZRY3pxaVcrS3RBdFZVbkl4MCs2VTgzL3FiKzdINHk2RnA0aVhUaDJxRHJ6Qkd4dG1UeFlGdC9OaFZlCmhqMHhEbHVMOTVUWkRjOUNmSFgzdGZJeVh5WFR3eWpnQ2g1RldxbGwxVS9qVUo2RjBLVVExZ1pRTFp4TVJMV0MKREM2ZFhvUGlnQ3BNaVRPVXl5SVNhWUVjYVNBMEo5VWZmSGd4ditVcXVleTc0cEM2emszS0lOT2tGMkI1MllxeApUa09OT2VkV2hDUExMZkUveVJqeGw1aFhPL1Z4REFaVC9HQ1Y1a0JZN0toNmRhendmUllOa21IQkhDMD0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=hqaVE3NXhwbGFQNUgwSm5ySk5peTBacFh6QWxjYzZlV2JlaXJ1VgpUbmZUVjZRY3pxaVcrS3RBdFZVbkl4MCs2VTgzL3FiKzdINHk2RnA0aVhUaDJxRHJ6Qkd4dG1UeFlGdC9OaFZlCmhqMHhEbHVMOTVUWkRjOUNmSFgzdGZJeVh5WFR3eWpnQ2g1RldxbGwxVS9qVUo2RjBLVVExZ1pRTFp4TVJMV0MKREM2ZFhvUGlnQ3BNaVRPVXl5SVNhWUVjYVNBMEo5VWZmSGd4ditVcXVleTc0cEM2emszS0lOT2tGMkI1MllxeApUa09OT2VkV2hDUExMZkUveVJqeGw1aFhPL1Z4REFaVC9HQ1Y1a0JZN0toNmRhendmUllOa21IQkhDMD0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + server: https://lb.kubesphere.local:6443 + name: cluster.local +contexts: +- context: + cluster: cluster.local + user: kubernetes-admin + name: kubernetes-admin@cluster.local +current-context: kubernetes-admin@cluster.local +kind: Config +preferences: {} +users: +- name: kubernetes-admin + user: + client-certificate-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM4akNDQWRxZ0F3SUJBZ0lJRzd5REpscVdjdTh3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TURBNE1EZ3dPVEkzTXpkYUZ3MHlNVEE0TURnd09USTNNemhhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnsOTJBUkJDNTRSR3BsZ3VmCmw5a0hPd0lEQVFBQm95Y3dKVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFEQ2FUTXNBR1Vhdnhrazg0NDZnOGNRQUJpSmk5RTZiREV5TwphRnJubC8reGRzRmgvOTFiMlNpM3ZwaHFkZ2k5bXRYWkhhaWI5dnQ3aXdtSEFwbGQxUkhBU25sMFoxWFh1dkhzCmMzcXVIU0puY3dmc3JKT0I4UG9NRjVnaG10a0dPV3g0M2RHTTNHQnpGTVJ4ZGcrNmttNjRNUGhneXl6NTJjYUoKbzhPajNja1Uzd1NWNkxvempRcFVaUnZHV25qQjEwUXFPWXBtQUk4VCtlZkxKZzhuY0drK3V3UUVTeXBYWExpYwoxWVQ2QkFJeFhEK2tUUU1hOFhjdUhHZzlWRkdsUm9yK1EvY3l0S3RDeHVncFlxQ2xvbHVpckFUUnpsemRXamxYCkVQaHVjRWs2UUdIZEpObjd0M2NwRGkzSUdYYXJFdGxQQmFwck9nSGpkOHZVOStpWXdoQT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=TJBUkJDNTRSR3BsZ3VmCmw5a0hPd0lEQVFBQm95Y3dKVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFEQ2FUTXNBR1Vhdnhrazg0NDZnOGNRQUJpSmk5RTZiREV5TwphRnJubC8reGRzRmgvOTFiMlNpM3ZwaHFkZ2k5bXRYWkhhaWI5dnQ3aXdtSEFwbGQxUkhBU25sMFoxWFh1dkhzCmMzcXVIU0puY3dmc3JKT0I4UG9NRjVnaG10a0dPV3g0M2RHTTNHQnpGTVJ4ZGcrNmttNjRNUGhneXl6NTJjYUoKbzhPajNja1Uzd1NWNkxvempRcFVaUnZHV25qQjEwUXFPWXBtQUk4VCtlZkxKZzhuY0drK3V3UUVTeXBYWExpYwoxWVQ2QkFJeFhEK2tUUU1hOFhjdUhHZzlWRkdsUm9yK1EvY3l0S3RDeHVncFlxQ2xvbHVpckFUUnpsemRXamxYCkVQaHVjRWs2UUdIZEpObjd0M2NwRGkzSUdYYXJFdGxQQmFwck9nSGpkOHZVOStpWXdoQT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBeXBLWkdtdmdiSHdNaU9pVU80UHZKZXB2MTJaaE1yRUIxK2xlVnM0dHIzMFNGQ0p1Ck8wc09jL2lUNmFuWEJzUU1XNDF6V3hwV1B5elkzWXlUWEJMTlIrM01pWTl2SFhUeWJ6eitTWnNlTzVENytHL3MKQnR5NkovNGpJb2pZZlRZNTFzUUxyRVJydStmVnNGeUU0U2dXbE1HYWdqV0RIMFltM0VJsOTJBUkJDNTRSR3BsZ3VmCmw5a0hPd0lEQVFBQm95Y3dKVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFEQ2FUTXNBR1Vhdnhrazg0NDZnOGNRQUJpSmk5RTZiREV5TwphRnJubC8reGRzRmgvOTFiMlNpM3ZwaHFkZ2k5bXRYWkhhaWI5dnQ3aXdtSEFwbGQxUkhBU25sMFoxWFh1dkhzCmMzcXVIU0puY3dmc3JKT0I4UG9NRjVnaG10a0dPV3g0M2RHTTNHQnpGTVJ4ZGcrNmttNjRNUGhneXl6NTJjYUoKbzhPajNja1Uzd1NWNkxvempRcFVaUnZHV25qQjEwUXFPWXBtQUk4VCtlZkxKZzhuY0drK3V3UUVTeXBYWExpYwoxWVQ2QkFJeFhEK2tUUU1hOFhjdUhHZzlWRkdsUm9yK1EvY3l0S3RDeHVncFlxQ2xvbHVpckFUUnpsemRXamxYCkVQaHVjRWs2UUdIZEpObjd0M2NwRGkzSUdYYXJFdGxQQmFwck9nSGpkOHZVOStpWXdoQT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=Ygo3THE3a2tBMURKNTBld2pMUTNTd1Yxd2p6N2ZjeDYvbzUwRnJnK083dEJMVVdQNTNHaDQ1VjJpUEp2NkdPYk1uCjhIWElmem83cW5XRFQvU20ybW5HbitUdVY4THdLVWFXL2wya3FkRUNnWUVBcS9zRmR1RDk2Z3VoT2ZaRnczcWMKblZGekNGQ3JsMkUvVkdYQy92SmV1WnJLQnFtSUtNZFI3ajdLWS9WRFVlMnJocVd6MFh2Wm9Sa1FoMkdwWkdIawpDd3NzcENKTVl4L0hETTVaWlBvcittb1J6VE5HNHlDNGhTRGJ2VEFaTmV1VTZTK1hzL1JSTDJ6WnUwemNQQXk1CjJJRVgwelFpZ1JzK3VzS3Jkc1FVZXZrQ2dZQUUrQUNWeDJnMC94bmFsMVFJNmJsK3Y2TDJrZVJtVGppcHB4Wm0KS1JEd2xnaXpsWGxsTjhyQmZwSGNiK1ZnZ282anN2eHFrb0pkTEhBLzFDME5IMWVuS1NoUTlpZVFpeWNsZngwdQpKOE1oeW1JM0RBZUg1REJyOG1rZ0pwNnJwUXNBc1paYmVhOHlLTzV5eVdCYTN6VGxOVnQvNDRibGg5alpnTWNMCjNyUXFVUUtCZ1FETVlXdEt2S0hOQllXV0p5enFERnFPbS9qY3Z3andvcURibUZVMlU3UGs2aUdNVldBV3VYZ3cKSm5qQWtES01GN0JXSnJRUjR6RHVoQlhvQVMxWVhiQ2lGd2hTcXVjWGhFSGlwQ3Nib0haVVRtT1pXUUh4Vlp4bQowU1NiRXFZU2MvZHBDZ1BHRk9IaW1FdUVic05kc2JjRmRETDQyODZHb0psQUxCOGc3VWRUZUE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= +``` diff --git a/content/en/docs/v3.4/multicluster-management/enable-multicluster/update-kubeconfig.md 
b/content/en/docs/v3.4/multicluster-management/enable-multicluster/update-kubeconfig.md new file mode 100644 index 000000000..47dec4e04 --- /dev/null +++ b/content/en/docs/v3.4/multicluster-management/enable-multicluster/update-kubeconfig.md @@ -0,0 +1,18 @@ +--- +title: "Update Kubeconfig" +keywords: 'Kubernetes, KubeSphere, Multi-cluster, kubeconfig' +description: 'Update the kubeconfig of member clusters.' +linkTitle: "Update Kubeconfig" +weight: 5240 +--- + +In multi-cluster environments, if the certificate of a member cluster is about to expire, the system will notify you 7 days before the expiry date. You can update the kubeconfig as follows. + +1. Choose **Platform > Cluster Management**. + +2. On the **Cluster Management** page, click the icon on the right of the member cluster, and click **Update KubeConfig**. + +3. In the **Update KubeConfig** dialog box that is displayed, enter the new kubeconfig, and click **Update**. + + + diff --git a/content/en/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/_index.md b/content/en/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/_index.md new file mode 100644 index 000000000..92ba09b39 --- /dev/null +++ b/content/en/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Import Cloud-hosted Kubernetes Clusters" +weight: 5300 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/import-aliyun-ack.md b/content/en/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/import-aliyun-ack.md new file mode 100644 index 000000000..abac113c6 --- /dev/null +++ b/content/en/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/import-aliyun-ack.md @@ -0,0 +1,70 @@ +--- +title: "Import an Alibaba Cloud Kubernetes (ACK) Cluster" +keywords: 'Kubernetes, KubeSphere, multicluster, ACK' +description: 'Learn how to import an Alibaba Cloud Kubernetes cluster.' +titleLink: "Import an Alibaba Cloud Kubernetes (ACK) Cluster" +weight: 5310 +--- + +This tutorial demonstrates how to import an Alibaba Cloud Kubernetes (ACK) cluster through the [direct connection](../../../multicluster-management/enable-multicluster/direct-connection/) method. If you want to use the agent connection method, refer to [Agent Connection](../../../multicluster-management/enable-multicluster/agent-connection/). + +## Prerequisites + +- You have a Kubernetes cluster with KubeSphere installed, and prepared this cluster as the host cluster. For more information about how to prepare a host cluster, refer to [Prepare a host cluster](../../../multicluster-management/enable-multicluster/direct-connection/#prepare-a-host-cluster). +- You have an ACK cluster with KubeSphere installed to be used as the member cluster. + +## Import an ACK Cluster + +### Step 1: Prepare the ACK Member Cluster + +1. In order to manage the member cluster from the host cluster, you need to make `jwtSecret` the same between them. Therefore, get it first by executing the following command on your host cluster. + + ```bash + kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret + ``` + + The output is similar to the following: + + ```yaml + jwtSecret: "QVguGh7qnURywHn2od9IiOX6X8f8wK8g" + ``` + +2. Log in to the KubeSphere console of the ACK cluster as `admin`. Click **Platform** in the upper-left corner and then select **Cluster Management**.
Go to **CRDs**, enter `ClusterConfiguration` in the search bar, and then press **Enter** on your keyboard. Click **ClusterConfiguration** to go to its detail page. + +4. Click icon on the right and then select **Edit YAML** to edit `ks-installer`. + +5. In the YAML file of `ks-installer`, change the value of `jwtSecret` to the corresponding value shown above and set the value of `clusterRole` to `member`. Click **Update** to save your changes. + + ```yaml + authentication: + jwtSecret: QVguGh7qnURywHn2od9IiOX6X8f8wK8g + ``` + + ```yaml + multicluster: + clusterRole: member + ``` + + {{< notice note >}} + + Make sure you use the value of your own `jwtSecret`. You need to wait for a while so that the changes can take effect. + + {{}} + +### Step 2: Get the kubeconfig file + +Log in to the web console of Alibaba Cloud. Go to **Clusters** under **Container Service - Kubernetes**, click your cluster to go to its detail page, and then select the **Connection Information** tab. You can see the kubeconfig file under the **Public Access** tab. Copy the contents of the kubeconfig file. + +![kubeconfig](/images/docs/v3.3/multicluster-management/import-cloud-hosted-k8s/import-ack/kubeconfig.png) + +### Step 3: Import the ACK member cluster + +1. Log in to the KubeSphere console on your host cluster as `admin`. Click **Platform** in the upper-left corner and then select **Cluster Management**. On the **Cluster Management** page, click **Add Cluster**. + +2. Enter the basic information based on your needs and click **Next**. + +3. In **Connection Method**, select **Direct connection**. Fill in the kubeconfig file of the ACK member cluster and then click **Create**. + +4. Wait for cluster initialization to finish. \ No newline at end of file diff --git a/content/en/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/import-aws-eks.md b/content/en/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/import-aws-eks.md new file mode 100644 index 000000000..c1dc96bf9 --- /dev/null +++ b/content/en/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/import-aws-eks.md @@ -0,0 +1,171 @@ +--- +title: "Import an AWS EKS Cluster" +keywords: 'Kubernetes, KubeSphere, multicluster, Amazon EKS' +description: 'Learn how to import an Amazon Elastic Kubernetes Service cluster.' +titleLink: "Import an AWS EKS Cluster" +weight: 5320 +--- + +This tutorial demonstrates how to import an AWS EKS cluster through the [direct connection](../../../multicluster-management/enable-multicluster/direct-connection/) method. If you want to use the agent connection method, refer to [Agent Connection](../../../multicluster-management/enable-multicluster/agent-connection/). + +## Prerequisites + +- You have a Kubernetes cluster with KubeSphere installed, and prepared this cluster as the host cluster. For more information about how to prepare a host cluster, refer to [Prepare a host cluster](../../../multicluster-management/enable-multicluster/direct-connection/#prepare-a-host-cluster). +- You have an EKS cluster to be used as the member cluster. + +## Import an EKS Cluster + +### Step 1: Deploy KubeSphere on your EKS cluster + +You need to deploy KubeSphere on your EKS cluster first. For more information about how to deploy KubeSphere on EKS, refer to [Deploy KubeSphere on AWS EKS](../../../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks/#install-kubesphere-on-eks). + +### Step 2: Prepare the EKS member cluster + +1. 
In order to manage the member cluster from the host cluster, you need to make `jwtSecret` the same between them. Therefore, get it first by executing the following command on your host cluster. + + ```bash + kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret + ``` + + The output is similar to the following: + + ```yaml + jwtSecret: "QVguGh7qnURywHn2od9IiOX6X8f8wK8g" + ``` + +2. Log in to the KubeSphere console of the EKS cluster as `admin`. Click **Platform** in the upper-left corner and then select **Cluster Management**. + +3. Go to **CRDs**, enter `ClusterConfiguration` in the search bar, and then press **Enter** on your keyboard. Click **ClusterConfiguration** to go to its detail page. + +4. Click on the right and then select **Edit YAML** to edit `ks-installer`. + +5. In the YAML file of `ks-installer`, change the value of `jwtSecret` to the corresponding value shown above and set the value of `clusterRole` to `member`. Click **Update** to save your changes. + + ```yaml + authentication: + jwtSecret: QVguGh7qnURywHn2od9IiOX6X8f8wK8g + ``` + + ```yaml + multicluster: + clusterRole: member + ``` + + {{< notice note >}} + + Make sure you use the value of your own `jwtSecret`. You need to wait for a while so that the changes can take effect. + + {{}} + +### Step 3: Create a new kubeconfig file + +1. [Amazon EKS](https://docs.aws.amazon.com/eks/index.html) doesn’t provide a built-in kubeconfig file as a standard kubeadm cluster does. Nevertheless, you can create a kubeconfig file by referring to this [document](https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html). The generated kubeconfig file will be like the following: + + ```yaml + apiVersion: v1 + clusters: + - cluster: + server: + certificate-authority-data: + name: kubernetes + contexts: + - context: + cluster: kubernetes + user: aws + name: aws + current-context: aws + kind: Config + preferences: {} + users: + - name: aws + user: + exec: + apiVersion: client.authentication.k8s.io/v1alpha1 + command: aws + args: + - "eks" + - "get-token" + - "--cluster-name" + - "" + # - "--role" + # - "" + # env: + # - name: AWS_PROFILE + # value: "" + ``` + + However, this automatically generated kubeconfig file requires the command `aws` (aws CLI tools) to be installed on every computer that wants to use this kubeconfig. + +2. Run the following commands on your local computer to get the token of the ServiceAccount `kubesphere` created by KubeSphere. It has the cluster admin access to the cluster and will be used as the new kubeconfig token. + + ```bash + TOKEN=$(kubectl -n kubesphere-system get secret $(kubectl -n kubesphere-system get sa kubesphere -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 -d) + kubectl config set-credentials kubesphere --token=${TOKEN} + kubectl config set-context --current --user=kubesphere + ``` + +3. 
Retrieve the new kubeconfig file by running the following command: + + ```bash + cat ~/.kube/config + ``` + + The output is similar to the following and you can see that a new user `kubesphere` is inserted and set as the current-context user: + + ```yaml + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZ...S0tLQo= + server: https://*.sk1.cn-north-1.eks.amazonaws.com.cn + name: arn:aws-cn:eks:cn-north-1:660450875567:cluster/EKS-LUSLVMT6 + contexts: + - context: + cluster: arn:aws-cn:eks:cn-north-1:660450875567:cluster/EKS-LUSLVMT6 + user: kubesphere + name: arn:aws-cn:eks:cn-north-1:660450875567:cluster/EKS-LUSLVMT6 + current-context: arn:aws-cn:eks:cn-north-1:660450875567:cluster/EKS-LUSLVMT6 + kind: Config + preferences: {} + users: + - name: arn:aws-cn:eks:cn-north-1:660450875567:cluster/EKS-LUSLVMT6 + user: + exec: + apiVersion: client.authentication.k8s.io/v1alpha1 + args: + - --region + - cn-north-1 + - eks + - get-token + - --cluster-name + - EKS-LUSLVMT6 + command: aws + env: null + - name: kubesphere + user: + token: eyJhbGciOiJSUzI1NiIsImtpZCI6ImlCRHF4SlE5a0JFNDlSM2xKWnY1Vkt5NTJrcDNqRS1Ta25IYkg1akhNRmsifQ.eyJpc3M................9KQtFULW544G-FBwURd6ArjgQ3Ay6NHYWZe3gWCHLmag9gF-hnzxequ7oN0LiJrA-al1qGeQv-8eiOFqX3RPCQgbybmix8qw5U6f-Rwvb47-xA + ``` + + You can run the following command to check that the new kubeconfig does have access to the EKS cluster. + + ```shell + kubectl get nodes + ``` + + The output is simialr to this: + + ``` + NAME STATUS ROLES AGE VERSION + ip-10-0-47-38.cn-north-1.compute.internal Ready 11h v1.18.8-eks-7c9bda + ip-10-0-8-148.cn-north-1.compute.internal Ready 78m v1.18.8-eks-7c9bda + ``` + +### Step 4: Import the EKS member cluster + +1. Log in to the KubeSphere console on your host cluster as `admin`. Click **Platform** in the upper-left corner and then select **Cluster Management**. On the **Cluster Management** page, click **Add Cluster**. + +2. Enter the basic information based on your needs and click **Next**. + +3. In **Connection Method**, select **Direct connection**. Fill in the new kubeconfig file of the EKS member cluster and then click **Create**. + +4. Wait for cluster initialization to finish. \ No newline at end of file diff --git a/content/en/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/import-gke.md b/content/en/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/import-gke.md new file mode 100644 index 000000000..cae855811 --- /dev/null +++ b/content/en/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/import-gke.md @@ -0,0 +1,116 @@ +--- +title: "Import a Google GKE Cluster" +keywords: 'Kubernetes, KubeSphere, multicluster, Google GKE' +description: 'Learn how to import a Google Kubernetes Engine cluster.' +titleLink: "Import a Google GKE Cluster" +weight: 5330 +--- + +This tutorial demonstrates how to import a GKE cluster through the [direct connection](../../../multicluster-management/enable-multicluster/direct-connection/) method. If you want to use the agent connection method, refer to [Agent Connection](../../../multicluster-management/enable-multicluster/agent-connection/). + +## Prerequisites + +- You have a Kubernetes cluster with KubeSphere installed, and prepared this cluster as the host cluster. For more information about how to prepare a host cluster, refer to [Prepare a host cluster](../../../multicluster-management/enable-multicluster/direct-connection/#prepare-a-host-cluster). +- You have a GKE cluster to be used as the member cluster. 
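
If you operate the GKE cluster from a local workstation instead of Cloud Shell, you may first need to fetch its credentials so that `kubectl` targets the right cluster. A minimal sketch, assuming the gcloud CLI is installed and using placeholder names:

```bash
# Fetch a kubeconfig entry for the GKE member cluster (cluster, zone, and
# project names below are placeholders).
gcloud container clusters get-credentials my-gke-cluster \
  --zone us-central1-c --project my-project
# Confirm kubectl now points at the GKE cluster.
kubectl config current-context
```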
+ +## Import a GKE Cluster + +### Step 1: Deploy KubeSphere on your GKE cluster + +You need to deploy KubeSphere on your GKE cluster first. For more information about how to deploy KubeSphere on GKE, refer to [Deploy KubeSphere on GKE](../../../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke/). + +### Step 2: Prepare the GKE member cluster + +1. To manage the member cluster from the host cluster, you need to make `jwtSecret` the same between them. Therefore, get it first by executing the following command on your host cluster. + + ```bash + kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret + ``` + + The output is similar to the following: + + ```yaml + jwtSecret: "QVguGh7qnURywHn2od9IiOX6X8f8wK8g" + ``` + +2. Log in to the KubeSphere console on GKE as `admin`. Click **Platform** in the upper-left corner and then select **Cluster Management**. + +3. Go to **CRDs**, enter `ClusterConfiguration` in the search bar, and then press **Enter** on your keyboard. Click **ClusterConfiguration** to go to its detail page. + +4. Click icon on the right and then select **Edit YAML** to edit `ks-installer`. + +5. In the YAML file of `ks-installer`, change the value of `jwtSecret` to the corresponding value shown above and set the value of `clusterRole` to `member`. + + ```yaml + authentication: + jwtSecret: QVguGh7qnURywHn2od9IiOX6X8f8wK8g + ``` + + ```yaml + multicluster: + clusterRole: member + ``` + + {{< notice note >}} + + Make sure you use the value of your own `jwtSecret`. You need to wait for a while so that the changes can take effect. + + {{}} + +### Step 3: Create a new kubeconfig file + +1. Run the following commands on your GKE Cloud Shell Terminal: + + ```bash + TOKEN=$(kubectl -n kubesphere-system get secret $(kubectl -n kubesphere-system get sa kubesphere -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 -d) + kubectl config set-credentials kubesphere --token=${TOKEN} + kubectl config set-context --current --user=kubesphere + ``` + +2. 
Retrieve the new kubeconfig file by running the following command: + + ```bash + cat ~/.kube/config + ``` + + The output is similar to the following: + + ```yaml + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURLekNDQWhPZ0F3SUJBZ0lSQUtPRUlDeFhyWEdSbjVQS0dlRXNkYzR3RFFZSktvWklodmNOQVFFTEJRQXcKTHpFdE1Dc0dBMVVFQXhNa1pqVTBNVFpoTlRVdFpEZzFZaTAwWkdZNUxXSTVNR1V0TkdNeE0yRTBPR1ZpWW1VMwpNQjRYRFRJeE1ETXhNVEl5TXpBMU0xb1hEVEkyTURNeE1ESXpNekExTTFvd0x6RXRNQ3NHQTFVRUF4TWtaalUwCk1UWmhOVFV0WkRnMVlpMDBaR1k1TFdJNU1HVXROR014TTJFME9HVmlZbVUzTUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdkVHVGtKRjZLVEl3QktlbXNYd3dPSnhtU3RrMDlKdXh4Z1grM0dTMwpoeThVQm5RWEo1d3VIZmFGNHNWcDFzdGZEV2JOZitESHNxaC9MV3RxQk5iSlNCU1ppTC96V3V5OUZNeFZMS2czCjVLdnNnM2drdUpVaFVuK0tMUUFPdTNUWHFaZ2tTejE1SzFOSU9qYm1HZGVWSm5KQTd6NTF2ZkJTTStzQWhGWTgKejJPUHo4aCtqTlJseDAvV0UzTHZEUUMvSkV4WnRCRGFuVFU0anpHMHR2NGk1OVVQN2lWbnlwRHk0dkFkWm5mbgowZncwVnplUXJqT2JuQjdYQTZuUFhseXZubzErclRqakFIMUdtU053c1IwcDRzcEViZ0lXQTNhMmJzeUN5dEJsCjVOdmJKZkVpSTFoTmFOZ3hoSDJNenlOUWVhYXZVa29MdDdPN0xqYzVFWlo4cFFJREFRQUJvMEl3UURBT0JnTlYKSFE4QkFmOEVCQU1DQWdRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVUVyVkJrc3MydGV0Qgp6ZWhoRi92bGdVMlJiM2N3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUdEZVBVa3I1bDB2OTlyMHZsKy9WZjYrCitBanVNNFoyOURtVXFHVC80OHBaR1RoaDlsZDQxUGZKNjl4eXFvME1wUlIyYmJuTTRCL2NVT1VlTE5VMlV4VWUKSGRlYk1oQUp4Qy9Uaks2SHpmeExkTVdzbzVSeVAydWZEOFZob2ZaQnlBVWczajdrTFgyRGNPd1lzNXNrenZ0LwpuVUlhQURLaXhtcFlSSWJ6MUxjQmVHbWROZ21iZ0hTa3MrYUxUTE5NdDhDQTBnSExhMER6ODhYR1psSi80VmJzCjNaWVVXMVExY01IUHd5NnAwV2kwQkpQeXNaV3hZdFJyV3JFWUhZNVZIanZhUG90S3J4Y2NQMUlrNGJzVU1ZZ0wKaTdSaHlYdmJHc0pKK1lNc3hmalU5bm5XYVhLdXM5ZHl0WG1kRGw1R0hNU3VOeTdKYjIwcU5RQkxhWHFkVmY0PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: https://130.211.231.87 + name: gke_grand-icon-307205_us-central1-c_cluster-3 + contexts: + - context: + cluster: gke_grand-icon-307205_us-central1-c_cluster-3 + user: gke_grand-icon-307205_us-central1-c_cluster-3 + name: gke_grand-icon-307205_us-central1-c_cluster-3 + current-context: gke_grand-icon-307205_us-central1-c_cluster-3 + kind: Config + preferences: {} + users: + - name: gke_grand-icon-307205_us-central1-c_cluster-3 + user: + auth-provider: + config: + cmd-args: config config-helper --format=json + cmd-path: /usr/lib/google-cloud-sdk/bin/gcloud + expiry-key: '{.credential.token_expiry}' + token-key: '{.credential.access_token}' + name: gcp + - name: kubesphere + user: + token: eyJhbGciOiJSUzI1NiIsImtpZCI6InNjOFpIb3RrY3U3bGNRSV9NWV8tSlJzUHJ4Y2xnMDZpY3hhc1BoVy0xTGsifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlc3BoZXJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlc3BoZXJlLXRva2VuLXpocmJ3Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6Imt1YmVzcGhlcmUiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiIyMGFmZGI1Ny01MTBkLTRjZDgtYTAwYS1hNDQzYTViNGM0M2MiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZXNwaGVyZS1zeXN0ZW06a3ViZXNwaGVyZSJ9.ic6LaS5rEQ4tXt_lwp7U_C8rioweP-ZdDjlIZq91GOw9d6s5htqSMQfTeVlwTl2Bv04w3M3_pCkvRzMD0lHg3mkhhhP_4VU0LIo4XeYWKvWRoPR2kymLyskAB2Khg29qIPh5ipsOmGL9VOzD52O2eLtt_c6tn-vUDmI_Zw985zH3DHwUYhppGM8uNovHawr8nwZoem27XtxqyBkqXGDD38WANizyvnPBI845YqfYPY5PINPYc9bQBFfgCovqMZajwwhcvPqS6IpG1Qv8TX2lpuJIK0LLjiKaHoATGvHLHdAZxe_zgAC2cT_9Ars3HIN4vzaSX0f-xP--AcRgKVSY9g + ``` + +### Step 4: Import the GKE member cluster + +1. Log in to the KubeSphere console on your host cluster as `admin`. 
Click **Platform** in the upper-left corner and then select **Cluster Management**. On the **Cluster Management** page, click **Add Cluster**. + +2. Enter the basic information based on your needs and click **Next**. + +3. In **Connection Method**, select **Direct connection**. Fill in the new kubeconfig file of the GKE member cluster and then click **Create**. + +4. Wait for cluster initialization to finish. \ No newline at end of file diff --git a/content/en/docs/v3.4/multicluster-management/introduction/_index.md b/content/en/docs/v3.4/multicluster-management/introduction/_index.md new file mode 100644 index 000000000..0b97cbae9 --- /dev/null +++ b/content/en/docs/v3.4/multicluster-management/introduction/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Introduction" +weight: 5100 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/multicluster-management/introduction/kubefed-in-kubesphere.md b/content/en/docs/v3.4/multicluster-management/introduction/kubefed-in-kubesphere.md new file mode 100644 index 000000000..d687f98ec --- /dev/null +++ b/content/en/docs/v3.4/multicluster-management/introduction/kubefed-in-kubesphere.md @@ -0,0 +1,49 @@ +--- +title: "KubeSphere Federation" +keywords: "Kubernetes, KubeSphere, federation, multicluster, hybrid-cloud" +description: "Understand the fundamental concept of Kubernetes federation in KubeSphere, including member clusters and host clusters." +linkTitle: "KubeSphere Federation" +weight: 5120 +--- + +The multi-cluster feature relates to the network connection among multiple clusters. Therefore, it is important to understand the topological relations of clusters. + +## How the Multi-cluster Architecture Works + +Before you use the central control plane of KubeSphere to manage multiple clusters, you need to create a host cluster, also known as **host** cluster. The host cluster, essentially, is a KubeSphere cluster with the multi-cluster feature enabled. It provides you with the control plane for unified management of member clusters, also known as **member** cluster. Member clusters are common KubeSphere clusters without the central control plane. Namely, tenants with necessary permissions (usually cluster administrators) can access the control plane from the host cluster to manage all member clusters, such as viewing and editing resources on member clusters. Conversely, if you access the web console of any member cluster separately, you cannot see any resources on other clusters. + +There can only be one host cluster while multiple member clusters can exist at the same time. In a multi-cluster architecture, the network between the host cluster and member clusters can be [connected directly](../../enable-multicluster/direct-connection/) or [through an agent](../../enable-multicluster/agent-connection/). The network between member clusters can be set in a completely isolated environment. + +If you are using on-premises Kubernetes clusters built through kubeadm, install KubeSphere on your Kubernetes clusters by referring to [Air-gapped Installation on Kubernetes](../../../installing-on-kubernetes/on-prem-kubernetes/install-ks-on-linux-airgapped/), and then enable KubeSphere multi-cluster management through direct connection or agent connection. 
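+
+As a quick reference, the role that a cluster plays is declared in the `multicluster.clusterRole` field of the `ks-installer` ClusterConfiguration. The following is a minimal sketch, assuming the same ClusterConfiguration layout edited in the cluster import tutorials:
+
+```yaml
+# On the host cluster
+multicluster:
+  clusterRole: host
+```
+
+```yaml
+# On each member cluster
+multicluster:
+  clusterRole: member
+```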
+ +![kubesphere-federation](/images/docs/v3.3/multicluster-management/introduction/kubesphere-federation/kubesphere-federation.png) + +## Vendor Agnostic + +KubeSphere features a powerful, inclusive central control plane so that you can manage any KubeSphere clusters in a unified way regardless of deployment environments or cloud providers. + +## Resource Requirements + +Before you enable multi-cluster management, make sure you have enough resources in your environment. + +| Namespace | kube-federation-system | kubesphere-system | +| -------------- | ---------------------- | ----------------- | +| Sub-component | 2 x controller-manager | tower | +| CPU Request | 100 m | 100 m | +| CPU Limit | 500 m | 500 m | +| Memory Request | 64 MiB | 128 MiB | +| Memory Limit | 512 MiB | 256 MiB | +| Installation | Optional | Optional | + +{{< notice note >}} + +- The request and limit of CPU and memory resources all refer to single replica. +- After the multi-cluster feature is enabled, tower and controller-manager will be installed on the host cluster. If you use [agent connection](../../../multicluster-management/enable-multicluster/agent-connection/), only tower is needed for member clusters. If you use [direct connection](../../../multicluster-management/enable-multicluster/direct-connection/), no additional component is needed for member clusters. + +{{}} + +## Use the App Store in a Multi-cluster Architecture + +Different from other components in KubeSphere, the [KubeSphere App Store](../../../pluggable-components/app-store/) serves as a global application pool for all clusters, including host cluster and member clusters. You only need to enable the App Store on the host cluster and you can use functions related to the App Store on member clusters directly (no matter whether the App Store is enabled on member clusters or not), such as [app templates](../../../project-user-guide/application/app-template/) and [app repositories](../../../workspace-administration/app-repository/import-helm-repository/). + +However, if you only enable the App Store on member clusters without enabling it on the host cluster, you will not be able to use the App Store on any cluster in the multi-cluster architecture. diff --git a/content/en/docs/v3.4/multicluster-management/introduction/overview.md b/content/en/docs/v3.4/multicluster-management/introduction/overview.md new file mode 100644 index 000000000..8568c836e --- /dev/null +++ b/content/en/docs/v3.4/multicluster-management/introduction/overview.md @@ -0,0 +1,15 @@ +--- +title: "Kubernetes Multi-Cluster Management — Overview" +keywords: 'Kubernetes, KubeSphere, multicluster, hybrid-cloud' +description: 'Gain a basic understanding of multi-cluster management, such as its common use cases, and the benefits that KubeSphere can bring with its multi-cluster feature.' +linkTitle: "Overview" +weight: 5110 +--- + +Today, it's very common for organizations to run and manage multiple Kubernetes clusters across different cloud providers or infrastructures. As each Kubernetes cluster is a relatively self-contained unit, the upstream community is struggling to research and develop a multi-cluster management solution. That said, Kubernetes Cluster Federation ([KubeFed](https://github.com/kubernetes-sigs/kubefed) for short) may be a possible approach among others. 
+ +The most common use cases of multi-cluster management include service traffic load balancing, development and production isolation, decoupling of data processing and data storage, cross-cloud backup and disaster recovery, flexible allocation of computing resources, low latency access with cross-region services, and vendor lock-in avoidance. + +KubeSphere is developed to address multi-cluster and multi-cloud management challenges, including the scenarios mentioned above. It provides users with a unified control plane to distribute applications and their replicas to multiple clusters from public cloud to on-premises environments. KubeSphere also boasts rich observability across multiple clusters including centralized monitoring, logging, events, and auditing logs. + +![multi-cluster-overview](/images/docs/v3.3/multicluster-management/introduction/overview/multi-cluster-overview.jpg) diff --git a/content/en/docs/v3.4/multicluster-management/unbind-cluster.md b/content/en/docs/v3.4/multicluster-management/unbind-cluster.md new file mode 100644 index 000000000..e6dc92b65 --- /dev/null +++ b/content/en/docs/v3.4/multicluster-management/unbind-cluster.md @@ -0,0 +1,61 @@ +--- +title: "Remove a Member Cluster" +keywords: 'Kubernetes, KubeSphere, multicluster, hybrid-cloud' +description: 'Learn how to remove a member cluster from your cluster pool in KubeSphere.' +linkTitle: "Remove a Member Cluster" +weight: 5500 +--- + +This tutorial demonstrates how to remove a member cluster on the KubeSphere web console. + +## Prerequisites + +- You have enabled multi-cluster management. +- You need a user granted a role including the authorization of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the authorization and assign it to a user. + +## Remove a Cluster + +You can remove a cluster by using either of the following methods: + +**Method 1** + +1. Click **Platform** in the upper-left corner and select **Cluster Management**. + +2. In the **Member Clusters** area, click the icon on the right of the cluster that you want to remove from the control plane, and then click **Remove Cluster**. + +3. In the **Remove Cluster** dialog box that is displayed, read the risk alert carefully. If you still want to proceed, enter the name of the member cluster, and click **OK**. + +**Method 2** + +1. Click **Platform** in the upper-left corner and select **Cluster Management**. + +2. In the **Member Clusters** area, click the name of the member cluster that you want to remove from the control plane. + +3. In the navigation tree on the left, select **Cluster Settings** > **Basic Information**. + +4. In the **Cluster Information** area, click **Manage** > **Remove Cluster**. + +5. In the **Remove Cluster** dialog box that is displayed, read the risk alert carefully. If you still want to proceed, enter the name of the member cluster, and click **OK**. + + {{< notice warning >}} + + * After the member cluster has been removed, existing resources of the removed member cluster will not be automatically cleaned up. + + * After the member cluster has been removed, multi-cluster configuration data of the removed member cluster will not be automatically cleaned up, which results in data loss when you uninstall KubeSphere or delete associated resources. + + {{}} + +6. 
Run the following command to clean up multi-cluster configuration data of the removed member cluster: + + ```bash + for ns in $(kubectl get ns --field-selector status.phase!=Terminating -o jsonpath='{.items[*].metadata.name}'); do kubectl label ns $ns kubesphere.io/workspace- && kubectl patch ns $ns --type merge -p '{"metadata":{"ownerReferences":[]}}'; done + ``` + +## Remove an Unhealthy Cluster + +On some occasions, you cannot remove a cluster by following the steps above. For example, if you import a cluster with the wrong credentials, you cannot access **Cluster Settings**. In this case, execute the following command to remove an unhealthy cluster: + +```bash +kubectl delete cluster <cluster-name> +``` + diff --git a/content/en/docs/v3.4/pluggable-components/_index.md b/content/en/docs/v3.4/pluggable-components/_index.md new file mode 100644 index 000000000..f06962770 --- /dev/null +++ b/content/en/docs/v3.4/pluggable-components/_index.md @@ -0,0 +1,13 @@ +--- +title: "Enable Pluggable Components" +description: "Enable KubeSphere Pluggable Components" +layout: "second" + +linkTitle: "Enable Pluggable Components" + +weight: 6000 + +icon: "/images/docs/v3.3/docs.svg" +--- + +This chapter demonstrates detailed steps of enabling different components in KubeSphere both before and after installation so that you can take full advantage of the [container platform](https://kubesphere.io/) for your business. diff --git a/content/en/docs/v3.4/pluggable-components/alerting.md b/content/en/docs/v3.4/pluggable-components/alerting.md new file mode 100644 index 000000000..8495ebbe4 --- /dev/null +++ b/content/en/docs/v3.4/pluggable-components/alerting.md @@ -0,0 +1,100 @@ +--- +title: "KubeSphere Alerting" +keywords: "Kubernetes, alertmanager, KubeSphere, alerting" +description: "Learn how to enable Alerting to identify any potential issues in advance before they take a toll on your business." +linkTitle: "KubeSphere Alerting" +weight: 6600 +--- + +Alerting is an important building block of observability, closely related to monitoring and logging. The alerting system in KubeSphere, coupled with the proactive failure notification system, allows users to know activities of interest based on alerting policies. When a predefined threshold of a certain metric is reached, an alert will be sent to preconfigured recipients. Therefore, you need to configure the notification method beforehand, including Email, Slack, DingTalk, WeCom, and Webhook. With a highly functional alerting and notification system in place, you can quickly identify and resolve potential issues in advance before they affect your business. + +## Enable Alerting Before Installation + +### Installing on Linux + +When you implement multi-node installation of KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components. + +1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file `config-sample.yaml`. Modify the file by executing the following command: + + ```bash + vi config-sample.yaml + ``` + + {{< notice note >}} +If you adopt [All-in-One Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a `config-sample.yaml` file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system.
If you want to enable Alerting in this mode (for example, for testing purposes), refer to [the following section](#enable-alerting-after-installation) to see how Alerting can be enabled after installation. + {{}} + +2. In this file, navigate to `alerting` and change `false` to `true` for `enabled`. Save the file after you finish. + + ```yaml + alerting: + enabled: true # Change "false" to "true". + ``` + +3. Create a cluster using the configuration file: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +### Installing on Kubernetes + +As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable Alerting first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) file. + +1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) and edit it. + + ```bash + vi cluster-configuration.yaml + ``` + +2. In this local `cluster-configuration.yaml` file, navigate to `alerting` and enable it by changing `false` to `true` for `enabled`. Save the file after you finish. + + ```yaml + alerting: + enabled: true # Change "false" to "true". + ``` + +3. Execute the following commands to start installation: + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f cluster-configuration.yaml + ``` + +## Enable Alerting After Installation + +1. Log in to the console as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. + +2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detail page. + + {{< notice info >}} +A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects. + {{}} + +3. In **Custom Resources**, click on the right of `ks-installer` and select **Edit YAML**. + +4. In this YAML file, navigate to `alerting` and change `false` to `true` for `enabled`. After you finish, click **OK** in the lower-right corner to save the configuration. + + ```yaml + alerting: + enabled: true # Change "false" to "true". + ``` + +5. You can use the web kubectl to check the installation process by executing the following command: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + {{< notice note >}} + +You can find the web kubectl tool by clicking in the lower-right corner of the console. + {{}} + +## Verify the Installation of the Component + +If you can see **Alerting Messages** and **Alerting Policies** on the **Cluster Management** page, it means the installation is successful as the two parts won't display until the component is installed. + + + diff --git a/content/en/docs/v3.4/pluggable-components/app-store.md b/content/en/docs/v3.4/pluggable-components/app-store.md new file mode 100644 index 000000000..09c41607f --- /dev/null +++ b/content/en/docs/v3.4/pluggable-components/app-store.md @@ -0,0 +1,120 @@ +--- +title: "KubeSphere App Store" +keywords: "Kubernetes, KubeSphere, app-store, OpenPitrix" +description: "Learn how to enable the KubeSphere App Store to share data and apps internally and set industry standards of delivery process externally." 
+linkTitle: "KubeSphere App Store" +weight: 6200 +--- + +As an open-source and app-centric container platform, KubeSphere provides users with a Helm-based App Store for application lifecycle management on the back of [OpenPitrix](https://github.com/openpitrix/openpitrix), an open-source web-based system to package, deploy and manage different types of apps. The KubeSphere App Store allows ISVs, developers, and users to upload, test, install, and release apps with just several clicks in a one-stop shop. + +Internally, the KubeSphere App Store can serve as a place for different teams to share data, middleware, and office applications. Externally, it is conducive to setting industry standards of building and delivery. After you enable this feature, you can add more apps with app templates. + +For more information, see [App Store](../../application-store/). + +## Enable the App Store Before Installation + +### Installing on Linux + +When you implement multi-node installation of KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components. + +1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file `config-sample.yaml`. Modify the file by running the following command: + + ```bash + vi config-sample.yaml + ``` + + {{< notice note >}} +If you adopt [All-in-One Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a `config-sample.yaml` file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable the App Store in this mode (for example, for testing purposes), refer to [the following section](#enable-app-store-after-installation) to see how the App Store can be installed after installation. + {{}} + +2. In this file, search for `openpitrix` and change `false` to `true` for `enabled`. Save the file after you finish. + + ```yaml + openpitrix: + store: + enabled: true # Change "false" to "true". + ``` + +3. Create a cluster using the configuration file: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +### Installing on Kubernetes + +As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable the KubeSphere App Store first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) file. + +1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) and edit it. + + ```bash + vi cluster-configuration.yaml + ``` + +2. In this local `cluster-configuration.yaml` file, search for `openpitrix` and enable the App Store by changing `false` to `true` for `enabled`. Save the file after you finish. + + ```yaml + openpitrix: + store: + enabled: true # Change "false" to "true". + ``` + +3. Run the following commands to start installation: + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f cluster-configuration.yaml + ``` + +## Enable the App Store After Installation + +1. Log in to the console as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. + +2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detail page. 
+ + {{< notice info >}} + +A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects. + +{{}} + +3. In **Custom Resources**, click on the right of `ks-installer` and select **Edit YAML**. + +4. In this YAML file, search for `openpitrix` and change `false` to `true` for `enabled`. After you finish, click **OK** in the lower-right corner to save the configuration. + + ```yaml + openpitrix: + store: + enabled: true # Change "false" to "true". + ``` + +5. Use the web kubectl to check the installation process by running the following command: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + {{< notice note >}} + +You can find the web kubectl tool by clicking in the lower-right corner of the console. + +{{}} + +## Verify the Installation of the Component + +After you log in to the console, if you can see **App Store** in the upper-left corner and apps in it, it means the installation is successful. + +{{< notice note >}} + +- You can even access the App Store without logging in to the console by visiting `<NodeIP>:30880/apps`. +- The **OpenPitrix** tab in KubeSphere 3.3 does not appear on the **System Components** page after the App Store is enabled. + +{{}} + +## Use the App Store in a Multi-cluster Architecture + +[In a multi-cluster architecture](../../multicluster-management/introduction/kubefed-in-kubesphere/), you have one Host Cluster (H Cluster) managing all Member Clusters (M Clusters). Different from other components in KubeSphere, the App Store serves as a global application pool for all clusters, including H Cluster and M Clusters. You only need to enable the App Store on the H Cluster and you can use functions related to the App Store on M Clusters directly (no matter whether the App Store is enabled on M Clusters or not), such as [App Templates](../../project-user-guide/application/app-template/) and [App Repositories](../../workspace-administration/app-repository/import-helm-repository/). + +However, if you only enable the App Store on M Clusters without enabling it on the H Cluster, you will not be able to use the App Store on any cluster in the multi-cluster architecture. diff --git a/content/en/docs/v3.4/pluggable-components/auditing-logs.md b/content/en/docs/v3.4/pluggable-components/auditing-logs.md new file mode 100644 index 000000000..36f691433 --- /dev/null +++ b/content/en/docs/v3.4/pluggable-components/auditing-logs.md @@ -0,0 +1,182 @@ +--- +title: "KubeSphere Audit Logs" +keywords: "Kubernetes, auditing, KubeSphere, logs" +description: "Learn how to enable Auditing to document platform events and activities." +linkTitle: "KubeSphere Audit Logs" +weight: 6700 +--- + +The KubeSphere Auditing Log System provides a security-relevant chronological set of records documenting the sequence of activities related to individual users, managers, or other components of the system. Each request to KubeSphere generates an event that is then written to a webhook and processed according to a certain rule. + +For more information, see [Auditing Log Query](../../toolbox/auditing/auditing-query/). + +## Enable Auditing Logs Before Installation + +### Installing on Linux + +When you implement multi-node installation of KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components. + +1. 
In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file `config-sample.yaml`. Modify the file by executing the following command: + + ```bash + vi config-sample.yaml + ``` + + {{< notice note >}} +If you adopt [All-in-One Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a `config-sample.yaml` file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable Auditing in this mode (for example, for testing purposes), refer to [the following section](#enable-auditing-logs-after-installation) to see how Auditing can be installed after installation. + {{}} + +2. In this file, navigate to `auditing` and change `false` to `true` for `enabled`. Save the file after you finish. + + ```yaml + auditing: + enabled: true # Change "false" to "true". + ``` + + {{< notice note >}} +By default, KubeKey will install Elasticsearch internally if Auditing is enabled. For a production environment, it is highly recommended that you set the following values in `config-sample.yaml` if you want to enable Auditing, especially `externalElasticsearchHost` and `externalElasticsearchPort`. Once you provide the following information before installation, KubeKey will integrate your external Elasticsearch directly instead of installing an internal one. + {{}} + + ```yaml + es: # Storage backend for logging, tracing, events and auditing. + elasticsearchMasterReplicas: 1 # The total number of master nodes. Even numbers are not allowed. + elasticsearchDataReplicas: 1 # The total number of data nodes. + elasticsearchMasterVolumeSize: 4Gi # The volume size of Elasticsearch master nodes. + elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. + logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. + elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. + externalElasticsearchHost: # The Host of external Elasticsearch. + externalElasticsearchPort: # The port of external Elasticsearch. + ``` + +3. Create a cluster using the configuration file: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +### Installing on Kubernetes + +As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable KubeSphere Auditing first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) file. + +1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) and edit it. + + ```bash + vi cluster-configuration.yaml + ``` + +2. In this local `cluster-configuration.yaml` file, navigate to `auditing` and enable Auditing by changing `false` to `true` for `enabled`. Save the file after you finish. + + ```yaml + auditing: + enabled: true # Change "false" to "true". + ``` + + {{< notice note >}} +By default, ks-installer will install Elasticsearch internally if Auditing is enabled. For a production environment, it is highly recommended that you set the following values in `cluster-configuration.yaml` if you want to enable Auditing, especially `externalElasticsearchHost` and `externalElasticsearchPort`. 
Once you provide the following information before installation, ks-installer will integrate your external Elasticsearch directly instead of installing an internal one. + {{}} + + ```yaml + es: # Storage backend for logging, tracing, events and auditing. + elasticsearchMasterReplicas: 1 # The total number of master nodes. Even numbers are not allowed. + elasticsearchDataReplicas: 1 # The total number of data nodes. + elasticsearchMasterVolumeSize: 4Gi # The volume size of Elasticsearch master nodes. + elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. + logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. + elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. + externalElasticsearchHost: # The Host of external Elasticsearch. + externalElasticsearchPort: # The port of external Elasticsearch. + ``` + +3. Execute the following commands to start installation: + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f cluster-configuration.yaml + ``` + +## Enable Auditing Logs After Installation + +1. Log in to the console as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. + +2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detail page. + + {{< notice info >}} +A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects. + {{}} + +3. In **Custom Resources**, click on the right of `ks-installer` and select **Edit YAML**. + +4. In this YAML file, navigate to `auditing` and change `false` to `true` for `enabled`. After you finish, click **OK** in the lower-right corner to save the configuration. + + ```yaml + auditing: + enabled: true # Change "false" to "true". + ``` + + {{< notice note >}} +By default, Elasticsearch will be installed internally if Auditing is enabled. For a production environment, it is highly recommended that you set the following values in this yaml file if you want to enable Auditing, especially `externalElasticsearchHost` and `externalElasticsearchPort`. Once you provide the following information, KubeSphere will integrate your external Elasticsearch directly instead of installing an internal one. + {{}} + + ```yaml + es: # Storage backend for logging, tracing, events and auditing. + elasticsearchMasterReplicas: 1 # The total number of master nodes. Even numbers are not allowed. + elasticsearchDataReplicas: 1 # The total number of data nodes. + elasticsearchMasterVolumeSize: 4Gi # The volume size of Elasticsearch master nodes. + elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. + logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. + elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. + externalElasticsearchHost: # The Host of external Elasticsearch. + externalElasticsearchPort: # The port of external Elasticsearch. + ``` + +5. 
You can use the web kubectl to check the installation process by executing the following command: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + {{< notice note >}} + +You can find the web kubectl tool by clicking in the lower-right corner of the console. + {{}} + +## Verify the Installation of the Component + +{{< tabs >}} + +{{< tab "Verify the component on the dashboard" >}} + +Verify that you can use the **Audit Log Search** function from the **Toolbox** in the lower-right corner. + +{{}} + +{{< tab "Verify the component through kubectl" >}} + +Execute the following command to check the status of Pods: + +```bash +kubectl get pod -n kubesphere-logging-system +``` + +The output may look as follows if the component runs successfully: + +```yaml +NAME READY STATUS RESTARTS AGE +elasticsearch-logging-curator-elasticsearch-curator-159872n9g9g 0/1 Completed 0 2d10h +elasticsearch-logging-curator-elasticsearch-curator-159880tzb7x 0/1 Completed 0 34h +elasticsearch-logging-curator-elasticsearch-curator-1598898q8w7 0/1 Completed 0 10h +elasticsearch-logging-data-0 1/1 Running 1 2d20h +elasticsearch-logging-data-1 1/1 Running 1 2d20h +elasticsearch-logging-discovery-0 1/1 Running 1 2d20h +fluent-bit-6v5fs 1/1 Running 1 2d20h +fluentbit-operator-5bf7687b88-44mhq 1/1 Running 1 2d20h +kube-auditing-operator-7574bd6f96-p4jvv 1/1 Running 1 2d20h +kube-auditing-webhook-deploy-6dfb46bb6c-hkhmx 1/1 Running 1 2d20h +kube-auditing-webhook-deploy-6dfb46bb6c-jp77q 1/1 Running 1 2d20h +``` + +{{}} + +{{}} diff --git a/content/en/docs/v3.4/pluggable-components/devops.md b/content/en/docs/v3.4/pluggable-components/devops.md new file mode 100644 index 000000000..a090184a1 --- /dev/null +++ b/content/en/docs/v3.4/pluggable-components/devops.md @@ -0,0 +1,130 @@ +--- +title: "KubeSphere DevOps System" +keywords: "Kubernetes, Jenkins, KubeSphere, DevOps, cicd" +description: "Learn how to enable DevOps to further free your developers and let them focus on code writing." +linkTitle: "KubeSphere DevOps System" +weight: 6300 +--- + +The KubeSphere DevOps System is designed for CI/CD workflows in Kubernetes. Based on [Jenkins](https://jenkins.io/), it provides one-stop solutions to help both development and Ops teams build, test and publish apps to Kubernetes in a straight-forward way. It also features plugin management, [Binary-to-Image (B2I)](../../project-user-guide/image-builder/binary-to-image/), [Source-to-Image (S2I)](../../project-user-guide/image-builder/source-to-image/), code dependency caching, code quality analysis, pipeline logging, and more. + +The DevOps System offers an automated environment for users as apps can be automatically released to the same platform. It is also compatible with third-party private image registries (for example, Harbor) and code repositories (for example, GitLab/GitHub/SVN/BitBucket). As such, it creates excellent user experience by providing users with comprehensive, visualized CI/CD pipelines which are extremely useful in air-gapped environments. + +For more information, see [DevOps User Guide](../../devops-user-guide/). + +## Enable DevOps Before Installation + +### Installing on Linux + +When you implement multi-node installation of KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components. + +1. 
In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file `config-sample.yaml`. Modify the file by running the following command: + + ```bash + vi config-sample.yaml + ``` + + {{< notice note >}} +If you adopt [All-in-One Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a `config-sample.yaml` file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable DevOps in this mode (for example, for testing purposes), refer to [the following section](#enable-devops-after-installation) to see how DevOps can be installed after installation. + {{}} + +2. In this file, search for `devops` and change `false` to `true` for `enabled`. Save the file after you finish. + + ```yaml + devops: + enabled: true # Change "false" to "true". + ``` + +3. Create a cluster using the configuration file: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +### Installing on Kubernetes + +As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable KubeSphere DevOps first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) file. + +1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) and edit it. + + ```bash + vi cluster-configuration.yaml + ``` + +2. In this local `cluster-configuration.yaml` file, search for `devops` and enable DevOps by changing `false` to `true` for `enabled`. Save the file after you finish. + + ```yaml + devops: + enabled: true # Change "false" to "true". + ``` + +3. Run the following commands to start installation: + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f cluster-configuration.yaml + ``` + +## Enable DevOps After Installation + +1. Log in to the console as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. + +2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detail page. + + {{< notice info >}} + +A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects. + +{{}} + +3. In **Custom Resources**, click on the right of `ks-installer` and select **Edit YAML**. + +4. In this YAML file, search for `devops` and change `false` to `true` for `enabled`. After you finish, click **OK** in the lower-right corner to save the configuration. + + ```yaml + devops: + enabled: true # Change "false" to "true". + ``` + +5. Use the web kubectl to check the installation process by running the following command: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + {{< notice note >}} + +You can find the web kubectl tool by clicking in the lower-right corner of the console. + +{{}} + +## Verify the Installation of the Component + +{{< tabs >}} + +{{< tab "Verify the component on the dashboard" >}} + +Go to **System Components** and check that all components on the **DevOps** tab page is in **Healthy** state. 
+ +{{}} + +{{< tab "Verify the component through kubectl" >}} + +Run the following command to check the status of Pods: + +```bash +kubectl get pod -n kubesphere-devops-system +``` + +The output may look as follows if the component runs successfully: + +```bash +NAME READY STATUS RESTARTS AGE +devops-jenkins-5cbbfbb975-hjnll 1/1 Running 0 40m +s2ioperator-0 1/1 Running 0 41m +``` + +{{}} + +{{}} diff --git a/content/en/docs/v3.4/pluggable-components/events.md b/content/en/docs/v3.4/pluggable-components/events.md new file mode 100644 index 000000000..f4454d145 --- /dev/null +++ b/content/en/docs/v3.4/pluggable-components/events.md @@ -0,0 +1,191 @@ +--- +title: "KubeSphere Events" +keywords: "Kubernetes, events, KubeSphere, k8s-events" +description: "Learn how to enable Events to keep track of everything that is happening on the platform." +linkTitle: "KubeSphere Events" +weight: 6500 +--- + +KubeSphere events allow users to keep track of what is happening inside a cluster, such as node scheduling status and image pulling result. They will be accurately recorded with the specific reason, status and message displayed in the web console. To query events, users can quickly launch the web Toolkit and enter related information in the search bar with different filters (e.g keyword and project) available. Events can also be archived to third-party tools, such as Elasticsearch, Kafka, or Fluentd. + +For more information, see [Event Query](../../toolbox/events-query/). + +## Enable Events Before Installation + +### Installing on Linux + +When you implement multi-node installation of KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components. + +1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file `config-sample.yaml`. Modify the file by executing the following command: + + ```bash + vi config-sample.yaml + ``` + + {{< notice note >}} + +If you adopt [All-in-One Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a `config-sample.yaml` file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable Events in this mode (for example, for testing purposes), refer to [the following section](#enable-events-after-installation) to see how Events can be [installed after installation](#enable-events-after-installation). + +{{}} + +2. In this file, navigate to `events` and change `false` to `true` for `enabled`. Save the file after you finish. + + ```yaml + events: + enabled: true # Change "false" to "true". + ``` + + {{< notice note >}} +By default, KubeKey will install Elasticsearch internally if Events is enabled. For a production environment, it is highly recommended that you set the following values in `config-sample.yaml` if you want to enable Events, especially `externalElasticsearchHost` and `externalElasticsearchPort`. Once you provide the following information before installation, KubeKey will integrate your external Elasticsearch directly instead of installing an internal one. + {{}} + + ```yaml + es: # Storage backend for logging, tracing, events and auditing. + elasticsearchMasterReplicas: 1 # The total number of master nodes. Even numbers are not allowed. + elasticsearchDataReplicas: 1 # The total number of data nodes. + elasticsearchMasterVolumeSize: 4Gi # The volume size of Elasticsearch master nodes. 
+ elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. + logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. + elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. + externalElasticsearchHost: # The Host of external Elasticsearch. + externalElasticsearchPort: # The port of external Elasticsearch. + ``` + +3. Create a cluster using the configuration file: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +### Installing on Kubernetes + +As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable KubeSphere Events first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) file. + +1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) and edit it. + + ```bash + vi cluster-configuration.yaml + ``` + +2. In this local `cluster-configuration.yaml` file, navigate to `events` and enable Events by changing `false` to `true` for `enabled`. Save the file after you finish. + + ```yaml + events: + enabled: true # Change "false" to "true". + ``` + + {{< notice note >}} +By default, ks-installer will install Elasticsearch internally if Events is enabled. For a production environment, it is highly recommended that you set the following values in `cluster-configuration.yaml` if you want to enable Events, especially `externalElasticsearchHost` and `externalElasticsearchPort`. Once you provide the following information before installation, ks-installer will integrate your external Elasticsearch directly instead of installing an internal one. + {{}} + + ```yaml + es: # Storage backend for logging, tracing, events and auditing. + elasticsearchMasterReplicas: 1 # The total number of master nodes. Even numbers are not allowed. + elasticsearchDataReplicas: 1 # The total number of data nodes. + elasticsearchMasterVolumeSize: 4Gi # The volume size of Elasticsearch master nodes. + elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. + logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. + elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. + externalElasticsearchHost: # The Host of external Elasticsearch. + externalElasticsearchPort: # The port of external Elasticsearch. + ``` + +3. Execute the following commands to start installation: + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f cluster-configuration.yaml + ``` + +## Enable Events After Installation + +1. Log in to the console as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. + +2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detail page. + + {{< notice info >}} + +A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects. + +{{}} + +3. In **Custom Resources**, click on the right of `ks-installer` and select **Edit YAML**. + +4. In this YAML file, navigate to `events` and change `false` to `true` for `enabled`. After you finish, click **OK** in the lower-right corner to save the configuration. 
+ + ```yaml + events: + enabled: true # Change "false" to "true". + ``` + + {{< notice note >}} + +By default, Elasticsearch will be installed internally if Events is enabled. For a production environment, it is highly recommended that you set the following values in this yaml file if you want to enable Events, especially `externalElasticsearchHost` and `externalElasticsearchPort`. Once you provide the following information, KubeSphere will integrate your external Elasticsearch directly instead of installing an internal one. + {{}} + + ```yaml + es: # Storage backend for logging, tracing, events and auditing. + elasticsearchMasterReplicas: 1 # The total number of master nodes. Even numbers are not allowed. + elasticsearchDataReplicas: 1 # The total number of data nodes. + elasticsearchMasterVolumeSize: 4Gi # The volume size of Elasticsearch master nodes. + elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. + logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. + elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. + externalElasticsearchHost: # The Host of external Elasticsearch. + externalElasticsearchPort: # The port of external Elasticsearch. + ``` + +5. You can use the web kubectl to check the installation process by executing the following command: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + {{< notice note >}} + +You can find the web kubectl tool by clicking in the lower-right corner of the console. + +{{}} + +## Verify the Installation of the Component + +{{< tabs >}} + +{{< tab "Verify the component on the dashboard" >}} + +Verify that you can use the **Resource Event Search** function from the **Toolbox** in the lower-right corner. + +{{}} + +{{< tab "Verify the component through kubectl" >}} + +Execute the following command to check the status of Pods: + +```bash +kubectl get pod -n kubesphere-logging-system +``` + +The output may look as follows if the component runs successfully: + +```bash +NAME READY STATUS RESTARTS AGE +elasticsearch-logging-data-0 1/1 Running 0 155m +elasticsearch-logging-data-1 1/1 Running 0 154m +elasticsearch-logging-discovery-0 1/1 Running 0 155m +fluent-bit-bsw6p 1/1 Running 0 108m +fluent-bit-smb65 1/1 Running 0 108m +fluent-bit-zdz8b 1/1 Running 0 108m +fluentbit-operator-9b69495b-bbx54 1/1 Running 0 109m +ks-events-exporter-5cb959c74b-gx4hw 2/2 Running 0 7m55s +ks-events-operator-7d46fcccc9-4mdzv 1/1 Running 0 8m +ks-events-ruler-8445457946-cl529 2/2 Running 0 7m55s +ks-events-ruler-8445457946-gzlm9 2/2 Running 0 7m55s +logsidecar-injector-deploy-667c6c9579-cs4t6 2/2 Running 0 106m +logsidecar-injector-deploy-667c6c9579-klnmf 2/2 Running 0 106m +``` + +{{}} + +{{}} + diff --git a/content/en/docs/v3.4/pluggable-components/kubeedge.md b/content/en/docs/v3.4/pluggable-components/kubeedge.md new file mode 100644 index 000000000..a45841309 --- /dev/null +++ b/content/en/docs/v3.4/pluggable-components/kubeedge.md @@ -0,0 +1,184 @@ +--- +title: "KubeEdge" +keywords: "Kubernetes, KubeSphere, Kubeedge" +description: "Learn how to enable KubeEdge to add edge nodes to your cluster." +linkTitle: "KubeEdge" +weight: 6930 +--- + +[KubeEdge](https://kubeedge.io/en/) is an open-source system for extending native containerized application orchestration capabilities to hosts at edge. 
It supports multiple edge protocols and looks to provide unified management of cloud and edge applications and resources. + +KubeEdge has components running in two separate places - cloud and edge nodes. The components running on the cloud, collectively known as CloudCore, include Controllers and Cloud Hub. Cloud Hub serves as the gateway for the requests sent by edge nodes while Controllers function as orchestrators. The components running on edge nodes, collectively known as EdgeCore, include EdgeHub, EdgeMesh, MetadataManager, and DeviceTwin. For more information, see [the KubeEdge website](https://kubeedge.io/en/). + +After you enable KubeEdge, you can [add edge nodes to your cluster](../../installing-on-linux/cluster-operation/add-edge-nodes/) and deploy workloads on them. + +![kubeedge_arch](/images/docs/v3.3/enable-pluggable-components/kubeedge/kubeedge_arch.png) + +## Enable KubeEdge Before Installation + +### Installing on Linux + +When you implement multi-node installation of KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components. + +1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file `config-sample.yaml`. Modify the file by executing the following command: + + ```bash + vi config-sample.yaml + ``` + + {{< notice note >}} + If you adopt [All-in-One Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a `config-sample.yaml` file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable KubeEdge in this mode (for example, for testing purposes), refer to [the following section](#enable-kubeedge-after-installation) to see how KubeEdge can be installed after installation. + {{}} + +2. In this file, navigate to `edgeruntime` and `kubeedge`, and change the value of `enabled` from `false` to `true` to enable all KubeEdge components. Click **OK**. + + ```yaml + edgeruntime: # Add edge nodes to your cluster and deploy workloads on edge nodes. + enabled: false + kubeedge: # kubeedge configurations + enabled: false + cloudCore: + cloudHub: + advertiseAddress: # At least a public IP address or an IP address which can be accessed by edge nodes must be provided. + - "" # Note that once KubeEdge is enabled, CloudCore will malfunction if the address is not provided. + service: + cloudhubNodePort: "30000" + cloudhubQuicNodePort: "30001" + cloudhubHttpsNodePort: "30002" + cloudstreamNodePort: "30003" + tunnelNodePort: "30004" + # resources: {} + # hostNetWork: false + ``` + +3. Set the value of `kubeedge.cloudCore.cloudHub.advertiseAddress` to the public IP address of your cluster or an IP address that can be accessed by edge nodes. Save the file when you finish editing. + +4. Create a cluster using the configuration file: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +### Installing on Kubernetes + +As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable KubeEdge first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) file. + +1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) and edit it. + + ```bash + vi cluster-configuration.yaml + ``` + +2. 
In this local `cluster-configuration.yaml` file, navigate to `edgeruntime` and `kubeedge`, and change the value of `enabled` from `false` to `true` to enable all KubeEdge components. Click **OK**. + + ```yaml + edgeruntime: # Add edge nodes to your cluster and deploy workloads on edge nodes. + enabled: false + kubeedge: # kubeedge configurations + enabled: false + cloudCore: + cloudHub: + advertiseAddress: # At least a public IP address or an IP address which can be accessed by edge nodes must be provided. + - "" # Note that once KubeEdge is enabled, CloudCore will malfunction if the address is not provided. + service: + cloudhubNodePort: "30000" + cloudhubQuicNodePort: "30001" + cloudhubHttpsNodePort: "30002" + cloudstreamNodePort: "30003" + tunnelNodePort: "30004" + # resources: {} + # hostNetWork: false + ``` + +3. Set the value of `kubeedge.cloudCore.cloudHub.advertiseAddress` to the public IP address of your cluster or an IP address that can be accessed by edge nodes. + +4. Save the file and execute the following commands to start installation: + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f cluster-configuration.yaml + ``` + +## Enable KubeEdge After Installation + +1. Log in to the console as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. + +2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detail page. + + {{< notice info >}} +A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects. + {{}} + +3. In **Custom Resources**, click on the right of `ks-installer` and select **Edit YAML**. + +4. In this YAML file, navigate to `edgeruntime` and `kubeedge`, and change the value of `enabled` from `false` to `true` to enable all KubeEdge components. Click **OK**. + + ```yaml + edgeruntime: # Add edge nodes to your cluster and deploy workloads on edge nodes. + enabled: false + kubeedge: # kubeedge configurations + enabled: false + cloudCore: + cloudHub: + advertiseAddress: # At least a public IP address or an IP address which can be accessed by edge nodes must be provided. + - "" # Note that once KubeEdge is enabled, CloudCore will malfunction if the address is not provided. + service: + cloudhubNodePort: "30000" + cloudhubQuicNodePort: "30001" + cloudhubHttpsNodePort: "30002" + cloudstreamNodePort: "30003" + tunnelNodePort: "30004" + # resources: {} + # hostNetWork: false + ``` + +5. Set the value of `kubeedge.cloudCore.cloudHub.advertiseAddress` to the public IP address of your cluster or an IP address that can be accessed by edge nodes. After you finish, click **OK** in the lower-right corner to save the configuration. + +6. You can use the web kubectl to check the installation process by executing the following command: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + {{< notice note >}} + +You can find the web kubectl tool by clicking in the lower-right corner of the console. + {{}} + +## Verify the Installation of the Component + +{{< tabs >}} + +{{< tab "Verify the component on the dashboard" >}} + +On the **Cluster Management** page, verify that the **Edge Nodes** module has appeared under **Nodes**. 
+ +{{}} + +{{< tab "Verify the component through kubectl" >}} + +Execute the following command to check the status of Pods: + +```bash +kubectl get pod -n kubeedge +``` + +The output may look as follows if the component runs successfully: + +```bash +NAME READY STATUS RESTARTS AGE +cloudcore-5f994c9dfd-r4gpq 1/1 Running 0 5h13m +edge-watcher-controller-manager-bdfb8bdb5-xqfbk 2/2 Running 0 5h13m +iptables-hphgf 1/1 Running 0 5h13m +``` + +{{}} + +{{}} + +{{< notice note >}} + +CloudCore may malfunction (`CrashLoopBackOff`) if `kubeedge.cloudCore.cloudHub.advertiseAddress` was not set when you enabled KubeEdge. In this case, run `kubectl -n kubeedge edit cm cloudcore` to add the public IP address of your cluster or an IP address that can be accessed by edge nodes. + +{{}} diff --git a/content/en/docs/v3.4/pluggable-components/logging.md b/content/en/docs/v3.4/pluggable-components/logging.md new file mode 100644 index 000000000..bbe764c7e --- /dev/null +++ b/content/en/docs/v3.4/pluggable-components/logging.md @@ -0,0 +1,199 @@ +--- +title: "KubeSphere Logging System" +keywords: "Kubernetes, Elasticsearch, KubeSphere, Logging, logs" +description: "Learn how to enable Logging to leverage the tenant-based system for log collection, query and management." +linkTitle: "KubeSphere Logging System" +weight: 6400 +--- + +KubeSphere provides a powerful, holistic, and easy-to-use logging system for log collection, query, and management. It covers logs at varied levels, including tenants, infrastructure resources, and applications. Users can search logs from different dimensions, such as project, workload, Pod and keyword. Compared with Kibana, the tenant-based logging system of KubeSphere features better isolation and security among tenants as tenants can only view their own logs. Apart from KubeSphere's own logging system, the container platform also allows users to add third-party log collectors, such as Elasticsearch, Kafka, and Fluentd. + +For more information, see [Log Query](../../toolbox/log-query/). + +## Enable Logging Before Installation + +### Installing on Linux + +When you install KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components. + +1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file `config-sample.yaml`. Modify the file by executing the following command: + + ```bash + vi config-sample.yaml + ``` + + {{< notice note >}} + +- If you adopt [All-in-One Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a `config-sample.yaml` file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable Logging in this mode (for example, for testing purposes), refer to [the following section](#enable-logging-after-installation) to see how Logging can be installed after installation. + +- If you adopt [Multi-node Installation](../../installing-on-linux/introduction/multioverview/) and are using symbolic links for docker root directory, make sure all nodes follow the exactly same symbolic links. Logging agents are deployed in DaemonSets onto nodes. Any discrepancy in container log path may cause collection failures on that node. + +{{}} + +2. In this file, navigate to `logging` and change `false` to `true` for `enabled`. Save the file after you finish. + + ```yaml + logging: + enabled: true # Change "false" to "true". 
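    # The value of "containerruntime" below must match the container runtime your cluster
    # actually uses; change "docker" to "containerd" if needed (see the note after this block).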
+ containerruntime: docker + ``` + + {{< notice info >}}To use containerd as the container runtime, change the value of the field `containerruntime` to `containerd`. If you upgraded to KubeSphere 3.3 from earlier versions, you have to manually add the field `containerruntime` under `logging` when enabling KubeSphere Logging system. + + {{}} + + {{< notice note >}}By default, KubeKey will install Elasticsearch internally if Logging is enabled. For a production environment, it is highly recommended that you set the following values in `config-sample.yaml` if you want to enable Logging, especially `externalElasticsearchHost` and `externalElasticsearchPort`. Once you provide the following information before installation, KubeKey will integrate your external Elasticsearch directly instead of installing an internal one. + {{}} + + ```yaml + es: # Storage backend for logging, tracing, events and auditing. + elasticsearchMasterReplicas: 1 # The total number of master nodes. Even numbers are not allowed. + elasticsearchDataReplicas: 1 # The total number of data nodes. + elasticsearchMasterVolumeSize: 4Gi # The volume size of Elasticsearch master nodes. + elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. + logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. + elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. + externalElasticsearchHost: # The Host of external Elasticsearch. + externalElasticsearchPort: # The port of external Elasticsearch. + ``` + +3. Create a cluster using the configuration file: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +### Installing on Kubernetes + +As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable KubeSphere Logging first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) file. + +1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) and edit it. + + ```bash + vi cluster-configuration.yaml + ``` + +2. In this local `cluster-configuration.yaml` file, navigate to `logging` and enable Logging by changing `false` to `true` for `enabled`. Save the file after you finish. + + ```yaml + logging: + enabled: true # Change "false" to "true". + containerruntime: docker + ``` + + {{< notice info >}}To use containerd as the container runtime, change the value of the field `.logging.containerruntime` to `containerd`. If you upgraded to KubeSphere 3.3 from earlier versions, you have to manually add the field `containerruntime` under `logging` when enabling KubeSphere Logging system. + + {{}} + + {{< notice note >}}By default, ks-installer will install Elasticsearch internally if Logging is enabled. For a production environment, it is highly recommended that you set the following values in `cluster-configuration.yaml` if you want to enable Logging, especially `externalElasticsearchHost` and `externalElasticsearchPort`. Once you provide the following information before installation, ks-installer will integrate your external Elasticsearch directly instead of installing an internal one. + {{}} + + ```yaml + es: # Storage backend for logging, tracing, events and auditing. + elasticsearchMasterReplicas: 1 # The total number of master nodes. Even numbers are not allowed. 
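    # The replica and volume size fields in this block configure the built-in Elasticsearch only;
    # they are effectively unused once externalElasticsearchHost and externalElasticsearchPort are set.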
+ elasticsearchDataReplicas: 1 # The total number of data nodes. + elasticsearchMasterVolumeSize: 4Gi # The volume size of Elasticsearch master nodes. + elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. + logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. + elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. + externalElasticsearchHost: # The Host of external Elasticsearch. + externalElasticsearchPort: # The port of external Elasticsearch. + ``` + +3. Execute the following commands to start installation: + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f cluster-configuration.yaml + ``` + +## Enable Logging After Installation + +1. Log in to the console as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. + +2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detail page. + + {{< notice info >}} + +A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects. + +{{}} + +3. In **Custom Resources**, click on the right of `ks-installer` and select **Edit YAML**. + +4. In this YAML file, navigate to `logging` and change `false` to `true` for `enabled`. After you finish, click **OK** in the lower-right corner to save the configuration. + + ```yaml + logging: + enabled: true # Change "false" to "true". + containerruntime: docker + ``` + + {{< notice info >}}To use containerd as the container runtime, change the value of the field `.logging.containerruntime` to `containerd`. If you upgraded to KubeSphere 3.3 from earlier versions, you have to manually add the field `containerruntime` under `logging` when enabling KubeSphere Logging system. + + {{}} + + {{< notice note >}}By default, Elasticsearch will be installed internally if Logging is enabled. For a production environment, it is highly recommended that you set the following values in this yaml file if you want to enable Logging, especially `externalElasticsearchHost` and `externalElasticsearchPort`. Once you provide the following information, KubeSphere will integrate your external Elasticsearch directly instead of installing an internal one. + {{}} + + ```yaml + es: # Storage backend for logging, tracing, events and auditing. + elasticsearchMasterReplicas: 1 # The total number of master nodes. Even numbers are not allowed. + elasticsearchDataReplicas: 1 # The total number of data nodes. + elasticsearchMasterVolumeSize: 4Gi # The volume size of Elasticsearch master nodes. + elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. + logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. + elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. + externalElasticsearchHost: # The Host of external Elasticsearch. + externalElasticsearchPort: # The port of external Elasticsearch. + ``` + +5. 
You can use the web kubectl to check the installation process by executing the following command: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + {{< notice note >}} + +You can find the web kubectl tool by clicking in the lower-right corner of the console. + +{{}} + +## Verify the Installation of the Component + +{{< tabs >}} + +{{< tab "Verify the component on the dashboard" >}} + +Go to **System Components** and check that all components on the **Logging** tab page is in **Healthy** state. + +{{}} + +{{< tab "Verify the component through kubectl" >}} + +Execute the following command to check the status of Pods: + +```bash +kubectl get pod -n kubesphere-logging-system +``` + +The output may look as follows if the component runs successfully: + +```bash +NAME READY STATUS RESTARTS AGE +elasticsearch-logging-data-0 1/1 Running 0 87m +elasticsearch-logging-data-1 1/1 Running 0 85m +elasticsearch-logging-discovery-0 1/1 Running 0 87m +fluent-bit-bsw6p 1/1 Running 0 40m +fluent-bit-smb65 1/1 Running 0 40m +fluent-bit-zdz8b 1/1 Running 0 40m +fluentbit-operator-9b69495b-bbx54 1/1 Running 0 40m +logsidecar-injector-deploy-667c6c9579-cs4t6 2/2 Running 0 38m +logsidecar-injector-deploy-667c6c9579-klnmf 2/2 Running 0 38m +``` + +{{}} + +{{}} diff --git a/content/en/docs/v3.4/pluggable-components/metrics-server.md b/content/en/docs/v3.4/pluggable-components/metrics-server.md new file mode 100644 index 000000000..e82801df1 --- /dev/null +++ b/content/en/docs/v3.4/pluggable-components/metrics-server.md @@ -0,0 +1,113 @@ +--- +title: "Metrics Server" +keywords: "Kubernetes, KubeSphere, Metrics Server" +description: "Learn how to enable Metrics Server to use HPA to autoscale a Deployment." +linkTitle: "Metrics Server" +weight: 6910 +--- + +KubeSphere supports Horizontal Pod Autoscalers (HPA) for [Deployments](../../project-user-guide/application-workloads/deployments/). In KubeSphere, the Metrics Server controls whether the HPA is enabled. You use an HPA object to autoscale a Deployment based on different types of metrics, such as CPU and memory utilization, as well as the minimum and maximum number of replicas. In this way, an HPA helps to make sure your application runs smoothly and consistently in different situations. + +## Enable the Metrics Server Before Installation + +### Installing on Linux + +When you implement multi-node installation of KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components. + +1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file `config-sample.yaml`. Modify the file by executing the following command: + + ```bash + vi config-sample.yaml + ``` + + {{< notice note >}} + If you adopt [All-in-One Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a `config-sample.yaml` file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable the Metrics Server in this mode (for example, for testing purposes), refer to [the following section](#enable-devops-after-installation) to see how the Metrics Server can be installed after installation. + {{}} + +2. In this file, navigate to `metrics_server` and change `false` to `true` for `enabled`. Save the file after you finish. 
+ + ```yaml + metrics_server: + enabled: true # Change "false" to "true". + ``` + +3. Create a cluster using the configuration file: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +### Installing on Kubernetes + +As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable the Metrics Server first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) file. + +1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) and edit it. + + ```bash + vi cluster-configuration.yaml + ``` + +2. In this local `cluster-configuration.yaml` file, navigate to `metrics_server` and enable it by changing `false` to `true` for `enabled`. Save the file after you finish. + + ```yaml + metrics_server: + enabled: true # Change "false" to "true". + ``` + +3. Execute the following commands to start installation: + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f cluster-configuration.yaml + ``` + + {{< notice note >}} + +If you install KubeSphere on some cloud hosted Kubernetes engines, it is probable that the Metrics Server is already installed in your environment. In this case, it is not recommended that you enable it in `cluster-configuration.yaml` as it may cause conflicts during installation. + {{}} + +## Enable the Metrics Server After Installation + +1. Log in to the console as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. + +2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detail page. + + {{< notice info >}} +A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects. + {{}} + +3. In **Custom Resources**, click on the right of `ks-installer` and select **Edit YAML**. + +4. In this YAML file, navigate to `metrics_server` and change `false` to `true` for `enabled`. After you finish, click **OK** in the lower-right corner to save the configuration. + + ```yaml + metrics_server: + enabled: true # Change "false" to "true". + ``` + +5. You can use the web kubectl to check the installation process by executing the following command: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + {{< notice note >}} + +You can find the web kubectl tool by clicking in the lower-right corner of the console. + {{}} + +## Verify the Installation of the Component + +Execute the following command to verify that the Pod of Metrics Server is up and running. 
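In addition to the Pod check below, you can optionally confirm that the Metrics API itself is serving data; this extra check is only a suggestion and is not required:

```bash
# Returns per-node CPU and memory usage only when the metrics.k8s.io API is available.
kubectl top nodes
```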
+ +```bash +kubectl get pod -n kube-system +``` + +If the Metrics Server is successfully installed, your cluster may return the following output (excluding irrelevant Pods): + +```bash +NAME READY STATUS RESTARTS AGE +metrics-server-6c767c9f94-hfsb7 1/1 Running 0 9m38s +``` \ No newline at end of file diff --git a/content/en/docs/v3.4/pluggable-components/network-policy.md b/content/en/docs/v3.4/pluggable-components/network-policy.md new file mode 100644 index 000000000..437190c87 --- /dev/null +++ b/content/en/docs/v3.4/pluggable-components/network-policy.md @@ -0,0 +1,109 @@ +--- +title: "Network Policies" +keywords: "Kubernetes, KubeSphere, NetworkPolicy" +description: "Learn how to enable Network Policies to control traffic flow at the IP address or port level." +linkTitle: "Network Policies" +weight: 6900 +--- + +Starting from v3.0.0, users can configure network policies of native Kubernetes in KubeSphere. Network Policies are an application-centric construct, enabling you to specify how a Pod is allowed to communicate with various network entities over the network. With network policies, users can achieve network isolation within the same cluster, which means firewalls can be set up between certain instances (Pods). + +{{< notice note >}} + +- Please make sure that the CNI network plugin used by the cluster supports Network Policies before you enable the feature. There are a number of CNI network plugins that support Network Policies, including Calico, Cilium, Kube-router, Romana, and Weave Net. +- It is recommended that you use [Calico](https://www.projectcalico.org/) as the CNI plugin before you enable Network Policies. + +{{}} + +For more information, see [Network Policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/). + +## Enable the Network Policy Before Installation + +### Installing on Linux + +When you implement multi-node installation of KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components. + +1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file `config-sample.yaml`. Modify the file by executing the following command: + + ```bash + vi config-sample.yaml + ``` + + {{< notice note >}} +If you adopt [All-in-One Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a `config-sample.yaml` file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable the Network Policy in this mode (for example, for testing purposes), refer to [the following section](#enable-network-policy-after-installation) to see how the Network Policy can be installed after installation. + {{}} + +2. In this file, navigate to `network.networkpolicy` and change `false` to `true` for `enabled`. Save the file after you finish. + + ```yaml + network: + networkpolicy: + enabled: true # Change "false" to "true". + ``` + +3. Create a cluster using the configuration file: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +### Installing on Kubernetes + +As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable the Network Policy first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) file. + +1. 
Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) and edit it. + + ```bash + vi cluster-configuration.yaml + ``` + +2. In this local `cluster-configuration.yaml` file, navigate to `network.networkpolicy` and enable it by changing `false` to `true` for `enabled`. Save the file after you finish. + + ```yaml + network: + networkpolicy: + enabled: true # Change "false" to "true". + ``` + +3. Execute the following commands to start installation: + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f cluster-configuration.yaml + ``` + +## Enable the Network Policy After Installation + +1. Log in to the console as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. + +2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detail page. + + {{< notice info >}} +A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects. + {{}} + +3. In **Custom Resources**, click on the right of `ks-installer` and select **Edit YAML**. + +4. In this YAML file, navigate to `network.networkpolicy` and change `false` to `true` for `enabled`. After you finish, click **OK** in the lower-right corner to save the configuration. + + ```yaml + network: + networkpolicy: + enabled: true # Change "false" to "true". + ``` + +5. You can use the web kubectl to check the installation process by executing the following command: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + {{< notice note >}} + +You can find the web kubectl tool by clicking in the lower-right corner of the console. + {{}} + +## Verify the Installation of the Component + +If you can see the **Network Policies** module in **Network**, it means the installation is successful as this part won't display until you install the component. \ No newline at end of file diff --git a/content/en/docs/v3.4/pluggable-components/overview.md b/content/en/docs/v3.4/pluggable-components/overview.md new file mode 100644 index 000000000..04f63b922 --- /dev/null +++ b/content/en/docs/v3.4/pluggable-components/overview.md @@ -0,0 +1,98 @@ +--- +title: "Enable Pluggable Components — Overview" +keywords: "Kubernetes, KubeSphere, pluggable-components, overview" +description: "Develop a basic understanding of key components in KubeSphere, including features and resource consumption." +linkTitle: "Overview" +weight: 6100 +--- + +KubeSphere has decoupled some core feature components since v2.1.0. These components are designed to be pluggable which means you can enable them either before or after installation. By default, KubeSphere will be deployed with a minimal installation if you do not enable them. + +Different pluggable components are deployed in different namespaces. You can enable any of them based on your needs. It is highly recommended that you install these pluggable components to discover the full-stack features and capabilities provided by KubeSphere. + +For more information about how to enable each component, see respective tutorials in this chapter. 
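If you are working with an existing cluster and are not sure which components are currently enabled, one quick way to check (assuming the default `ks-installer` setup used throughout this chapter) is to read the `ClusterConfiguration` object directly:

```bash
# Each pluggable component (devops, logging, events, alerting, servicemesh, and so on)
# exposes an "enabled" field in the spec of this object.
kubectl -n kubesphere-system get clusterconfiguration ks-installer -o yaml
```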
+ +## Resource Requirements + +Before you enable pluggable components, make sure you have enough resources in your environment based on the tables below. Otherwise, components may crash due to a lack of resources. + +{{< notice note >}} + +The following request and limit of CPU and memory resources are required by a single replica. + +{{}} + +### KubeSphere App Store + +| Namespace | openpitrix-system | +| -------------- | ------------------------------------------------------------ | +| CPU Request | 0.3 core | +| CPU Limit | None | +| Memory Request | 300 MiB | +| Memory Limit | None | +| Installation | Optional | +| Notes | Provide an App Store with application lifecycle management. The installation is recommended. | + +### KubeSphere DevOps System + +| Namespace | kubesphere-devops-system | kubesphere-devops-system | +| -------------- | ------------------------------------------------------------ | ------------------------------------------------------- | +| Pattern | All-in-One installation | Multi-node installation | +| CPU Request | 34 m | 0.47 core | +| CPU Limit | None | None | +| Memory Request | 2.69 G | 8.6 G | +| Memory Limit | None | None | +| Installation | Optional | Optional | +| Notes | Provide one-stop DevOps solutions with Jenkins pipelines and B2I & S2I. | The memory of one of the nodes must be larger than 8 G. | + +### KubeSphere Monitoring System + +| Namespace | kubesphere-monitoring-system | kubesphere-monitoring-system | kubesphere-monitoring-system | +| -------------- | ------------------------------------------------------------ | ---------------------------- | ---------------------------- | +| Sub-component | 2 x Prometheus | 3 x Alertmanager | Notification Manager | +| CPU Request | 100 m | 10 m | 100 m | +| CPU Limit | 4 cores | None | 500 m | +| Memory Request | 400 MiB | 30 MiB | 20 MiB | +| Memory Limit | 8 GiB | None | 1 GiB | +| Installation | Required | Required | Required | +| Notes | The memory consumption of Prometheus depends on the cluster size. 8 GiB is sufficient for a cluster with 200 nodes/16,000 Pods. | - | - | + +{{< notice note >}} + +The KubeSphere monitoring system is not a pluggable component. It is installed by default. The resource request and limit of it are also listed on this page for your reference as it is closely related to other components such as logging. + +{{}} + +### KubeSphere Logging System + +| Namespace | kubesphere-logging-system | kubesphere-logging-system | kubesphere-logging-system | kubesphere-logging-system | +| -------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| Sub-component | 3 x Elasticsearch | fluent bit | kube-events | kube-auditing | +| CPU Request | 50 m | 20 m | 90 m | 20 m | +| CPU Limit | 1 core | 200 m | 900 m | 200 m | +| Memory Request | 2 G | 50 MiB | 120 MiB | 50 MiB | +| Memory Limit | None | 100 MiB | 1200 MiB | 100 MiB | +| Installation | Optional | Required | Optional | Optional | +| Notes | An optional component for log data storage. The internal Elasticsearch is not recommended for the production environment. | The log collection agent. It is a required component after you enable logging. | Collecting, filtering, exporting and alerting of Kubernetes events. | Collecting, filtering and alerting of Kubernetes and KubeSphere auditing logs. 
| + +### KubeSphere Alerting and Notification + +| Namespace | kubesphere-alerting-system | +| -------------- | ------------------------------------------------------------ | +| CPU Request | 0.08 core | +| CPU Limit | None | +| Memory Request | 80 M | +| Memory Limit | None | +| Installation | Optional | +| Notes | Alerting and Notification need to be enabled at the same time. | + +### KubeSphere Service Mesh + +| Namespace | istio-system | +| -------------- | ------------------------------------------------------------ | +| CPU Request | 1 core | +| CPU Limit | None | +| Memory Request | 3.5 G | +| Memory Limit | None | +| Installation | Optional | +| Notes | Support grayscale release strategies, traffic topology, traffic management and distributed tracing. | diff --git a/content/en/docs/v3.4/pluggable-components/pod-ip-pools.md b/content/en/docs/v3.4/pluggable-components/pod-ip-pools.md new file mode 100644 index 000000000..b8df7f4aa --- /dev/null +++ b/content/en/docs/v3.4/pluggable-components/pod-ip-pools.md @@ -0,0 +1,104 @@ +--- +title: "Pod IP Pools" +keywords: "Kubernetes, KubeSphere, Pod, IP pools" +description: "Learn how to enable Pod IP Pools to assign a specific Pod IP pool to your Pods." +linkTitle: "Pod IP Pools" +weight: 6920 +--- + +A Pod IP pool is used to manage the Pod network address space, and the address space between each Pod IP pool cannot overlap. When you create a workload, you can select a specific Pod IP pool, so that created Pods will be assigned IP addresses from this Pod IP pool. + +## Enable Pod IP Pools Before Installation + +### Installing on Linux + +When you implement multi-node installation of KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components. + +1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file `config-sample.yaml`. Modify the file by executing the following command: + + ```bash + vi config-sample.yaml + ``` + + {{< notice note >}} + If you adopt [All-in-One Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a `config-sample.yaml` file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable Pod IP Pools in this mode (for example, for testing purposes), refer to [the following section](#enable-pod-ip-pools-after-installation) to see how Pod IP pools can be installed after installation. + {{}} + +2. In this file, navigate to `network.ippool.type` and change `none` to `calico`. Save the file after you finish. + + ```yaml + network: + ippool: + type: calico # Change "none" to "calico". + ``` + +3. Create a cluster using the configuration file: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +### Installing on Kubernetes + +As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable Pod IP Pools first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) file. + +1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) and edit it. + + ```bash + vi cluster-configuration.yaml + ``` + +2. In this local `cluster-configuration.yaml` file, navigate to `network.ippool.type` and enable it by changing `none` to `calico`. Save the file after you finish. 
+ + ```yaml + network: + ippool: + type: calico # Change "none" to "calico". + ``` + +3. Execute the following commands to start installation: + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f cluster-configuration.yaml + ``` + + +## Enable Pod IP Pools After Installation + +1. Log in to the console as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. + +2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detail page. + + {{< notice info >}} +A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects. + {{}} + +3. In **Custom Resources**, click on the right of `ks-installer` and select **Edit YAML**. + +4. In this YAML file, navigate to `network` and change `network.ippool.type` to `calico`. After you finish, click **OK** in the lower-right corner to save the configuration. + + ```yaml + network: + ippool: + type: calico # Change "none" to "calico". + ``` + +5. You can use the web kubectl to check the installation process by executing the following command: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + {{< notice note >}} + +You can find the web kubectl tool by clicking in the lower-right corner of the console. + {{}} + +## Verify the Installation of the Component + +On the **Cluster Management** page, verify that you can see the **Pod IP Pools** module under **Network**. + + + diff --git a/content/en/docs/v3.4/pluggable-components/service-mesh.md b/content/en/docs/v3.4/pluggable-components/service-mesh.md new file mode 100644 index 000000000..0b6685a4d --- /dev/null +++ b/content/en/docs/v3.4/pluggable-components/service-mesh.md @@ -0,0 +1,157 @@ +--- +title: "KubeSphere Service Mesh" +keywords: "Kubernetes, Istio, KubeSphere, service-mesh, microservices" +description: "Learn how to enable KubeSphere Service Mesh to use different traffic management strategies for microservices governance." +linkTitle: "KubeSphere Service Mesh" +weight: 6800 +--- + +On the basis of [Istio](https://istio.io/), KubeSphere Service Mesh visualizes microservices governance and traffic management. It features a powerful toolkit including **circuit breaking, blue-green deployment, canary release, traffic mirroring, tracing, observability, and traffic control**. Developers can easily get started with KubeSphere Service Mesh without any code hacking, which greatly reduces the learning curve of Istio. All features of KubeSphere Service Mesh are designed to meet users' demand for their business. + +For more information, see [Grayscale Release](../../project-user-guide/grayscale-release/overview/). + +## Enable KubeSphere Service Mesh Before Installation + +### Installing on Linux + +When you implement multi-node installation of KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components. + +1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file `config-sample.yaml`. 
Modify the file by executing the following command: + + ```bash + vi config-sample.yaml + ``` + + {{< notice note >}} +If you adopt [All-in-One Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a `config-sample.yaml` file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable KubeSphere Service Mesh in this mode (for example, for testing purposes), refer to [the following section](#enable-service-mesh-after-installation) to see how KubeSphere Service Mesh can be installed after installation. + {{}} + +2. In this file, navigate to `servicemesh` and change `false` to `true` for `enabled`. Save the file after you finish. + + ```yaml + servicemesh: + enabled: true # Change “false” to “true”. + istio: # Customizing the istio installation configuration, refer to https://istio.io/latest/docs/setup/additional-setup/customize-installation/ + components: + ingressGateways: + - name: istio-ingressgateway # Used to expose a service outside of the service mesh using an Istio Gateway. The value is false by defalut. + enabled: false + cni: + enabled: false # When the value is true, it identifies user application pods with sidecars requiring traffic redirection and sets this up in the Kubernetes pod lifecycle’s network setup phase. + ``` + + {{< notice note >}} + - For more information about how to access service after enabling Ingress Gateway, please refer to [Ingress Gateway](https://istio.io/latest/docs/tasks/traffic-management/ingress/ingress-control/). + - For more information about the Istio CNI plugin, please refer to [Install Istio with the Istio CNI plugin](https://istio.io/latest/docs/setup/additional-setup/cni/). + {{}} + +3. Run the following command to create a cluster using the configuration file: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +### Installing on Kubernetes + +As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable KubeSphere Service Mesh first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) file. + +1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) and edit it. + + ```bash + vi cluster-configuration.yaml + ``` + +2. In this local `cluster-configuration.yaml` file, navigate to `servicemesh` and enable it by changing `false` to `true` for `enabled`. Save the file after you finish. + + ```yaml + servicemesh: + enabled: true # Change “false” to “true”. + istio: # Customizing the istio installation configuration, refer to https://istio.io/latest/docs/setup/additional-setup/customize-installation/ + components: + ingressGateways: + - name: istio-ingressgateway # Used to expose a service outside of the service mesh using an Istio Gateway. The value is false by defalut. + enabled: false + cni: + enabled: false # When the value is true, it identifies user application pods with sidecars requiring traffic redirection and sets this up in the Kubernetes pod lifecycle’s network setup phase. + ``` + +3. Run the following commands to start installation: + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f cluster-configuration.yaml + ``` + +## Enable KubeSphere Service Mesh After Installation + +1. 
Log in to the console as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. + +2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detail page. + + {{< notice info >}} +A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects. + {{}} + +3. In **Custom Resources**, click on the right of `ks-installer` and select **Edit YAML**. + +4. In this YAML file, navigate to `servicemesh` and change `false` to `true` for `enabled`. After you finish, click **OK** in the lower-right corner to save the configuration. + + ```yaml + servicemesh: + enabled: true # Change “false” to “true”. + istio: # Customizing the istio installation configuration, refer to https://istio.io/latest/docs/setup/additional-setup/customize-installation/ + components: + ingressGateways: + - name: istio-ingressgateway # Used to expose a service outside of the service mesh using an Istio Gateway. The value is false by defalut. + enabled: false + cni: + enabled: false # When the value is true, it identifies user application pods with sidecars requiring traffic redirection and sets this up in the Kubernetes pod lifecycle’s network setup phase. + ``` + ``` + +5. Run the following command in kubectl to check the installation process: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + {{< notice note >}} + +You can find the web kubectl tool by clicking in the lower-right corner of the console. + {{}} + +## Verify the Installation of the Component + +{{< tabs >}} + +{{< tab "Verify the component on the dashboard" >}} + +Go to **System Components** and check whether all components on the **Istio** tab page is in **Healthy** state. If yes, the component is successfully installed. + +{{}} + +{{< tab "Verify the component through kubectl" >}} + +Run the following command to check the status of Pods: + +```bash +kubectl get pod -n istio-system +``` + +The following is an example of the output if the component runs successfully: + +```bash +NAME READY STATUS RESTARTS AGE +istio-ingressgateway-78dbc5fbfd-f4cwt 1/1 Running 0 9m5s +istiod-1-6-10-7db56f875b-mbj5p 1/1 Running 0 10m +jaeger-collector-76bf54b467-k8blr 1/1 Running 0 6m48s +jaeger-operator-7559f9d455-89hqm 1/1 Running 0 7m +jaeger-query-b478c5655-4lzrn 2/2 Running 0 6m48s +kiali-f9f7d6f9f-gfsfl 1/1 Running 0 4m1s +kiali-operator-7d5dc9d766-qpkb6 1/1 Running 0 6m53s +``` + +{{}} + +{{}} diff --git a/content/en/docs/v3.4/pluggable-components/service-topology.md b/content/en/docs/v3.4/pluggable-components/service-topology.md new file mode 100644 index 000000000..1dadea0bb --- /dev/null +++ b/content/en/docs/v3.4/pluggable-components/service-topology.md @@ -0,0 +1,130 @@ +--- +title: "Service Topology" +keywords: "Kubernetes, KubeSphere, Services, Topology" +description: "Learn how to enable Service Topology to view contextual details of your Pods based on Weave Scope." +linkTitle: "Service Topology" +weight: 6915 +--- + +You can enable Service Topology to integrate [Weave Scope](https://www.weave.works/oss/scope/), a visualization and monitoring tool for Docker and Kubernetes. Weave Scope uses established APIs to collect information to build a topology of your apps and containers. 
The Service topology displays in your project, providing you with visual representations of connections based on traffic. + +## Enable Service Topology Before Installation + +### Installing on Linux + +When you implement multi-node installation of KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components. + +1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file `config-sample.yaml`. Modify the file by executing the following command: + + ```bash + vi config-sample.yaml + ``` + + {{< notice note >}} + If you adopt [All-in-One Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a `config-sample.yaml` file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable Service Topology in this mode (for example, for testing purposes), refer to [the following section](#enable-service-topology-after-installation) to see how Service Topology can be installed after installation. + {{}} + +2. In this file, navigate to `network.topology.type` and change `none` to `weave-scope`. Save the file after you finish. + + ```yaml + network: + topology: + type: weave-scope # Change "none" to "weave-scope". + ``` + +3. Create a cluster using the configuration file: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +### Installing on Kubernetes + +As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable Service Topology first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) file. + +1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) and edit it. + + ```bash + vi cluster-configuration.yaml + ``` + +2. In this local `cluster-configuration.yaml` file, navigate to `network.topology.type` and enable it by changing `none` to `weave-scope`. Save the file after you finish. + + ```yaml + network: + topology: + type: weave-scope # Change "none" to "weave-scope". + ``` + +3. Execute the following commands to start installation: + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f cluster-configuration.yaml + ``` + + +## Enable Service Topology After Installation + +1. Log in to the console as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. + +2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detail page. + + {{< notice info >}} +A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects. + {{}} + +3. In **Custom Resources**, click on the right of `ks-installer` and select **Edit YAML**. + +4. In this YAML file, navigate to `network` and change `network.topology.type` to `weave-scope`. After you finish, click **OK** in the lower-right corner to save the configuration. + + ```yaml + network: + topology: + type: weave-scope # Change "none" to "weave-scope". + ``` + +5. 
You can use the web kubectl to check the installation process by executing the following command: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + {{< notice note >}} + +You can find the web kubectl tool by clicking in the lower-right corner of the console. + {{}} + +## Verify the Installation of the Component + +{{< tabs >}} + +{{< tab "Verify the component on the dashboard" >}} + +Go to one of your project, navigate to **Services** under **Application Workloads**, and you can see a topology of your Services on the **Service Topology** tab page. + +{{}} + +{{< tab "Verify the component through kubectl" >}} + +Execute the following command to check the status of Pods: + +```bash +kubectl get pod -n weave +``` + +The output may look as follows if the component runs successfully: + +```bash +NAME READY STATUS RESTARTS AGE +weave-scope-agent-48cjp 1/1 Running 0 3m1s +weave-scope-agent-9jb4g 1/1 Running 0 3m1s +weave-scope-agent-ql5cf 1/1 Running 0 3m1s +weave-scope-app-5b76897b6f-8bsls 1/1 Running 0 3m1s +weave-scope-cluster-agent-8d9b8c464-5zlpp 1/1 Running 0 3m1s +``` + +{{}} + +{{}} \ No newline at end of file diff --git a/content/en/docs/v3.4/pluggable-components/uninstall-pluggable-components.md b/content/en/docs/v3.4/pluggable-components/uninstall-pluggable-components.md new file mode 100644 index 000000000..17c2cc4b1 --- /dev/null +++ b/content/en/docs/v3.4/pluggable-components/uninstall-pluggable-components.md @@ -0,0 +1,205 @@ +--- +title: "Uninstall Pluggable Components" +keywords: "Installer, uninstall, KubeSphere, Kubernetes" +description: "Learn how to uninstall each pluggable component in KubeSphere." +linkTitle: "Uninstall Pluggable Components" +Weight: 6940 +--- + +After you [enable the pluggable components of KubeSphere](../../pluggable-components/), you can also uninstall them by performing the following steps. Please back up any necessary data before you uninstall these components. + +## Prerequisites + +You have to change the value of the field `enabled` from `true` to `false` in `ks-installer` of the CRD `ClusterConfiguration` before you uninstall any pluggable components except Service Topology and Pod IP Pools. + +Use either of the following methods to change the value of the field `enabled`: + +- Run the following command to edit `ks-installer`: + + ```bash + kubectl -n kubesphere-system edit clusterconfiguration ks-installer + ``` + +- Log in to the KubeSphere web console as `admin`, click **Platform** in the upper-left corner and select **Cluster Management**, and then go to **CRDs** to search for `ClusterConfiguration`. For more information, see [Enable Pluggable Components](../../../pluggable-components/). + +{{< notice note >}} + +After the value is changed, you need to wait until the updating process is complete before you continue with any further operations. + +{{}} + +## Uninstall KubeSphere App Store + +Change the value of `openpitrix.store.enabled` from `true` to `false` in `ks-installer` of the CRD `ClusterConfiguration`. + +## Uninstall KubeSphere DevOps + +1. To uninstall DevOps: + + ```bash + helm uninstall -n kubesphere-devops-system devops + kubectl patch -n kubesphere-system cc ks-installer --type=json -p='[{"op": "remove", "path": "/status/devops"}]' + kubectl patch -n kubesphere-system cc ks-installer --type=json -p='[{"op": "replace", "path": "/spec/devops/enabled", "value": false}]' + ``` +2. 
To delete DevOps resources: + + ```bash + # Remove all resources related with DevOps + for devops_crd in $(kubectl get crd -o=jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep "devops.kubesphere.io"); do + for ns in $(kubectl get ns -ojsonpath='{.items..metadata.name}'); do + for devops_res in $(kubectl get $devops_crd -n $ns -oname); do + kubectl patch $devops_res -n $ns -p '{"metadata":{"finalizers":[]}}' --type=merge + done + done + done + # Remove all DevOps CRDs + kubectl get crd -o=jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep "devops.kubesphere.io" | xargs -I crd_name kubectl delete crd crd_name + # Remove DevOps namespace + kubectl delete namespace kubesphere-devops-system + ``` + + +## Uninstall KubeSphere Logging + +1. Change the value of `logging.enabled` from `true` to `false` in `ks-installer` of the CRD `ClusterConfiguration`. + +2. To disable only log collection: + + ```bash + kubectl delete inputs.logging.kubesphere.io -n kubesphere-logging-system tail + ``` + + {{< notice note >}} + + After running this command, you can still view the container's recent logs provided by Kubernetes by default. However, the container history logs will be cleared and you cannot browse them any more. + + {{}} + +3. To uninstall the Logging system, including Elasticsearch: + + ```bash + kubectl delete crd fluentbitconfigs.logging.kubesphere.io + kubectl delete crd fluentbits.logging.kubesphere.io + kubectl delete crd inputs.logging.kubesphere.io + kubectl delete crd outputs.logging.kubesphere.io + kubectl delete crd parsers.logging.kubesphere.io + kubectl delete deployments.apps -n kubesphere-logging-system fluentbit-operator + helm uninstall elasticsearch-logging --namespace kubesphere-logging-system + ``` + + {{< notice warning >}} + + This operation may cause anomalies in Auditing, Events, and Service Mesh. + + {{}} + +4. Run the following command: + + ```bash + kubectl delete deployment logsidecar-injector-deploy -n kubesphere-logging-system + kubectl delete ns kubesphere-logging-system + ``` + +## Uninstall KubeSphere Events + +1. Change the value of `events.enabled` from `true` to `false` in `ks-installer` of the CRD `ClusterConfiguration`. + +2. Run the following command: + + ```bash + helm delete ks-events -n kubesphere-logging-system + ``` + +## Uninstall KubeSphere Alerting + +1. Change the value of `alerting.enabled` from `true` to `false` in `ks-installer` of the CRD `ClusterConfiguration`. + +2. Run the following command: + + ```bash + kubectl -n kubesphere-monitoring-system delete thanosruler kubesphere + ``` + + {{< notice note >}} + + Notification is installed in KubeSphere 3.3 by default, so you do not need to uninstall it. + + {{}} + + +## Uninstall KubeSphere Auditing + +1. Change the value of `auditing.enabled` from `true` to `false` in `ks-installer` of the CRD `ClusterConfiguration`. + +2. Run the following commands: + + ```bash + helm uninstall kube-auditing -n kubesphere-logging-system + kubectl delete crd rules.auditing.kubesphere.io + kubectl delete crd webhooks.auditing.kubesphere.io + ``` + +## Uninstall KubeSphere Service Mesh + +1. Change the value of `servicemesh.enabled` from `true` to `false` in `ks-installer` of the CRD `ClusterConfiguration`. + +2. 
Run the following commands: + + ```bash + curl -L https://istio.io/downloadIstio | sh - + istioctl x uninstall --purge + + kubectl -n istio-system delete kiali kiali + helm -n istio-system delete kiali-operator + + kubectl -n istio-system delete jaeger jaeger + helm -n istio-system delete jaeger-operator + ``` + +## Uninstall Network Policies + +For the component NetworkPolicy, disabling it does not require uninstalling the component as its controller is now inside `ks-controller-manager`. If you want to remove it from the KubeSphere console, change the value of `network.networkpolicy.enabled` from `true` to `false` in `ks-installer` of the CRD `ClusterConfiguration`. + +## Uninstall Metrics Server + +1. Change the value of `metrics_server.enabled` from `true` to `false` in `ks-installer` of the CRD `ClusterConfiguration`. + +2. Run the following commands: + + ```bash + kubectl delete apiservice v1beta1.metrics.k8s.io + kubectl -n kube-system delete service metrics-server + kubectl -n kube-system delete deployment metrics-server + ``` + +## Uninstall Service Topology + +1. Change the value of `network.topology.type` from `weave-scope` to `none` in `ks-installer` of the CRD `ClusterConfiguration`. + +2. Run the following command: + + ```bash + kubectl delete ns weave + ``` + +## Uninstall Pod IP Pools + +Change the value of `network.ippool.type` from `calico` to `none` in `ks-installer` of the CRD `ClusterConfiguration`. + +## Uninstall KubeEdge + +1. Change the value of `kubeedge.enabled` and `edgeruntime.enabled` from `true` to `false` in `ks-installer` of the CRD `ClusterConfiguration`. + +2. Run the following commands: + + ```bash + helm uninstall kubeedge -n kubeedge + kubectl delete ns kubeedge + ``` + + {{< notice note >}} + + After uninstallation, you will not be able to add edge nodes to your cluster. + + {{}} + diff --git a/content/en/docs/v3.4/project-administration/_index.md b/content/en/docs/v3.4/project-administration/_index.md new file mode 100644 index 000000000..a8c3c7e20 --- /dev/null +++ b/content/en/docs/v3.4/project-administration/_index.md @@ -0,0 +1,13 @@ +--- +title: "Project Administration" +description: "Help you to better manage KubeSphere projects" +layout: "second" + +linkTitle: "Project Administration" +weight: 13000 + +icon: "/images/docs/v3.3/docs.svg" + +--- + +A KubeSphere project is a Kubernetes namespace. There are two types of projects, the single-cluster project and the multi-cluster project. The former one is the regular Kubernetes namespace, while the latter is the federated namespace across multiple clusters. As a project administrator, you are responsible for project creation, limit range settings, network isolation configuration, and more. diff --git a/content/en/docs/v3.4/project-administration/container-limit-ranges.md b/content/en/docs/v3.4/project-administration/container-limit-ranges.md new file mode 100644 index 000000000..8fa82fa9d --- /dev/null +++ b/content/en/docs/v3.4/project-administration/container-limit-ranges.md @@ -0,0 +1,47 @@ +--- +title: "Container Limit Ranges" +keywords: 'Kubernetes, KubeSphere, resource, quotas, limits, requests, limit ranges, containers' +description: 'Learn how to set default container limit ranges in a project.' +linkTitle: "Container Limit Ranges" +weight: 13400 +--- + +A container can use as much CPU and memory as set by [the resource quota for a project](../../workspace-administration/project-quotas/). 
At the same time, KubeSphere uses requests and limits to control resource (for example, CPU and memory) usage for a container, also known as [LimitRanges](https://kubernetes.io/docs/concepts/policy/limit-range/) in Kubernetes. Requests make sure the container can get the resources it needs as they are specifically guaranteed and reserved. On the contrary, limits ensure that container can never use resources above a certain value. + +When you create a workload, such as a Deployment, you configure resource [Kubernetes requests and limits](https://kubesphere.io/blogs/understand-requests-and-limits-in-kubernetes/) for the container. To make these request and limit fields pre-populated with values, you can set default limit ranges. + +This tutorial demonstrates how to set default limit ranges for containers in a project. + +## Prerequisites + +You have an available workspace, a project and a user (`project-admin`). The user must have the `admin` role at the project level. For more information, see [Create Workspaces, Projects, Users and Roles](../../quick-start/create-workspace-and-project/). + +## Set Default Limit Ranges + +1. Log in to the console as `project-admin` and go to a project. On the **Overview** page, you can see default limit ranges remain unset if the project is newly created. Click **Edit Quotas** next to **Default Container Quotas Not Set** to configure limit ranges. + +2. In the dialog that appears, you can see that KubeSphere does not set any requests or limits by default. To set requests and limits to control CPU and memory resources, use the slider to move to a desired value or enter numbers directly. Leaving a field blank means you do not set any requests or limits. + + {{< notice note >}} + + The limit can never be lower than the request. + + {{}} + +3. Click **OK** to finish setting limit ranges. + +4. Go to **Basic Information** in **Project Settings**, and you can see default limit ranges for containers in a project. + +5. To change default limit ranges, click **Edit Project** on the **Basic Information** page and select **Edit Default Container Quotas**. + +6. Change limit ranges directly in the dialog and click **OK**. + +7. When you create a workload, requests and limits of the container will be pre-populated with values. + {{< notice note >}} + For more information, see **Resource Request** in [Container Image Settings](../../project-user-guide/application-workloads/container-image-settings/). + + {{}} + +## See Also + +[Project Quotas](../../workspace-administration/project-quotas/) diff --git a/content/en/docs/v3.4/project-administration/disk-log-collection.md b/content/en/docs/v3.4/project-administration/disk-log-collection.md new file mode 100644 index 000000000..634317a22 --- /dev/null +++ b/content/en/docs/v3.4/project-administration/disk-log-collection.md @@ -0,0 +1,75 @@ +--- +title: "Log Collection" +keywords: 'KubeSphere, Kubernetes, project, disk, log, collection' +description: 'Enable log collection so that you can collect, manage, and analyze logs in a unified way.' +linkTitle: "Log Collection" +weight: 13600 +--- + +KubeSphere supports multiple log collection methods so that Ops teams can collect, manage, and analyze logs in a unified and flexible way. + +This tutorial demonstrates how to collect logs for an example app. + +## Prerequisites + +- You need to create a workspace, a project and a user (`project-admin`). The user must be invited to the project with the role of `admin` at the project level. 
For more information, see [Create Workspaces, Projects, Users and Roles](../../quick-start/create-workspace-and-project/). +- You need to enable [the KubeSphere Logging System](../../pluggable-components/logging/). + +## Enable Log Collection + +1. Log in to the web console of KubeSphere as `project-admin` and go to your project. + +2. From the left navigation bar, click **Log Collection** in **Project Settings**, and then click icon to enable the feature. + +## Create a Deployment + +1. From the left navigation bar, select **Workloads** in **Application Workloads**. Under the **Deployments** tab, click **Create**. + +2. In the dialog that appears, set a name for the Deployment (for example, `demo-deployment`) and click **Next**. + +3. Under **Containers**, click **Add Container**. + +4. Enter `alpine` in the search bar to use the image (tag: `latest`) as an example. + +5. Scroll down to **Start Command** and select the checkbox. Enter the following values for **Command** and **Parameters** respectively, click **√**, and then click **Next**. + + **Command** + + ```bash + /bin/sh + ``` + + **Parameters** + + ```bash + -c,if [ ! -d /data/log ];then mkdir -p /data/log;fi; while true; do date >> /data/log/app-test.log; sleep 30;done + ``` + + {{< notice note >}} + + The command and parameters above mean that the date information will be exported to `app-test.log` in `/data/log` every 30 seconds. + + {{}} + +6. On the **Storage Settings** tab, click icon to enable **Collect Logs on Volumes** and click **Mount Volume**. + +7. On the **Temporary Volume** tab, enter a name for the volume (for example, `demo-disk-log-collection`) and set the access mode and path. + + Click **√**, and then click **Next**. + +8. Click **Create** in **Advanced Settings** to finish the process. + + {{< notice note >}} + + For more information, see [Deployments](../../project-user-guide/application-workloads/deployments/). + + {{}} + +## View Logs + +1. Under the **Deployments** tab, click the Deployment just created to go to its detail page. + +2. In **Resource Status**, you can click icon to view container details, and then click icon of `logsidecar-container` (filebeat container) to view logs. + +3. Alternatively, you can also click icon in the lower-right corner and select **Log Search** to view stdout logs. For example, use the Pod name of the Deployment for a fuzzy query. + diff --git a/content/en/docs/v3.4/project-administration/project-and-multicluster-project.md b/content/en/docs/v3.4/project-administration/project-and-multicluster-project.md new file mode 100644 index 000000000..8a36c19d3 --- /dev/null +++ b/content/en/docs/v3.4/project-administration/project-and-multicluster-project.md @@ -0,0 +1,95 @@ +--- +title: "Projects and Multi-cluster Projects" +keywords: 'KubeSphere, Kubernetes, project, multicluster-project' +description: 'Learn how to create different types of projects.' +linkTitle: "Projects and Multi-cluster Projects" +weight: 13100 +--- + +A project in KubeSphere is a Kubernetes [namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/), which is used to organize resources into non-overlapping groups. It represents a logical partitioning capability as it divides cluster resources between multiple tenants. + +A multi-cluster project runs across clusters, empowering users to achieve high availability and isolate occurring issues to a certain cluster while not affecting your business. 
For more information, see [Multi-cluster Management](../../multicluster-management/). + +This tutorial demonstrates how to manage projects and multi-cluster projects. + +## Prerequisites + +- You need to create a workspace and a user (`project-admin`). The user must be invited to the workspace with the role of `workspace-self-provisioner`. For more information, see [Create Workspaces, Projects, Users and Roles](../../quick-start/create-workspace-and-project/). +- You must enable the multi-cluster feature through [Direction Connection](../../multicluster-management/enable-multicluster/direct-connection/) or [Agent Connection](../../multicluster-management/enable-multicluster/agent-connection/) before you create a multi-cluster project. + +## Projects + +### Create a project + +1. Go to the **Projects** page of a workspace and click **Create** on the **Projects** tab. + + {{< notice note >}} + +- You can change the cluster where the project will be created on the **Cluster** drop-down menu. The list is only visible after you enable the multi-cluster feature. +- If you cannot see the **Create** button, it means no cluster is available to use for your workspace. You need to contact the platform administrator or cluster administrator so that workspace resources can be created in the cluster. [To assign a cluster to a workspace](../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/), the platform administrator or cluster administrator needs to edit **Cluster Visibility** on the **Cluster Management** page. + + {{}} + +2. In the **Create Project** window that appears, enter a project name and add an alias or description if necessary. Under **Cluster**, select the cluster where the project will be created (this option does not appear if the multi-cluster feature is not enabled), and click **OK**. + +3. A project created will display in the list. You can click the project name to go to its **Overview** page. + +### Edit a project + +1. Go to your project, navigate to **Basic Information** under **Project Settings** and click **Manage** on the right. + +2. Choose **Edit Information** from the drop-down menu. + + {{< notice note >}} + +The project name cannot be edited. If you want to change other information, see relevant tutorials in the documentation. + +{{}} + +3. To delete a project, choose **Delete** from the drop-down menu. In the dialog that appears, enter the project name and click **OK** to confirm the deletion. + +{{< notice warning >}} + +A project cannot be recovered once deleted and resources in the project will be removed. + +{{}} + +## Multi-cluster Projects + +### Create a multi-cluster project + +1. Go to the **Projects** page of a workspace, click the **Multi-cluster Projects** tab and click **Create**. + + {{< notice note >}} + +- If you cannot see the **Create** button, it means no cluster is available to use for your workspace. You need to contact the platform administrator or cluster administrator so that workspace resources can be created in the cluster. [To assign a cluster to a workspace](../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/), the platform administrator or cluster administrator needs to edit **Cluster Visibility** on the **Cluster Management** page. +- Make sure at least two clusters are assigned to your workspace. + + {{}} + +2. In the **Create Multi-cluster Project** window that appears, enter a project name and add an alias or description if necessary. 
Under **Clusters**, select multiple clusters for your project by clicking **Add Cluster**, and then click **OK**. +3. A multi-cluster project created is displayed in the list. Click icon on the right of a multi-cluster project to select an operation from the drop-down menu: + + - **Edit Information**: Edit the basic information of a multi-cluster project. + - **Add Cluster**: Select a cluster from the drop-down list in the displayed dialog box and click **OK** to add a cluster to a multi-cluster project. + - **Delete**: Delete a multi-cluster project. + +### Edit a multi-cluster project + +1. Go to your multi-cluster project, navigate to **Basic Information** under **Project Settings** and click **Manage** on the right. + +2. Choose **Edit Information** from the drop-down menu. + + {{< notice note >}} + +The project name cannot be edited. If you want to change other information, see relevant tutorials in the documentation. + +{{}} + +3. To delete a multi-cluster project, choose **Delete Project** from the drop-down menu. In the dialog that appears, enter the project name and click **OK** to confirm the deletion. + +{{< notice warning >}} + +A multi-cluster project cannot be recovered once deleted and resources in the project will be removed. + +{{}} diff --git a/content/en/docs/v3.4/project-administration/project-gateway.md b/content/en/docs/v3.4/project-administration/project-gateway.md new file mode 100644 index 000000000..37cab408d --- /dev/null +++ b/content/en/docs/v3.4/project-administration/project-gateway.md @@ -0,0 +1,66 @@ +--- +title: "Project Gateway" +keywords: 'KubeSphere, Kubernetes, project, gateway, NodePort, LoadBalancer' +description: 'Understand the concept of project gateway and how to manage it.' +linkTitle: "Project Gateway" +weight: 13500 +--- + +A gateway in a KubeSphere project is an [NGINX Ingress controller](https://www.nginx.com/products/nginx/kubernetes-ingress-controller). KubeSphere has a built‑in configuration for HTTP load balancing, called [Routes](../../project-user-guide/application-workloads/routes/). A Route defines rules for external connections to Services within a cluster. Users who need to provide external access to their Services create a Route resource that defines rules, including the URI path, backing service name, and other information. + +In addition to project gateways, KubeSphere also supports [cluster-scope gateway](../../cluster-administration/cluster-settings/cluster-gateway/) to let all projects share a global gateway. + +This tutorial demonstrates how to enable a project gateway on KubeSphere for external access to Services and Routes. + +## Prerequisites + +You need to create a workspace, a project and a user (`project-admin`). The user must be invited to the project with the role of `admin` at the project level. For more information, see [Create Workspaces, Projects, Users and Roles](../../quick-start/create-workspace-and-project/). + +## Enable a Gateway + +1. Log in to the KubeSphere web console as `project-admin` and go to your project. In **Project Settings** from the navigation bar, click **Gateway Settings**. + +2. Click **Enable Gateway**. In the pop-up window, you can select two access modes for the gateway. + + **NodePort**: You can access Services with corresponding node ports through the gateway. + + **LoadBalancer**: You can access Services with a single IP address through the gateway. + +3. You can also enable **Tracing** on the **Enable Gateway** page. 
You have to turn on **Application Governance** when you create composed applications so that you can use the Tracing feature and use [different grayscale release strategies](../../project-user-guide/grayscale-release/overview/). Once it is enabled, check whether an annotation (for example, `nginx.ingress.kubernetes.io/service-upstream: true`) is added for your route (Ingress) if the route is inaccessible. + +3. In **Configuration Options**, add key-value pairs to provide configurations for system components of NGINX Ingress controller. For more information, see [NGINX Ingress Controller documentation](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#configuration-options). + +4. After you select an access method, click **OK**. + +## NodePort + +If you select **NodePort**, KubeSphere will set a port for http and https requests respectively. You can access your Service at `EIP:NodePort` or `Hostname:NodePort`. + +For example, to access your Service with an elastic IP address (EIP), visit: + +- `http://EIP:32734` +- `https://EIP:32471` + +When you create a [Route](../../project-user-guide/application-workloads/routes/) (Ingress), you can customize a host name to access your Service. For example, to access your Service with the host name set in your Route, visit: + +- `http://demo.kubesphere.io:32734` +- `https://demo.kubesphere.io:32471` + +{{< notice note >}} + +- You may need to open ports in your security groups and configure relevant port forwarding rules depending on your environment. + +- If you access your Service using the host name, make sure the domain name you set can be resolved to the IP address. +- **NodePort** is not recommended for a production environment. You can use **LoadBalancer** instead. + +{{}} + +## LoadBalancer + +You must configure a load balancer in advance before you select **LoadBalancer**. The IP address of the load balancer will be bound to the gateway to provide access to internal Services and Routes. + +{{< notice note >}} + +Cloud providers often support load balancer plugins. If you install KubeSphere on major Kubernetes engines on their platforms, you may notice a load balancer is already available in the environment for you to use. If you install KubeSphere in a bare metal environment, you can use [OpenELB](https://github.com/kubesphere/openelb) for load balancing. + +{{}} \ No newline at end of file diff --git a/content/en/docs/v3.4/project-administration/project-network-isolation.md b/content/en/docs/v3.4/project-administration/project-network-isolation.md new file mode 100644 index 000000000..9624aef77 --- /dev/null +++ b/content/en/docs/v3.4/project-administration/project-network-isolation.md @@ -0,0 +1,206 @@ +--- +title: "Project Network Isolation" +keywords: 'KubeSphere, Kubernetes, Calico, Network Policy' +description: 'Understand the concept of network isolation and how to configure network policies for a project.' +linkTitle: "Project Network Isolation" +weight: 13300 +--- + +KubeSphere project network isolation lets project administrators enforce which network traffic is allowed using different rules. This tutorial demonstrates how to enable network isolation among projects and set rules to control network traffic. + +## Prerequisites + +- You have already enabled [Network Policies](../../pluggable-components/network-policy/). +- You must have an available project and a user of the `admin` role (`project-admin`) at the project level. 
For more information, see [Create Workspaces, Projects, Users and Roles](../../quick-start/create-workspace-and-project/). + +{{< notice note >}} + +For the implementation of the Network Policy, you can refer to [KubeSphere NetworkPolicy](https://github.com/kubesphere/community/blob/master/sig-network/concepts-and-designs/kubesphere-network-policy.md). + +{{}} + +## Enable/Disable Project Network Isolation + +1. Log in to KubeSphere as `project-admin`. Go to your project and select **Network Isolation** in **Project Settings**. By default, project network isolation is disabled. + +2. To enable project network isolation, click **Enable**. + + {{< notice note >}} + + When network isolation is turned on, egress traffic will be allowed by default, while ingress traffic will be denied for different projects. But when you add an egress network policy, only traffic that matches your policy will be allowed to go out. + + {{}} + +3. You can also disable network isolation by toggling the **Enabled** button on this page. + + + {{< notice note >}} + + When network isolation is turned off, any previously created network policies will be deleted as well. + + {{}} + +## Set a Network Policy + +If the default policy does not meet your needs when network isolation is enabled, you can customize your network policy to meet your needs. Currently, you can add custom network policies in KubeSphere for traffic within the cluster or incoming traffic outside the cluster. + +### For internal traffic within the cluster + +Network policies at the project level within a cluster are used to control whether resources in this project can be accessed by other projects within the same cluster, and which Services you can access. + +Assume an NGINX Deployment workload has been created in another project `demo-project-2` and is exposed via the Service `nginx` on the port `80` with `TCP`. Here is an example of how to set ingress and egress traffic rules. + +{{< notice note >}} + +For more information about how to create workloads, see [Deployments](../../project-user-guide/application-workloads/deployments/) and [Services](../../project-user-guide/application-workloads/services/) respectively. + +{{}} + +#### Allow ingress traffic from workloads in a different project + +1. On the **Network Isolation** page of your current project, click **Internal Allowlist**. + +2. Click **Add Allowlist Entry**. + +3. Select **Ingress** under **Traffic Direction**. + +4. In **Project**, select the project `demo-project-2`. + +5. Click **OK**, and then you can see that the project is now in the allowlist. + +{{< notice note >}} + +If the network is not accessible after you set the network policy, then you need to check whether the peer project has a corresponding egress rule in it. + +{{}} + +#### Allow egress traffic to Services in a different project + +1. On the **Network Isolation** page of your current project, click **Internal Allowlist**. + +2. Click **Add Allowlist Entry**. + +3. Select **Egress** under **Traffic Direction**. + +4. Select the tab **Service** under **Type**. + +5. Select the project `demo-project-2` from the drop-down list. + +6. Select the Service that is allowed to receive egress traffic. In this case, select `nginx`. + +7. Click **OK**, and then you can see that the Service is now in the allowlist. + +{{< notice note >}} + +When creating a Service, you must make sure that the selectors of the Service are not empty. 
+ +{{}} + +### For incoming traffic outside the cluster + +KubeSphere uses CIDR to distinguish between peers. Assume a Tomcat Deployment workload has been created in your current project and is exposed via the `NodePort` Service `demo-service` on the NodePort `80` with `TCP`. For an external client with the IP address `192.168.1.1` to access this Service, you need to add a rule for it. + +#### Allow ingress traffic from a client outside the cluster + +1. On the **Network Isolation** page of your current project, select **External Allowlist** and click **Add Allowlist Entry**. + +2. Select **Ingress** under **Traffic Direction**. + +3. Enter `192.168.1.1/32` for **Network Segment**. + +4. Select the protocol `TCP` and enter `80` as the port number. + +5. Click **OK**, and then you can see that the rule has been added. + +{{< notice note >}} + +It is recommended to set `spec.externalTrafficPolicy` in the Service configuration to `local`, so that the source address of the packet will not change. Namely, the source address of the packet is the source address of the client. + +{{}} + +Assume the IP address of an external client is `http://10.1.0.1:80`, then you need to set a rule for the egress traffic so that the internal Service can access it. + +#### Allow egress traffic to Services outside the cluster + +1. On the **Network Isolation** page of your current project, select **External Allowlist** and click **Add Allowlist Entry**. + +2. Select **Egress** under **Traffic Direction**. + +3. Enter `10.1.0.1/32` for **Network Segment**. + +4. Select the protocol `TCP` and enter `80` as the port number. + +5. Click **OK**, and you can see that the rule has been added. + +{{< notice note >}} + +In step 4, when you select **SCTP**, you must make sure SCTP is [enabled](https://kubernetes.io/docs/concepts/services-networking/network-policies/#sctp-support). + +{{}} + +### Best practices + +To ensure that all Pods in a project are secure, a best practice is to enable network isolation. When network isolation is on, the project cannot be accessed by other projects. If your workloads need to be accessed by others, you can follow these steps: + +1. Set a [gateway](../project-gateway/) in **Project Settings**. +2. Expose workloads that need to be accessed to a gateway via a Service. +3. Allow ingress traffic from the namespace where your gateway locates. + +If egress traffic is controlled, you should have a clear plan of what projects, Services, and IP addresses can be accessed, and then add them one by one. If you are not sure about what you want, it is recommended that you keep your network policy unchanged. + +## FAQs + +Q: Why cannot the custom monitoring system of KubeSphere get data after I enabled network isolation? + +A: After you enable custom monitoring, the KubeSphere monitoring system will access the metrics of the Pod. You need to allow ingress traffic for the KubeSphere monitoring system. Otherwise, it cannot access Pod metrics. + +KubeSphere provides a configuration item `allowedIngressNamespaces` to simplify similar configurations, which allows all projects listed in the configuration. + +```yaml +root@node1:~# kubectl get -n kubesphere-system clusterconfigurations.installer.kubesphere.io ks-installer -o yaml +apiVersion: installer.kubesphere.io/v1alpha1 +kind: ClusterConfiguration +metadata: + ... + name: ks-installer + namespace: kubesphere-system + ... +spec: + ... 
+ networkpolicy: + enabled: true + nsnpOptions: + allowedIngressNamespaces: + - kubesphere-system + - kubesphere-monitoring-system + ... +``` + +Q: Why cannot I access a Service even after setting a network policy through the Service? + +A: When you add a network policy and access the Service via the cluster IP address, if the network is not + working, check the kube-proxy configuration to see if `masqueradeAll` is `false`. + + ```yaml + root@node1:~# kubectl get cm -n kube-system kube-proxy -o yaml + apiVersion: v1 + data: + config.conf: |- + ... + iptables: + masqueradeAll: false + ... + ... + kind: ConfigMap + metadata: + ... + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system + ... + ``` + +Q: How do I determine the network segment when I set the ingress rule? + +A: In Kubernetes, the source IP address of the packet is often handled by NAT, so you need to figure out what the source address of the packet will be before you add the rule. For more information, refer to [Source IP](https://github.com/kubesphere/community/blob/master/sig-network/concepts-and-designs/kubesphere-network-policy.md#source-ip). diff --git a/content/en/docs/v3.4/project-administration/role-and-member-management.md b/content/en/docs/v3.4/project-administration/role-and-member-management.md new file mode 100644 index 000000000..8354b3606 --- /dev/null +++ b/content/en/docs/v3.4/project-administration/role-and-member-management.md @@ -0,0 +1,75 @@ +--- +title: "Project Role and Member Management" +keywords: 'KubeSphere, Kubernetes, role, member, management, project' +description: 'Learn how to manage access control for a project.' +linkTitle: "Project Role and Member Management" +weight: 13200 +--- + +This tutorial demonstrates how to manage roles and members in a project. At the project level, you can grant permissions in the following modules to a role: + +- **Application Workloads** +- **Storage** +- **Configurations** +- **Monitoring & Alerting** +- **Access Control** +- **Project Settings** + +## Prerequisites + +At least one project has been created, such as `demo-project`. Besides, you need a user of the `admin` role (for example, `project-admin`) at the project level. For more information, see [Create Workspaces, Projects, Users and Roles](../../quick-start/create-workspace-and-project/). + +## Built-in Roles + +In **Project Roles**, there are three available built-in roles as shown below. Built-in roles are created automatically by KubeSphere when a project is created and they cannot be edited or deleted. You can only view permissions included in a built-in role or assign it to a user. + + + + + + + + + + + + + + + + + + +
+| Built-in Roles | Description |
+| -------------- | ----------- |
+| `viewer`       | Project viewer who can view all resources in the project. |
+| `operator`     | Project operator who can manage resources other than users and roles in the project. |
+| `admin`        | Project administrator who has full control over all resources in the project. |
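+
+Since a KubeSphere project is a Kubernetes namespace, its role permissions correspond closely to namespaced Kubernetes RBAC. Purely as an illustration (the resources, verbs, and names below are assumptions rather than the exact rules KubeSphere generates), a read-only role similar to `viewer` could be expressed as a plain `Role`:
+
+```yaml
+# Illustrative sketch only; KubeSphere creates and aggregates its own role objects for you.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: viewer-example        # hypothetical name
+  namespace: demo-project     # a KubeSphere project is a namespace
+rules:
+  - apiGroups: ["", "apps", "batch"]
+    resources: ["pods", "services", "deployments", "statefulsets", "daemonsets", "jobs", "cronjobs"]
+    verbs: ["get", "list", "watch"]
+```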
+ +To view the permissions that a role contains: + +1. Log in to the console as `project-admin`. In **Project Roles**, click a role (for example, `admin`) to view the role details. + +2. Click the **Authorized Users** tab to check users that have been granted the role. + +## Create a Project Role + +1. Navigate to **Project Roles** under **Project Settings**. + +2. In **Project Roles**, click **Create** and set a role name (for example, `project-monitor`). Click **Edit Permissions** to continue. + +3. In the pop-up window, permissions are categorized into different **Modules**. In this example, select **Application Workload Viewing** in **Application Workloads**, and **Alerting Message Viewing** and **Alerting Policy Viewing** in **Monitoring & Alerting**. Click **OK** to finish creating the role. + + {{< notice note >}} + +**Depends on** means the major permission (the one listed after **Depends on**) needs to be selected first so that the affiliated permission can be assigned. + +{{}} + +4. Newly-created roles will be listed in **Project Roles**. To edit an existing role, click icon on the right. + +## Invite a New Member + +1. Navigate to **Project Members** under **Project Settings**, and click **Invite**. + +2. Invite a user to the project by clicking icon on the right of it and assign a role to it. + +3. After you add the user to the project, click **OK**. In **Project Members**, you can see the user in the list. + +4. To edit the role of an existing user or remove the user from the project, click icon on the right and select the corresponding operation. diff --git a/content/en/docs/v3.4/project-user-guide/_index.md b/content/en/docs/v3.4/project-user-guide/_index.md new file mode 100644 index 000000000..7dc50ce3b --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/_index.md @@ -0,0 +1,12 @@ +--- +title: "Project User Guide" +description: "Help you to better manage resources in a KubeSphere project" +layout: "second" + +linkTitle: "Project User Guide" +weight: 10000 + +icon: "/images/docs/v3.3/docs.svg" +--- + +In KubeSphere, project users with necessary permissions are able to perform a series of tasks, such as creating different kinds of workloads, configuring volumes, Secrets, and ConfigMaps, setting various release strategies, monitoring app metrics, and creating alerting policies. As KubeSphere features great flexibility and compatibility without any code hacking into native Kubernetes, it is very convenient for users to get started with any feature required for their testing, development and production environments. \ No newline at end of file diff --git a/content/en/docs/v3.4/project-user-guide/alerting/_index.md b/content/en/docs/v3.4/project-user-guide/alerting/_index.md new file mode 100644 index 000000000..1b4523bc0 --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/alerting/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Alerting" +weight: 10700 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/project-user-guide/alerting/alerting-message.md b/content/en/docs/v3.4/project-user-guide/alerting/alerting-message.md new file mode 100644 index 000000000..507563542 --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/alerting/alerting-message.md @@ -0,0 +1,27 @@ +--- +title: "Alerting Messages (Workload Level)" +keywords: 'KubeSphere, Kubernetes, Workload, Alerting, Message, Notification' +description: 'Learn how to view alerting messages for workloads.' 
+linkTitle: "Alerting Messages (Workload Level)" +weight: 10720 +--- + +Alerting messages record detailed information of alerts triggered based on the alerting policy defined. This tutorial demonstrates how to view alerting messages at the workload level. + +## Prerequisites + +- You have enabled [KubeSphere Alerting](../../../pluggable-components/alerting/). +- You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). +- You have created a workload-level alerting policy and an alert has been triggered. For more information, refer to [Alerting Policies (Workload Level)](../alerting-policy/). + +## View Alerting Messages + +1. Log in to the console as `project-regular`, go to your project, and go to **Alerting Messages** under **Monitoring & Alerting**. + +2. On the **Alerting Messages** page, you can see all alerting messages in the list. The first column displays the summary and message you have defined in the notification of the alert. To view details of an alerting message, click the name of the alerting policy and click the **Alerting History** tab on the displayed page. + +3. On the **Alerting History** tab, you can see alert severity, monitoring targets, and activation time. + +## View Notifications + +If you also want to receive alert notifications (for example, email and Slack messages), you need to configure [a notification channel](../../../cluster-administration/platform-settings/notification-management/configure-email/) first. \ No newline at end of file diff --git a/content/en/docs/v3.4/project-user-guide/alerting/alerting-policy.md b/content/en/docs/v3.4/project-user-guide/alerting/alerting-policy.md new file mode 100644 index 000000000..d46cea3bd --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/alerting/alerting-policy.md @@ -0,0 +1,60 @@ +--- +title: "Alerting Policies (Workload Level)" +keywords: 'KubeSphere, Kubernetes, Workload, Alerting, Policy, Notification' +description: 'Learn how to set alerting policies for workloads.' +linkTitle: "Alerting Policies (Workload Level)" +weight: 10710 +--- + +KubeSphere provides alerting policies for nodes and workloads. This tutorial demonstrates how to create alerting policies for workloads in a project. See [Alerting Policy (Node Level)](../../../cluster-administration/cluster-wide-alerting-and-notification/alerting-policy/) to learn how to configure alerting policies for nodes. + +## Prerequisites + +- You have enabled [KubeSphere Alerting](../../../pluggable-components/alerting/). +- To receive alert notifications, you must configure a [notification channel](../../../cluster-administration/platform-settings/notification-management/configure-email/) beforehand. +- You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). +- You have workloads in this project. If they are not ready, see [Deploy and Access Bookinfo](../../../quick-start/deploy-bookinfo-to-k8s/) to create a sample app. + +## Create an Alerting Policy + +1. Log in to the console as `project-regular` and go to your project. Go to **Alerting Policies** under **Monitoring & Alerting**, then click **Create**. + +2. 
In the displayed dialog box, provide the basic information as follows. Click **Next** to continue.
+
+   - **Name**. A concise and clear name as its unique identifier, such as `alert-demo`.
+   - **Alias**. Helps you distinguish alerting policies better.
+   - **Description**. A brief introduction to the alerting policy.
+   - **Threshold Duration (min)**. The status of the alerting policy becomes `Firing` when the duration of the condition configured in the alerting rule reaches the threshold.
+   - **Severity**. Allowed values include **Warning**, **Error**, and **Critical**, indicating how serious an alert is.
+
+3. On the **Rule Settings** tab, you can use the rule template or create a custom rule. To use the template, fill in the following fields.
+
+   - **Resource Type**. Select the resource type you want to monitor, such as **Deployment**, **StatefulSet**, and **DaemonSet**.
+   - **Monitoring Targets**. Depending on the resource type you select, the targets can be different. You cannot see any target if you do not have any workload in the project.
+   - **Alerting Rule**. Define a rule for the alerting policy. These rules are based on Prometheus expressions, and an alert is triggered when the conditions are met. You can monitor objects such as CPU and memory.
+
+   {{< notice note >}}
+
+   You can create a custom rule with PromQL by entering an expression in the **Monitoring Metrics** field (autocompletion supported). For more information, see [Querying Prometheus](https://prometheus.io/docs/prometheus/latest/querying/basics/).
+
+   {{</ notice >}}
+
+   Click **Next** to continue.
+
+4. On the **Message Settings** tab, enter the alert summary and message to be included in your notification, and then click **Create**.
+
+5. An alerting policy is **Inactive** when it is just created. If the conditions in the rule expression are met, it first reaches **Pending**, and then turns to **Firing** if the conditions continue to be met for the threshold duration.
+
+## Edit an Alerting Policy
+
+To edit an alerting policy after it is created, go to the **Alerting Policies** page and click the icon on the right of the policy.
+
+1. Click **Edit** from the drop-down menu and edit the alerting policy following the same steps as when you created it. Click **OK** on the **Message Settings** tab to save your changes.
+
+2. Click **Delete** from the drop-down menu to delete an alerting policy.
+
+## View an Alerting Policy
+
+Click an alerting policy on the **Alerting Policies** page to see its details, including alerting rules and alerting history. You can also see the rule expression, which is based on the template you used when creating the alerting policy.
+
+Under **Alert Monitoring**, the **Alert Monitoring** chart shows the actual usage or amount of resources over time. **Alerting Message** displays the customized message you set in notifications.
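+
+For reference, the rule template ultimately evaluates a Prometheus expression against workload metrics. The following sketch is illustrative only (the metric, labels, and threshold are assumptions, and KubeSphere generates the actual rule objects for you), but it shows the general shape of a rule that fires when the Pods of a Deployment named `demo-deployment` keep using more than 1 GiB of memory for 5 minutes:
+
+```yaml
+# Plain Prometheus-style rule for illustration; not the exact object KubeSphere creates.
+groups:
+  - name: workload-alerts
+    rules:
+      - alert: DeploymentMemoryUsageHigh
+        # Standard cAdvisor metric; assumes Pod names follow the <deployment>-<hash>-<hash> pattern.
+        expr: sum(container_memory_working_set_bytes{namespace="demo-project", pod=~"demo-deployment-.*", container!=""}) > 1073741824
+        for: 5m
+        labels:
+          severity: warning
+        annotations:
+          summary: Memory usage of demo-deployment has stayed above 1 GiB for 5 minutes.
+```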
diff --git a/content/en/docs/v3.4/project-user-guide/application-workloads/_index.md b/content/en/docs/v3.4/project-user-guide/application-workloads/_index.md new file mode 100644 index 000000000..d73a9f85a --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/application-workloads/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Application Workloads" +weight: 10200 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/project-user-guide/application-workloads/container-image-settings.md b/content/en/docs/v3.4/project-user-guide/application-workloads/container-image-settings.md new file mode 100644 index 000000000..f0a78fcae --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/application-workloads/container-image-settings.md @@ -0,0 +1,268 @@ +--- +title: "Pod Settings" +keywords: 'KubeSphere, Kubernetes, image, workload, setting, container' +description: 'Learn different properties on the dashboard in detail as you set Pods for your workload.' +linkTitle: "Pod Settings" +weight: 10280 +--- + +When you create Deployments, StatefulSets or DaemonSets, you need to specify a Pod. At the same time, KubeSphere provides users with various options to customize workload configurations, such as health check probes, environment variables and start commands. This page illustrates detailed explanations of different properties in **Pod Settings**. + +{{< notice tip >}} + +You can enable **Edit YAML** in the upper-right corner to see corresponding values in the manifest file (YAML format) of properties on the dashboard. + +{{}} + +## Pod Settings + +### Pod Replicas + +Set the number of replicated Pods by clicking icon or icon, indicated by the `.spec.replicas` field in the manifest file. This option is not available for DaemonSets. + +If you create Deployments in a multi-cluster project, select a replica scheduling mode under **Replica Scheduling Mode**: + +- **Specify Replicas**: select clusters and set the number of Pod replicas in each cluster. +- **Specify Weights**: select clusters, set the total number of Pod replicas in **Total Replicas**, and specify a weight for each cluster. The Pod replicas will be proportionally scheduled to the clusters according to the weights. To change weights after a Deployment is created, click the name of the Deployment to go to its details page and change weights under **Weights** on the **Resource Status** tab. + +If you create StatefulSets in a multi-cluster project, select clusters and set the number of Pod replicas in each cluster under **Pod Replicas**. + +### Add Container + +Click **Add Container** to add a container. + +#### Image Search Box + +You can click icon on the right to select an image from the list or enter an image name to search it. KubeSphere provides Docker Hub images and your private image repository. If you want to use your private image repository, you need to create an Image Registry Secret first in **Secrets** under **Configuration**. + +{{< notice note >}} + +Remember to press **Enter** on your keyboard after you enter an image name in the search box. + +{{}} + +#### Image Tag + +You can enter a tag like `imagename:tag`. If you do not specify it, it will default to the latest version. + +#### Container Name + +The container name is automatically created by KubeSphere, which is indicated by `.spec.containers.name`. + +#### Container Type + +If you choose **Init container**, it means the init container will be created for the workload. 
For more information about init containers, please visit [Init Containers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/?spm=a2c4g.11186623.2.19.16704b3e9qHXPb).
+
+#### Resource Request
+
+The resource request is the amount of CPU and memory reserved for the container. It ensures that the container gets the resources it needs and is not starved when other services or processes compete for resources, which could otherwise make the application unavailable.
+
+- The CPU request is indicated by `.spec.containers[].resources.requests.cpu` in the manifest file. The CPU request can be exceeded.
+- The memory request is indicated by `.spec.containers[].resources.requests.memory` in the manifest file. The memory request can be exceeded, but the container may be evicted when node memory is insufficient.
+
+#### Resource Limit
+
+You can specify the upper limit of the resources (CPU, memory, and GPU) that the container can use to prevent it from occupying excessive resources.
+
+- The CPU limit is indicated by `.spec.containers[].resources.limits.cpu` in the manifest file. The CPU limit can be exceeded for a short time, and the container will not be stopped.
+- The memory limit is indicated by `.spec.containers[].resources.limits.memory` in the manifest file. The memory limit cannot be exceeded. If it is exceeded, the container may be stopped or rescheduled to a node with sufficient resources.
+
+{{< notice note >}}
+
+The CPU resource is measured in CPU units, or **Core** in KubeSphere. The memory resource is measured in bytes, or **MiB** in KubeSphere.
+
+{{</ notice >}}
+
+To set **GPU Type**, select a GPU type from the drop-down list, which defaults to `nvidia.com/gpu`. **GPU Limit** defaults to no limit.
+
+#### **Port Settings**
+
+You need to set the access protocol and port information for the container. To use the default settings, click **Use Default Ports**.
+
+#### **Image Pull Policy**
+
+This value is indicated by the `imagePullPolicy` field. On the dashboard, you can choose one of the following three options from the drop-down list.
+
+- **Use Local Image First**: The image is pulled only if it does not exist locally.
+
+- **Pull Image Always**: The image is pulled every time the Pod starts.
+
+- **Use Local Image Only**: The image is never pulled, regardless of whether it exists locally.
+
+{{< notice tip >}}
+
+- The default value is **Use Local Image First**, but for images tagged `:latest`, the default is **Pull Image Always**.
+- When pulling an image, the container runtime checks the image digest and does not download the image again if it has not changed.
+- Avoid the `:latest` tag in production environments as much as possible. In development environments, it is convenient because the latest image is pulled automatically.
+
+{{< /notice >}}
+
+#### **Health Check**
+
+KubeSphere supports liveness checks, readiness checks, and startup checks.
+
+- **Liveness Check**: Liveness probes are used to know whether a container is running, indicated by `livenessProbe`.
+
+- **Readiness Check**: Readiness probes are used to know whether a container is ready to serve requests, indicated by `readinessProbe`.
+
+- **Startup Check**: Startup probes are used to know whether a container application has started, indicated by `startupProbe`.
+
+Liveness, readiness, and startup checks support the following configurations:
+
+- **HTTP Request**: Performs an HTTP `GET` request on the specified port and path on the IP address of the container. If the response status code is greater than or equal to 200 and less than 400, the diagnosis is considered successful. The supported parameters include:
+
+  - **Path**: The request scheme (HTTP or HTTPS, specified by `scheme`) and the path on the HTTP server to access (specified by `path`). The request is sent to a port or port name exposed by the container (specified by `port`); the port number must be between 1 and 65535.
+  - **Initial Delay (s)**: The number of seconds after the container has started before probes are initiated, specified by `initialDelaySeconds`. It defaults to 0.
+  - **Check Interval (s)**: The probe frequency (in seconds), specified by `periodSeconds`. It defaults to 10. The minimum value is 1.
+  - **Timeout (s)**: The number of seconds after which the probe times out, specified by `timeoutSeconds`. It defaults to 1. The minimum value is 1.
+  - **Success Threshold**: The minimum consecutive successes for the probe to be considered successful after having failed, specified by `successThreshold`. It defaults to 1 and must be 1 for liveness and startup. The minimum value is 1.
+  - **Failure Threshold**: The minimum consecutive failures for the probe to be considered failed after having succeeded, specified by `failureThreshold`. It defaults to 3. The minimum value is 1.
+
+- **TCP Port**: Performs a TCP check on the specified port on the IP address of the container. If the port is open, the diagnosis is considered successful. The supported parameters include:
+
+  - **Port**: The port or port name exposed by the container, specified by `port`. The port number must be between 1 and 65535.
+  - **Initial Delay (s)**: The number of seconds after the container has started before probes are initiated, specified by `initialDelaySeconds`. It defaults to 0.
+  - **Check Interval (s)**: The probe frequency (in seconds), specified by `periodSeconds`. It defaults to 10. The minimum value is 1.
+  - **Timeout (s)**: The number of seconds after which the probe times out, specified by `timeoutSeconds`. It defaults to 1. The minimum value is 1.
+  - **Success Threshold**: The minimum consecutive successes for the probe to be considered successful after having failed, specified by `successThreshold`. It defaults to 1 and must be 1 for liveness and startup. The minimum value is 1.
+  - **Failure Threshold**: The minimum consecutive failures for the probe to be considered failed after having succeeded, specified by `failureThreshold`. It defaults to 3. The minimum value is 1.
+
+- **Command**: Executes the specified command in the container. If the return code is 0 when the command exits, the diagnosis is considered successful. The supported parameters include:
+
+  - **Command**: A command used to check the health of the container, specified by `exec.command`.
+  - **Initial Delay (s)**: The number of seconds after the container has started before probes are initiated, specified by `initialDelaySeconds`. It defaults to 0.
+  - **Check Interval (s)**: The probe frequency (in seconds), specified by `periodSeconds`. It defaults to 10. The minimum value is 1.
+  - **Timeout (s)**: The number of seconds after which the probe times out, specified by `timeoutSeconds`. It defaults to 1. The minimum value is 1.
+  - **Success Threshold**: The minimum consecutive successes for the probe to be considered successful after having failed, specified by `successThreshold`. It defaults to 1 and must be 1 for liveness and startup. The minimum value is 1.
+ - **Failure Threshold**: The minimum consecutive failures for the probe to be considered failed after having succeeded, specified by `failureThreshold`. It defaults to 3. The minimum value is 1. + + For more information about health checks, please visit [Container Probes](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes). + +#### **Start Command** + +By default, a container runs the default image command. + +- **Command** refers to the `command` field of containers in the manifest file. +- **Parameters** refers to the `args` field of containers in the manifest file. + +For more information about the command, please visit [Define a Command and Arguments for a Container](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/). + +#### **Environment Variables** + +Configure environment variables for Pods in the form of key-value pairs. + +- name: The name of the environment variable, specified by `env.name`. +- value: The value of the variable referenced, specified by `env.value`. +- Click **From configmap** or **From secret** to use an existing ConfigMap or Secret. + +For more information about the command, please visit [Pod variable](https://kubernetes.io/docs/tasks/inject-data-application/environment-variable-expose-pod-information/?spm=a2c4g.11186623.2.20.16704b3e9qHXPb). + +#### **Container Security Context** + +A security context defines privilege and access control settings for a Pod or Container. For more information about the security context, please visit [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/). + +#### **Synchronize Host Timezone** + +The time zone of the container will be consistent with that of the host after synchronization. + +## **Update Strategy** + +### Pod Update + +Update strategies are different for different workloads. + +{{< tabs >}} + +{{< tab "Deployments" >}} + +The `.spec.strategy` field specifies the strategy used to replace old Pods with new ones. `.spec.strategy.type` can be `Recreate` or `Rolling Update`. `Rolling Update` is the default value. + +- **Rolling Update (recommended)** + + A rolling update means the instance of the old version will be gradually replaced with new ones. During the upgrade process, the traffic will be load balanced and distributed to the old and new instances simultaneously, so the service will not be interrupted. + +- **Simultaneous Update** + + All existing Pods will be killed before new ones are created. Please note that the service will be interrupted during the update process. + +For more information about update strategies, please visit [Strategy in Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy). + +{{}} + +{{< tab "StatefulSets" >}} + +The drop-down menu under **Update Strategy** is indicated by the `.spec.updateStrategy` field of a StatefulSet in the manifest file. It allows you to handle updates of Pod containers, tags, resource requests or limits, and annotations. There are two strategies: + +- **Rolling Update (recommended)** + + If `.spec.template` is updated, the Pods in the StatefulSet will be automatically deleted with new pods created as replacements. Pods are updated in reverse ordinal order, sequentially deleted and created. A new Pod update will not begin until the previous Pod becomes up and running after it is updated. + +- **Update on Deletion** + + If `.spec.template` is updated, the Pods in the StatefulSet will not be automatically updated. 
You need to manually delete old Pods so that the controller can create new Pods. + +For more information about update strategies, please visit [StatefulSet Update Strategies](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies). + +{{}} + +{{< tab "DaemonSets" >}} + +The drop-down menu under **Update Strategy** is indicated by the `.spec.updateStrategy` field of a DaemonSet in the manifest file. It allows you to handle updates of Pod containers, tags, resource requests or limits, and annotations. There are two strategies: + +- **Rolling Update (recommended)** + + If `.spec.template` is updated, old DaemonSet pods will be killed with new pods created automatically in a controlled fashion. At most one pod of the DaemonSet will be running on each node during the whole update process. + +- **Update on Deletion** + + If `.spec.template` is updated, new DaemonSet pods will only be created when you manually delete old DaemonSet pods. This is the same behavior of DaemonSets in Kubernetes version 1.5 or before. + +For more information about update strategies, please visit [DaemonSet Update Strategy](https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/#daemonset-update-strategy). + +{{}} + +{{}} + +### Rolling Update Settings + +{{< tabs >}} + +{{< tab "Deployments" >}} + +**Rolling Update Settings** in a Deployment is different from that of a StatefulSet. + +- **Maximum Unavailable Pods**: The maximum number of Pods that can be unavailable during the update, specified by `maxUnavailable`. The default value is 25%. +- **Maximum Extra Pods**: The maximum number of Pods that can be scheduled above the desired number of Pods, specified by `maxSurge`. The default value is 25%. + +{{}} + +{{< tab "StatefulSets" >}} + +**Ordinal for Dividing Pod Replicas**: When you partition an update, all Pods with an ordinal greater than or equal to the value you set in Partition are updated when you update the StatefulSet’s Pod specification. This field is specified by `.spec.updateStrategy.rollingUpdate.partition`, whose default value is 0. For more information about partitions, please visit [Partitions](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions). + +{{}} + +{{< tab "DaemonSets" >}} + +**Rolling Update Settings** in a DaemonSet is different from that of a StatefulSet. + +- **Maximum Unavailable Pods**: The maximum number of pods that can be unavailable during the update, specified by `maxUnavailable`. The default value is 20%. +- **Minimum Running Time for Pod Readiness (s)**: The minimum number of seconds before a newly created Pod of DaemonSet is treated as available, specified by `minReadySeconds`. The default value is 0. + +{{}} + +{{}} + +### Pod Security Context + +A security context defines privilege and access control settings for a Pod or Container. For more information about Pod Security Policies, please visit [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/). + +### Pod Scheduling Rules + +You can select different deployment modes to switch between inter-pod affinity and inter-pod anti-affinity. In Kubernetes, inter-pod affinity is specified as field `podAffinity` of field `affinity` while inter-pod anti-affinity is specified as field `podAntiAffinity` of field `affinity`. In KubeSphere, both `podAffinity` and `podAntiAffinity` are set to `preferredDuringSchedulingIgnoredDuringExecution`. You can enable **Edit YAML** in the upper-right corner to see field details. 
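+
+For reference, the decentralized (anti-affinity) mode described below produces a preferred anti-affinity term in the Pod template roughly like the following sketch; the label selector, weight, and topology key are illustrative assumptions rather than the exact values KubeSphere generates:
+
+```yaml
+# Illustrative sketch of a preferred inter-pod anti-affinity term.
+affinity:
+  podAntiAffinity:
+    preferredDuringSchedulingIgnoredDuringExecution:
+      - weight: 100
+        podAffinityTerm:
+          labelSelector:
+            matchLabels:
+              app: demo-app                      # assumed workload label
+          topologyKey: kubernetes.io/hostname    # spread replicas across nodes
+```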
+ +- **Decentralized Scheduling** represents anti-affinity. +- **Centralized Scheduling** represents affinity. +- **Custom Rules** is to add custom scheduling rules based on your needs. + +For more information about affinity and anti-affinity, please visit [Pod affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity). diff --git a/content/en/docs/v3.4/project-user-guide/application-workloads/cronjobs.md b/content/en/docs/v3.4/project-user-guide/application-workloads/cronjobs.md new file mode 100644 index 000000000..4dc4f89de --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/application-workloads/cronjobs.md @@ -0,0 +1,105 @@ +--- +title: "CronJobs" +keywords: "KubeSphere, Kubernetes, Jobs, CronJobs" +description: "Learn basic concepts of CronJobs and how to create CronJobs on KubeSphere." +linkTitle: "CronJobs" +weight: 10260 +--- + +CronJobs are useful for creating periodic and recurring tasks, like running backups or sending emails. CronJobs can also schedule individual tasks at a specific time or interval, such as scheduling a Job for when your cluster is likely to be idle. + +For more information, see [the official documentation of Kubernetes](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/). + +## Prerequisites + +You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Create a CronJob + +### Step 1: Open the dashboard + +Log in to the console as `project-regular`. Go to **Jobs** of a project, choose **CronJobs** and click **Create**. + +### Step 2: Enter basic information + +Enter the basic information. You can refer to the instructions below for each field. When you finish, click **Next**. + +- **Name**: The name of the CronJob, which is also the unique identifier. +- **Alias**: The alias name of the CronJob, making resources easier to identify. +- **Schedule**: It runs a Job periodically on a given time-based schedule. Please see [CRON](https://en.wikipedia.org/wiki/Cron) for grammar reference. Some preset CRON statements are provided in KubeSphere to simplify the input. This field is specified by `.spec.schedule`. For this CronJob, enter `*/1 * * * *`, which means it runs once per minute. + + | Type | CRON | + | ----------- | ----------- | + | Every Hour | `0 * * * *` | + | Every Day | `0 0 * * *` | + | Every Week | `0 0 * * 0` | + | Every Month | `0 0 1 * *` | + +- **Advanced Settings**: + + - **Maximum Start Delay (s)**. Specified by `.spec.startingDeadlineSeconds` in the manifest file, this optional field represents the maximum number of seconds that a ConJob can take to start if it misses the scheduled time for any reason. CronJobs that have missed executions will be counted as failed ones. If you do not specify this field, there is no deadline for the CronJob. + - **Successful Jobs Retained**. Specified by `.spec.successfulJobsHistoryLimit` in the manifest file, this field represents the number of successful CronJob executions to retain. This is a pointer to distinguish between explicit zero and not specified. It defaults to 3. + - **Failed Jobs Retained**. Specified by `.spec.failedJobsHistoryLimit` in the manifest file, this field represents the number of failed CronJob executions to retain. This is a pointer to distinguish between explicit zero and not specified. 
It defaults to 1.
+  - **Concurrency Policy**. Specified by `.spec.concurrencyPolicy`, it represents how to treat concurrent executions of a Job:
+    - **Run Jobs concurrently** (default): Run CronJobs concurrently.
+    - **Skip new Job**: Forbid concurrent runs and skip the next run if the previous run hasn't finished yet.
+    - **Skip old Job**: Cancel the currently running Job and replace it with a new one.
+
+{{< notice note >}}
+
+You can enable **Edit YAML** in the upper-right corner to see the YAML manifest of this CronJob.
+
+{{}}
+
+### Step 3: Strategy settings (Optional)
+
+Please refer to [Jobs](../jobs/#step-3-strategy-settings-optional).
+
+### Step 4: Set a Pod
+
+1. Click **Add Container** in **Containers**, enter `busybox` in the search box, and press **Enter**.
+
+2. Scroll down to **Start Command** and enter `/bin/sh,-c,date; echo "KubeSphere!"` in the box under **Parameters**.
+
+3. Click **√** to finish setting the image and **Next** to continue.
+
+   {{< notice note >}}
+
+- This example CronJob prints `KubeSphere!`. For more information about setting images, see [Pod Settings](../container-image-settings/).
+- For more information about **Restart Policy**, see [Jobs](../jobs/#step-4-set-image).
+- You can skip **Storage Settings** and **Advanced Settings** for this tutorial. For more information, see [Mount Volumes](../deployments/#step-4-mount-volumes) and [Configure Advanced Settings](../deployments/#step-5-configure-advanced-settings) in Deployments.
+
+   {{}}
+
+### Step 5: Check results
+
+1. In the final step of **Advanced Settings**, click **Create** to finish. A new item will be added to the CronJob list if the creation is successful. Besides, you can also find the created Jobs under the **Jobs** tab.
+
+2. Under the **CronJobs** tab, click this CronJob and go to the **Job Records** tab where you can see the information of each execution record. There are 3 successful CronJob executions as the field **Successful Jobs Retained** is set to 3.
+
+3. Click any of them and you will be directed to the Job details page.
+
+4. In **Resource Status**, you can inspect the Pod status. Click icon on the right and click icon to check the container log, which displays the expected output.
+
+## Check CronJob Details
+
+### Operations
+
+On the CronJob details page, you can manage the CronJob after it is created.
+
+- **Edit Information**: Edit the basic information except `Name` of the CronJob.
+- **Pause/Start**: Pause or start the CronJob. Pausing a CronJob tells the controller to suspend subsequent executions, which does not apply to executions that have already started.
+- **Edit YAML**: Edit the CronJob's specification in YAML format.
+- **Delete**: Delete the CronJob, and return to the CronJob list page.
+
+### Job records
+
+Click the **Job Records** tab to view the records of the CronJob.
+
+### Metadata
+
+Click the **Metadata** tab to view the labels and annotations of the CronJob.
+
+### Events
+
+Click the **Events** tab to view the events of the CronJob.
diff --git a/content/en/docs/v3.4/project-user-guide/application-workloads/daemonsets.md b/content/en/docs/v3.4/project-user-guide/application-workloads/daemonsets.md
new file mode 100644
index 000000000..d8157ca37
--- /dev/null
+++ b/content/en/docs/v3.4/project-user-guide/application-workloads/daemonsets.md
@@ -0,0 +1,137 @@
+---
+title: "Kubernetes DaemonSets in KubeSphere"
+keywords: 'KubeSphere, Kubernetes, DaemonSet, workload'
+description: 'Learn basic concepts of DaemonSets and how to create DaemonSets in KubeSphere.'
+linkTitle: "DaemonSets" +weight: 10230 +--- + +A DaemonSet manages groups of replicated Pods while it ensures that all (or some) nodes run a copy of a Pod. As nodes are added to the cluster, DaemonSets automatically add Pods to the new nodes as needed. + +For more information, see the [official documentation of Kubernetes](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/). + +## Use Kubernetes DaemonSets + +DaemonSets are very helpful in cases where you want to deploy ongoing background tasks that run on all or certain nodes without any user intervention. For example: + +- Run a log collection daemon on every node, such as Fluentd or Logstash. +- Run a node monitoring daemon on every node, such as Prometheus Node Exporter, collectd, and AppDynamics Agent. +- Run a cluster storage daemon and system program on every node, such as Glusterd, Ceph, kube-dns, and kube-proxy. + +## Prerequisites + +You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Create a DaemonSet + +### Step 1: Open the dashboard + +Log in to the console as `project-regular`. Go to **Application Workloads** of a project, select **Workloads**, and click **Create** under the tab **DaemonSets**. + +### Step 2: Enter basic information + +Specify a name for the DaemonSet (for example, `demo-daemonset`), select a project, and click **Next**. + +### Step 3: Set a Pod + +1. Click **Add Container**. + +2. Enter an image name from public Docker Hub or from a [private repository](../../configuration/image-registry/) you specified. For example, enter `fluentd` in the search box and press **Enter**. + + {{< notice note >}} + +- Remember to press **Enter** on your keyboard after you enter an image name in the search box. +- If you want to use your private image repository, you should [create an Image Registry Secret](../../configuration/image-registry/) first in **Secrets** under **Configuration**. + + {{}} + +3. Set requests and limits for CPU and memory resources based on your needs. For more information, see [Resource Request and Resource Limit in Container Image Settings](../container-image-settings/#add-container-image). + +4. Click **Use Default Ports** for **Port Settings** or you can customize **Protocol**, **Name** and **Container Port**. + +5. Select a policy for image pulling from the drop-down menu. For more information, see [Image Pull Policy in Container Image Settings](../container-image-settings/#add-container-image). + +6. For other settings (**Health Check**, **Start Command**, **Environment Variables**, **Container Security Context** and **Synchronize Host Timezone**), you can configure them on the dashboard as well. For more information, see detailed explanations of these properties in [Pod Settings](../container-image-settings/#add-container-image). When you finish, click **√** in the lower-right corner to continue. + +7. Select an update strategy from the drop-down menu. It is recommended you choose **Rolling Update**. For more information, see [Update Strategy](../container-image-settings/#update-strategy). + +8. Select a Pod scheduling rule. For more information, see [Pod Scheduling Rules](../container-image-settings/#pod-scheduling-rules). + +9. Click **Next** to continue when you finish setting the container image. 
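+
+If you prefer to review the result as code, the container settings above correspond roughly to a DaemonSet manifest like the one below. This is only a minimal sketch assuming the `fluentd` image used in this example; the name, labels, and resource values are illustrative and may differ from what the console generates.
+
+```yaml
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: demo-daemonset             # name entered in Step 2 (illustrative)
+  labels:
+    app: demo-daemonset
+spec:
+  selector:
+    matchLabels:
+      app: demo-daemonset
+  template:
+    metadata:
+      labels:
+        app: demo-daemonset
+    spec:
+      containers:
+        - name: fluentd
+          image: fluentd           # image entered in the search box
+          imagePullPolicy: IfNotPresent
+          resources:               # requests and limits are examples only
+            requests:
+              cpu: 100m
+              memory: 200Mi
+            limits:
+              cpu: 500m
+              memory: 500Mi
+  updateStrategy:
+    type: RollingUpdate            # the recommended update strategy
+```
+
+You can compare this sketch with the actual manifest by enabling **Edit YAML** in the upper-right corner.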
+ +### Step 4: Mount volumes + +You can add a volume directly or mount a ConfigMap or Secret. Alternatively, click **Next** directly to skip this step. For more information about volumes, visit [Volumes](../../storage/volumes/#mount-a-volume). + +{{< notice note >}} + +DaemonSets can't use a volume template, which is used by StatefulSets. + +{{}} + +### Step 5: Configure advanced settings + +You can add metadata in this section. When you finish, click **Create** to complete the whole process of creating a DaemonSet. + +- **Add Metadata** + + Additional metadata settings for resources such as **Labels** and **Annotations**. + +## Check Kubernetes DaemonSet Details + +### Details page + +1. After a DaemonSet is created, it will be displayed in the list. You can click icon on the right and select the options from the menu to modify a DaemonSet. + + - **Edit Information**: View and edit the basic information. + - **Edit YAML**: View, upload, download, or update the YAML file. + - **Re-create**: Re-create the DaemonSet. + - **Delete**: Delete the DaemonSet. + +2. Click the name of the DaemonSet and you can go to its details page. + +3. Click **More** to display what operations about this DaemonSet you can do. + + - **Roll Back**: Select the revision to roll back. + - **Edit Settings**: Configure update strategies, containers and volumes. + - **Edit YAML**: View, upload, download, or update the YAML file. + - **Re-create**: Re-create this DaemonSet. + - **Delete**: Delete the DaemonSet, and return to the DaemonSet list page. + +4. Click the **Resource Status** tab to view the port and Pod information of a DaemonSet. + + - **Replica Status**: You cannot change the number of Pod replicas for a DaemonSet. + - **Pods** + + - The Pod list provides detailed information of the Pod (status, node, Pod IP and resource usage). + - You can view the container information by clicking a Pod item. + - Click the container log icon to view output logs of the container. + - You can view the Pod details page by clicking the Pod name. + +### Revision records + +After the resource template of workload is changed, a new log will be generated and Pods will be rescheduled for a version update. The latest 10 versions will be saved by default. You can implement a redeployment based on the change log. + +### Metadata + +Click the **Metadata** tab to view the labels and annotations of the DaemonSet. + +### Monitoring + +1. Click the **Monitoring** tab to view the CPU usage, memory usage, outbound traffic, and inbound traffic of the DaemonSet. + +2. Click the drop-down menu in the upper-right corner to customize the time range and sampling interval. + +3. Click icon/icon in the upper-right corner to start/stop automatic data refreshing. + +4. Click icon in the upper-right corner to manually refresh the data. + +### Environment variables + +Click the **Environment Variables** tab to view the environment variables of the DaemonSet. + +### Events + +Click the **Events** tab to view the events of the DaemonSet. + + diff --git a/content/en/docs/v3.4/project-user-guide/application-workloads/deployments.md b/content/en/docs/v3.4/project-user-guide/application-workloads/deployments.md new file mode 100644 index 000000000..062a03ec7 --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/application-workloads/deployments.md @@ -0,0 +1,139 @@ +--- +title: "Deployments" +keywords: 'KubeSphere, Kubernetes, Deployments, workload' +description: 'Learn basic concepts of Deployments and how to create Deployments in KubeSphere.' 
+linkTitle: "Deployments" + +weight: 10210 +--- + +A Deployment controller provides declarative updates for Pods and ReplicaSets. You describe a desired state in a Deployment object, and the Deployment controller changes the actual state to the desired state at a controlled rate. As a Deployment runs a number of replicas of your application, it automatically replaces instances that go down or malfunction. This is how Deployments make sure app instances are available to handle user requests. + +For more information, see the [official documentation of Kubernetes](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/). + +## Prerequisites + +You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Create a Deployment + +### Step 1: Open the dashboard + +Log in to the console as `project-regular`. Go to **Application Workloads** of a project, select **Workloads**, and click **Create** under the tab **Deployments**. + +### Step 2: Enter basic information + +Specify a name for the Deployment (for example, `demo-deployment`), select a project, and click **Next**. + +### Step 3: Set a Pod + +1. Before you set an image, define the number of replicated Pods in **Pod Replicas** by clicking icon or icon, which is indicated by the `.spec.replicas` field in the manifest file. + + {{< notice tip >}} +You can see the Deployment manifest file in YAML format by enabling **Edit YAML** in the upper-right corner. KubeSphere allows you to edit the manifest file directly to create a Deployment. Alternatively, you can follow the steps below to create a Deployment via the dashboard. + {{}} + +2. Click **Add Container**. + +3. Enter an image name from public Docker Hub or from a [private repository](../../configuration/image-registry/) you specified. For example, enter `nginx` in the search box and press **Enter**. + + {{< notice note >}} + +- Remember to press **Enter** on your keyboard after you enter an image name in the search box. +- If you want to use your private image repository, you should [create an Image Registry Secret](../../configuration/image-registry/) first in **Secrets** under **Configuration**. + + {{}} + +4. Set requests and limits for CPU and memory resources based on your needs. For more information, see [Resource Request and Resource Limit in Container Image Settings](../container-image-settings/#add-container-image). + +5. Click **Use Default Ports** for **Port Settings** or you can customize **Protocol**, **Name** and **Container Port**. + +6. Select a policy for image pulling from the drop-down list. For more information, see [Image Pull Policy in Container Image Settings](../container-image-settings/#add-container-image). + +7. For other settings (**Health Check**, **Start Command**, **Environment Variables**, **Container Security Context** and **Synchronize Host Timezone**), you can configure them on the dashboard as well. For more information, see detailed explanations of these properties in [Pod Settings](../container-image-settings/#add-container-image). When you finish, click **√** in the lower-right corner to continue. + +8. Select an update strategy from the drop-down menu. It is recommended that you choose **Rolling Update**. For more information, see [Update Strategy](../container-image-settings/#update-strategy). + +9. Select a Pod scheduling rule. 
For more information, see [Pod Scheduling Rules](../container-image-settings/#pod-scheduling-rules). + +10. Click **Next** to continue when you finish setting the Pod. + +### Step 4: Mount volumes + +You can add a volume directly or mount a ConfigMap or Secret. Alternatively, click **Next** directly to skip this step. For more information about volumes, visit [Volumes](../../storage/volumes/#mount-a-volume). + +{{< notice note >}} + +Deployments can't use a volume template, which is used by StatefulSets. + +{{}} + +### Step 5: Configure advanced settings + +You can set a policy for node scheduling and add metadata in this section. When you finish, click **Create** to complete the whole process of creating a Deployment. + +- **Select Nodes** + + Assign Pod replicas to run on specified nodes. It is specified in the field `nodeSelector`. + +- **Add Metadata** + + Additional metadata settings for resources such as **Labels** and **Annotations**. + +## Check Deployment Details + +### Details page + +1. After a Deployment is created, it will be displayed in the list. You can click icon on the right and select options from the menu to modify your Deployment. + + - **Edit Information**: View and edit the basic information. + - **Edit YAML**: View, upload, download, or update the YAML file. + - **Re-create**: Re-create the Deployment. + - **Delete**: Delete the Deployment. + +2. Click the name of the Deployment and you can go to its details page. + +3. Click **More** to display the operations about this Deployment you can do. + + - **Roll Back**: Select the revision to roll back. + - **Edit Autoscaling**: Autoscale the replicas according to CPU and memory usage. If both CPU and memory are specified, replicas are added or deleted if any of the conditions is met. + - **Edit Settings**: Configure update strategies, containers and volumes. + - **Edit YAML**: View, upload, download, or update the YAML file. + - **Re-create**: Re-create this Deployment. + - **Delete**: Delete the Deployment, and return to the Deployment list page. + +4. Click the **Resource Status** tab to view the port and Pod information of the Deployment. + + - **Replica Status**: Click icon or icon to increase or decrease the number of Pod replicas. + - **Pods** + + - The Pod list provides detailed information of the Pod (status, node, Pod IP and resource usage). + - You can view the container information by clicking a Pod item. + - Click the container log icon to view output logs of the container. + - You can view the Pod details page by clicking the Pod name. + +### Revision records + +After the resource template of workload is changed, a new log will be generated and Pods will be rescheduled for a version update. The latest 10 versions will be saved by default. You can implement a redeployment based on the change log. + +### Metadata + +Click the **Metadata** tab to view the labels and annotations of the Deployment. + +### Monitoring + +1. Click the **Monitoring** tab to view the CPU usage, memory usage, outbound traffic, and inbound traffic of the Deployment. + +2. Click the drop-down menu in the upper-right corner to customize the time range and sampling interval. + +3. Click icon/icon in the upper-right corner to start/stop automatic data refreshing. + +4. Click icon in the upper-right corner to manually refresh the data. + +### Environment variables + +Click the **Environment Variables** tab to view the environment variables of the Deployment. + +### Events + +Click the **Events** tab to view the events of the Deployment. 
\ No newline at end of file diff --git a/content/en/docs/v3.4/project-user-guide/application-workloads/horizontal-pod-autoscaling.md b/content/en/docs/v3.4/project-user-guide/application-workloads/horizontal-pod-autoscaling.md new file mode 100755 index 000000000..663444f5c --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/application-workloads/horizontal-pod-autoscaling.md @@ -0,0 +1,104 @@ +--- +title: "Kubernetes HPA (Horizontal Pod Autoscaling) on KubeSphere" +keywords: "Horizontal, Pod, Autoscaling, Autoscaler" +description: "How to configure Kubernetes Horizontal Pod Autoscaling on KubeSphere." +weight: 10290 + +--- + +This document describes how to configure Horizontal Pod Autoscaling (HPA) on KubeSphere. + +The Kubernetes HPA feature automatically adjusts the number of Pods to maintain average resource usage (CPU and memory) of Pods around preset values. For details about how HPA functions, see the [official Kubernetes document](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/). + +This document uses HPA based on CPU usage as an example. Operations for HPA based on memory usage are similar. + +## Prerequisites + +- You need to [enable the Metrics Server](../../../pluggable-components/metrics-server/). +- You need to create a workspace, a project and a user (for example, `project-regular`). `project-regular` must be invited to the project and assigned the `operator` role. For more information, see [Create Workspaces, Projects, Users and Roles](/docs/v3.3/quick-start/create-workspace-and-project/). + +## Create a Service + +1. Log in to the KubeSphere web console as `project-regular` and go to your project. + +2. Choose **Services** in **Application Workloads** on the left navigation bar and click **Create** on the right. + +3. In the **Create Service** dialog box, click **Stateless Service**. + +4. Set the Service name (for example, `hpa`) and click **Next**. + +5. Click **Add Container**, set **Image** to `mirrorgooglecontainers/hpa-example` and click **Use Default Ports**. + +6. Set the CPU request (for example, 0.15 cores) for each container, click **√**, and click **Next**. + + {{< notice note >}} + + * To use HPA based on CPU usage, you must set the CPU request for each container, which is the minimum CPU resource reserved for each container (for details, see the [official Kubernetes document](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/)). The HPA feature compares the average Pod CPU usage with a target percentage of the average Pod CPU request. + * For HPA based on memory usage, you do not need to configure the memory request. + + {{}} + +7. Click **Next** on the **Storage Settings** tab and click **Create** on the **Advanced Settings** tab. + +## Configure Kubernetes HPA + +1. Select **Deployments** in **Workloads** on the left navigation bar and click the HPA Deployment (for example, hpa-v1) on the right. + +2. Click **More** and select **Edit Autoscaling** from the drop-down menu. + +3. In the **Horizontal Pod Autoscaling** dialog box, configure the HPA parameters and click **OK**. + + * **Target CPU Usage (%)**: Target percentage of the average Pod CPU request. + * **Target Memory Usage (MiB)**: Target average Pod memory usage in MiB. + * **Minimum Replicas**: Minimum number of Pods. + * **Maximum Replicas**: Maximum number of Pods. + + In this example, **Target CPU Usage (%)** is set to `60`, **Minimum Replicas** is set to `1`, and **Maximum Replicas** is set to `10`. 
+ + {{< notice note >}} + + Ensure that the cluster can provide sufficient resources for all Pods when the number of Pods reaches the maximum. Otherwise, the creation of some Pods will fail. + + {{}} + +## Verify HPA + +This section uses a Deployment that sends requests to the HPA Service to verify that HPA automatically adjusts the number of Pods to meet the resource usage target. + +### Create a load generator Deployment + +1. Select **Workloads** in **Application Workloads** on the left navigation bar and click **Create** on the right. + +2. In the **Create Deployment** dialog box, set the Deployment name (for example, `load-generator`) and click **Next**. + +3. Click **Add Container** and set **Image** to `busybox`. + +4. Scroll down in the dialog box, select **Start Command**, and set **Command** to `sh,-c` and **Parameters** to `while true; do wget -q -O- http://..svc.cluster.local; done` (for example, `while true; do wget -q -O- http://hpa.demo-project.svc.cluster.local; done`). + +5. Click **√** and click **Next**. + +6. Click **Next** on the **Storage Settings** tab and click **Create** on the **Advanced Settings** tab. + +### View the HPA Deployment status + +1. After the load generator Deployment is created, go to **Workloads** in **Application Workloads** on the left navigation bar and click the HPA Deployment (for example, hpa-v1) on the right. The number of Pods displayed on the page automatically increases to meet the resource usage target. + +2. Choose **Workloads** in **Application Workloads** on the left navigation bar, click icon on the right of the load generator Deployment (for example, load-generator-v1), and choose **Delete** from the drop-down list. After the load-generator Deployment is deleted, check the status of the HPA Deployment again. The number of Pods decreases to the minimum. + +{{< notice note >}} + +The system may require a few minutes to adjust the number of Pods and collect data. + +{{}} + +## Edit HPA Configuration + +You can repeat steps in [Configure HPA](#configure-hpa) to edit the HPA configuration. + +## Cancel HPA + +1. Choose **Workloads** in **Application Workloads** on the left navigation bar and click the HPA Deployment (for example, hpa-v1) on the right. + +2. Click icon on the right of **Autoscaling** and choose **Cancel** from the drop-down list. + + diff --git a/content/en/docs/v3.4/project-user-guide/application-workloads/jobs.md b/content/en/docs/v3.4/project-user-guide/application-workloads/jobs.md new file mode 100644 index 000000000..cbfcf136f --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/application-workloads/jobs.md @@ -0,0 +1,162 @@ +--- +title: "Jobs" +keywords: "KubeSphere, Kubernetes, Docker, Jobs" +description: "Learn basic concepts of Jobs and how to create Jobs on KubeSphere." +linkTitle: "Jobs" + +weight: 10250 +--- + +A Job creates one or more Pods and ensures that a specified number of them successfully terminates. As Pods successfully complete, the Job tracks the successful completions. When a specified number of successful completions is reached, the task (namely, Job) is complete. Deleting a Job will clean up the Pods it created. + +A simple case is to create one Job object in order to reliably run one Pod to completion. The Job object will start a new Pod if the first Pod fails or is deleted (for example, due to a node hardware failure or a node reboot). You can also use a Job to run multiple Pods in parallel. 
+ +The following example demonstrates specific steps of creating a Job (computing π to 2000 decimal places) on KubeSphere. + +## Prerequisites + +You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Create a Job + +### Step 1: Open the dashboard + +Log in to the console as `project-regular`. Go to **Jobs** under **Application Workloads** and click **Create**. + +### Step 2: Enter basic information + +Enter the basic information. The following describes the parameters: + +- **Name**: The name of the Job, which is also the unique identifier. +- **Alias**: The alias name of the Job, making resources easier to identify. +- **Description**: The description of the Job, which gives a brief introduction of the Job. + +### Step 3: Strategy settings (optional) + +You can set the values in this step or click **Next** to use the default values. Refer to the table below for detailed explanations of each field. + +| Name | Definition | Description | +| ----------------------- | ---------------------------- | ------------------------------------------------------------ | +| Maximum Retries | `spec.backoffLimit` | It specifies the maximum number of retries before this Job is marked as failed. It defaults to 6. | +| Complete Pods | `spec.completions` | It specifies the desired number of successfully finished Pods the Job should be run with. Setting it to nil means that the success of any Pod signals the success of all Pods, and allows parallelism to have any positive value. Setting it to 1 means that parallelism is limited to 1 and the success of that Pod signals the success of the Job. For more information, see [Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/). | +| Parallel Pods | `spec.parallelism` | It specifies the maximum desired number of Pods the Job should run at any given time. The actual number of Pods running in a steady state will be less than this number when the work left to do is less than max parallelism ((`.spec.completions - .status.successful`) < `.spec.parallelism`). For more information, see [Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/). | +| Maximum Duration (s) | `spec.activeDeadlineSeconds` | It specifies the duration in seconds relative to the startTime that the Job may be active before the system tries to terminate it; the value must be a positive integer. | + +### Step 4: Set a Pod + +1. Select **Re-create Pod** for **Restart Policy**. You can only specify **Re-create Pod** or **Restart container** for **Restart Policy** when the Job is not completed: + + - If **Restart Policy** is set to **Re-create Pod**, the Job creates a new Pod when the Pod fails, and the failed Pod does not disappear. + + - If **Restart Policy** is set to **Restart container**, the Job will internally restart the container when the Pod fails, instead of creating a new Pod. + +2. Click **Add Container** which directs you to the **Add Container** page. Enter `perl` in the image search box and press **Enter**. + +3. On the same page, scroll down to **Start Command**. Enter the following commands in the box which computes pi to 2000 places then prints it. Click **√** in the lower-right corner and select **Next** to continue. 
+ + ```bash + perl,-Mbignum=bpi,-wle,print bpi(2000) + ``` + + {{< notice note >}}For more information about setting images, see [Pod Settings](../container-image-settings/).{{}} + +### Step 5: Inspect the Job manifest (optional) + +1. Enable **Edit YAML** in the upper-right corner which displays the manifest file of the Job. You can see all the values are set based on what you have specified in the previous steps. + + ```yaml + apiVersion: batch/v1 + kind: Job + metadata: + namespace: demo-project + labels: + app: job-test-1 + name: job-test-1 + annotations: + kubesphere.io/alias-name: Test + kubesphere.io/description: A job test + spec: + template: + metadata: + labels: + app: job-test-1 + spec: + containers: + - name: container-4rwiyb + imagePullPolicy: IfNotPresent + image: perl + command: + - perl + - '-Mbignum=bpi' + - '-wle' + - print bpi(2000) + restartPolicy: Never + serviceAccount: default + initContainers: [] + volumes: [] + imagePullSecrets: null + backoffLimit: 5 + completions: 4 + parallelism: 2 + activeDeadlineSeconds: 300 + ``` + +2. You can make adjustments in the manifest directly and click **Create** or disable the **Edit YAML** and get back to the **Create** page. + + {{< notice note >}}You can skip **Storage Settings** and **Advanced Settings** for this tutorial. For more information, see [Mount volumes](../deployments/#step-4-mount-volumes) and [Configure advanced settings](../deployments/#step-5-configure-advanced-settings).{{}} + +### Step 6: Check the result + +1. In the final step of **Advanced Settings**, click **Create** to finish. A new item will be added to the Job list if the creation is successful. + +2. Click this Job and go to **Job Records** where you can see the information of each execution record. There are four completed Pods since **Completions** was set to `4` in Step 3. + + {{< notice tip >}} +You can rerun the Job if it fails and the reason for failure is displayed under **Message**. + {{}} + +3. In **Resource Status**, you can inspect the Pod status. Two Pods were created each time as **Parallel Pods** was set to 2. Click icon on the right and click icon to check the container log, which displays the expected calculation result. + + {{< notice tip >}} + +- In **Resource Status**, the Pod list provides the Pod's detailed information (for example, creation time, node, Pod IP and monitoring data). +- You can view the container information by clicking the Pod. +- Click the container log icon to view the output logs of the container. +- You can view the Pod details page by clicking the Pod name. + + {{}} + +## Check Job Details + +### Operations + +On the Job details page, you can manage the Job after it is created. + +- **Edit Information**: Edit the basic information except `Name` of the Job. +- **Rerun**: Rerun the Job, the Pod will restart, and a new execution record will be generated. +- **View YAML**: View the Job's specification in YAML format. +- **Delete**: Delete the Job and return to the Job list page. + +### Execution records + +1. Click the **Job Records** tab to view the execution records of the Job. + +2. Click icon to refresh the execution records. + +### Resource status + +1. Click the **Resource Status** tab to view the Pods of the Job. + +2. Click icon to refresh the Pod information, and click / to display/hide the containers in each Pod. + +### Metadata + +Click the **Metadata** tab to view the labels and annotations of the Job. 
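+
+If you have `kubectl` access to the cluster, you can inspect the same information from the command line. The commands below are a sketch that assumes the `job-test-1` Job in the `demo-project` namespace shown in the manifest above; adjust the names to match your environment.
+
+```bash
+# List the Job and check how many Pods have completed
+kubectl get job job-test-1 -n demo-project
+
+# Print the labels and annotations shown on the Metadata tab
+kubectl get job job-test-1 -n demo-project -o jsonpath='{.metadata.labels}{"\n"}{.metadata.annotations}{"\n"}'
+
+# View the output of the Job's Pods (the computed value of pi)
+kubectl logs job/job-test-1 -n demo-project
+```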
+ +### Environment variables + +Click the **Environment Variables** tab to view the environment variables of the Job. + +### Events + +Click the **Events** tab to view the events of the Job. diff --git a/content/en/docs/v3.4/project-user-guide/application-workloads/routes.md b/content/en/docs/v3.4/project-user-guide/application-workloads/routes.md new file mode 100644 index 000000000..586f5cee2 --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/application-workloads/routes.md @@ -0,0 +1,133 @@ +--- +title: "Routes" +keywords: "KubeSphere, Kubernetes, Route, Ingress" +description: "Learn basic concepts of Routes (i.e. Ingress) and how to create Routes in KubeSphere." +weight: 10270 +--- + +This document describes how to create, use, and edit a Route on KubeSphere. + +A Route on KubeSphere is the same as an [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/#what-is-ingress) on Kubernetes. You can use a Route and a single IP address to aggregate and expose multiple Services. + +## Prerequisites + +- You need to create a workspace, a project and two users (for example, `project-admin` and `project-regular`). In the project, the role of `admin` must be `project-admin` and that of `project-regular` must be `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](/docs/v3.3/quick-start/create-workspace-and-project/). +- If the Route is to be accessed in HTTPS mode, you need to [create a Secret](/docs/v3.3/project-user-guide/configuration/secrets/) that contains the `tls.crt` (TLS certificate) and `tls.key` (TLS private key) keys used for encryption. +- You need to [create at least one Service](/docs/v3.3/project-user-guide/application-workloads/services/). This document uses a demo Service as an example, which returns the Pod name to external requests. + +## Configure the Route Access Method + +1. Log in to the KubeSphere web console as `project-admin` and go to your project. + +2. Select **Gateway Settings** in **Project Settings** on the left navigation bar and click **Enable Gateway** on the right. + +3. In the displayed dialog box, set **Access Mode** to **NodePort** or **LoadBalancer**, and click **OK**. + + {{< notice note >}} + + If **Access Mode** is set to **LoadBalancer**, you may need to enable the load balancer plugin in your environment according to the plugin user guide. + + {{}} + +## Create a Route + +### Step 1: Configure basic information + +1. Log out of the KubeSphere web console, log back in as `project-regular`, and go to the same project. + +2. Choose **Routes** in **Application Workloads** on the left navigation bar and click **Create** on the right. + +3. On the **Basic Information** tab, configure the basic information about the Route and click **Next**. + * **Name**: Name of the Route, which is used as a unique identifier. + * **Alias**: Alias of the Route. + * **Description**: Description of the Route. + +### Step 2: Configure routing rules + +1. On the **Routing Rules** tab, click **Add Routing Rule**. + +2. Select a mode, configure routing rules, click **√**, and click **Next**. + + * **Auto Generate**: KubeSphere automatically generates a domain name in the `...nip.io` format and the domain name is automatically resolved by [nip.io](https://nip.io/) into the gateway address. This mode supports only HTTP. + + * **Domain Name**: Set a domain name for the Route. + * **Protocol**: Select `http` or `https`. 
If `https` is selected, you need to select a Secret that contains the `tls.crt` (TLS certificate) and `tls.key` (TLS private key) keys used for encryption. + * **Paths**: Map each Service to a path. You can click **Add** to add multiple paths. + +### (Optional) Step 3: Configure advanced settings + +1. On the **Advanced Settings** tab, select **Add Metadata**. + +2. Configure annotations and labels for the Route and click **Create**. + + {{< notice note >}} + + You can use annotations to customize the behavior of the Route. For more information, see the [official Nginx Ingress controller document](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/). + + {{}} + +### Step 4: Obtain the domain name, Service path, and gateway address + +1. Select **Routes** in **Application Workloads** on the left navigation bar and click the name of the Route on the right. + +2. Obtain the domain name and Service path and the gateway address in the **Rules** area. + + * If the [Route access mode](#configure-the-route-access-method) is set to NodePort, the IP address of a Kubernetes cluster node is used as the gateway address and the NodePort is displayed after the domain name. + + * If the [Route access mode](#configure-the-route-access-method) is set to LoadBalancer, the gateway address is assigned by the load balancer plugin. + +## Configure Domain Name Resolution + +If **Auto Generate** is selected in the [routing rule configuration](#step-2-configure-route-rules), you do not need to configure domain name resolution and the domain name is automatically resolved by [nip.io](https://nip.io/) into the gateway address. + +If **Specify Domain** is selected in the [routing rule configuration](#step-2-configure-route-rules), you need to configure domain name resolution on your DNS server or add ` ` to the `etc/hosts` file of your client machine. + +## Access the Route + +### NodePort access mode + +1. Log in to a client machine connected to the Route gateway address. + +2. Use the `:/` address to access the backend Service of the Route. + +### LoadBalancer access method + +1. Log in to a client machine connected to the Route gateway address. + +2. Use the `/` address to access the backend Service of the Route. + +{{< notice note >}} + +If you need to access the Route from outside your private network by using either NodePort or LoadBalancer, depending on your network environment: + +* You may need to configure traffic forwarding and firewall policies in your infrastructure environment so that the gateway address and port number of the Route can be accessed. +* If **Auto Generate** is selected in the [routing rule configuration](#step-2-configure-routing-rules), you may need to manually [edit the routing rules](#edit-the-route) to change the gateway address in the Route domain name to the external IP address of your private network. +* If **Specify Domain** is selected in the [routing rule configuration](#step-2-configure-routing-rules), you may need to change the configuration on your DNS server or in the `etc/hosts` file of your client machine so that the domain name can be resolved into the external IP address of your private network. + +{{}} + +## Check Route Details + +### Operations + +1. Choose **Routes** in **Application Workloads** on the left navigation bar and click the name of the Route on the right. + +2. Click **Edit Information**, or click **More** and choose an operation from the drop-down menu. + * **Edit YAML**: Edit the YAML configuration file of the Route. 
+ * **Edit Routing Rules**: Edit the Route rules. + * **Edit Annotations**: Edit the Route annotations. For more information, see the [official Nginx Ingress controller document](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/). + * **Delete**: Delete the Route and return to the Route list page. + +### Resource status + +Click the **Resource Status** tab to view the Route rules. + +### Metadata + +Click the **Metadata** tab to view the labels and annotations of the Route. + +### Events + +Click the **Events** tab to view the events of the Route. + + diff --git a/content/en/docs/v3.4/project-user-guide/application-workloads/services.md b/content/en/docs/v3.4/project-user-guide/application-workloads/services.md new file mode 100644 index 000000000..5c19fafc0 --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/application-workloads/services.md @@ -0,0 +1,190 @@ +--- +title: "Services" +keywords: 'KubeSphere, Kubernetes, services, workloads' +description: 'Learn basic concepts of Services and how to create Services in KubeSphere.' +linkTitle: "Services" +weight: 10240 +--- + +A Service is an abstract way to expose an application running on a set of Pods as a network service. Namely, a Service groups endpoints of these Pods into a single resource, which can be accessed through different ways. + +With Kubernetes, you don't need to modify your application to use an unfamiliar service discovery mechanism. Kubernetes gives Pods their IP addresses and a single DNS name for a set of Pods, and can load-balance across them. + +For more information, see the [official documentation of Kubernetes](https://kubernetes.io/docs/concepts/services-networking/service/). + +## Access Types + +- **Virtual IP**: It is based on the unique IP generated by the cluster. A service can be accessed through this IP inside the cluster. This type is suitable for most services. Alternatively, a service can also be accessed through a NodePort and LoadBalancer outside the cluster. + +- **Headless**: The cluster does not generate an IP address for the service, and the service is directly accessed through the backend Pod IP of the service within the cluster. This type is suitable for backend heterogeneous services, such as services that need to distinguish between master and agent. + +{{< notice tip>}} + +In KubeSphere, stateful and stateless Services are created with a virtual IP by default. If you want to create a Headless Service, use **YAML** to configure it directly. + +{{}} + +## Prerequisites + +You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Service Types + +KubeSphere provides three basic methods to create a Service: **Stateless Service**, **Stateful Service**, and **External Service**. Besides, you can also customize a Service through **Specify Workload** and **Edit YAML** under **Customize Service**. + +- **Stateless Service** + + Stateless Services are the most commonly used Services in container services. They define the Pod template to control the Pod status, including rolling updates and rollbacks. A **Deployment** workload is also created when you create a stateless Service. For more information about stateless Services, see [Deployments](../../application-workloads/deployments/). 
+ +- **Stateful Service** + + Stateful Services are used to manage stateful applications, ensuring ordered and graceful deployment and scaling. They also provide stable persistent storage and network identifiers. A **StatefulSet** workload is also created when you create a stateful Service. For more information about stateful Services, see [StatefulSets](../../application-workloads/statefulsets/). + +- **External Service** + + Different from stateless and stateful Services, an External Service maps a Service to a DNS name instead of a selector. You need to specify these Services in the **External Service Address** field, indicated by `externalName` in the YAML file. + +- **Specify Workload** + + Create a Service with existing Pods. + +- **Edit YAML** + + Create a Service directly with YAML. You can upload and download YAML configuration files to and from the console. + + {{< notice tip>}} + +The value of `annotations:kubesphere.io/serviceType` keywords can be defined as: `statelessservice`, `statefulservice`, `externalservice` and `None`. + + {{}} + +## Create a Stateless Service + +### Step 1: Open the dashboard + +1. Go to **Services** under **Application Workloads** of a project and click **Create**. + +2. Click **Stateless Service**. + + {{< notice note >}} + +The steps of creating a stateful Service and a stateless Service are basically the same. This example only goes through the process of creating a stateless Service for demonstration purpose. + +{{}} + +### Step 2: Enter basic information + +1. In the displayed dialog box, you can see the field **Version** prepopulated with `v1`. You need to define a name for the Service, such as `demo-stateless`. When you finish, click **Next** to continue. + + - **Name**: The name of the Service and Deployment, which is also the unique identifier. + - **Alias**: The alias name of the Service, making resources easier to identify. + - **Version**: It can only contain lowercase letters and numbers. The maximum length of characters is set to 16. + + {{< notice tip >}} + +The value of **Name** is used in both configurations, one for Deployment and the other for Service. You can see the manifest file of the Deployment and the Service by enabling **Edit YAML** in the upper-right corner. Below is an example file for your reference. + + {{}} + + ``` yaml + kind: Deployment + metadata: + labels: + version: v1 + app: xxx + name: xxx-v1 + spec: + selector: + matchLabels: + version: v1 + app: xxx + template: + metadata: + labels: + version: v1 + app: xxx + --- + kind: Service + metadata: + labels: + version: v1 + app: xxx + name: xxx + spec: + metadata: + labels: + version: v1 + app: xxx + ``` + +### Step 3: Set a Pod + +To add a container image for the Service, see [Set a Pod](../deployments/#step-3-set-a-pod) for details. + +{{< notice tip >}} + +For more information about explanations of dashboard properties, see [Pod Settings](../container-image-settings/) directly. + +{{}} + +### Step 4: Mount volumes + +To mount a volume for the Service, see [Mount Volumes](../deployments/#step-4-mount-volumes) for details. + +### Step 5: Configure advanced settings + +You can set a policy for node scheduling and add metadata which is the same as explained in [Deployments](../deployments/#step-5-configure-advanced-settings). For a Service, you can see two additional options available, **External Access** and **Sticky Session**. + +- External Access + + You can expose a Service externally through two methods, NodePort and LoadBalancer. 
+ + - **NodePort**: A Service is exposed on each node's IP address at a static port. + + - **LoadBalancer**: Clients send requests to the IP address of a load balancer. + + {{< notice note >}} + +This value is specified by `.spec.type`. If you select **LoadBalancer**, you need to add annotations for it at the same time. + + {{}} + +- Sticky Session + + You may want to route all traffic sent from a single client session to the same instance of an app which runs across multiple replicas. This makes better use of caches as it reduces latency. This behavior of load balancing is called Sticky Sessions. + + You can set the maximum session sticky time in this field, specified by `.spec.sessionAffinityConfig.clientIP.timeoutSeconds` in the manifest file, which defaults to 10800. + +## Check Service Details + +### Details page + +1. After a Service is created, you can click icon on the right to further edit it, such as its metadata (excluding **Name**), YAML, port, and Internet access. + + - **Edit Information**: View and edit the basic information. + - **Edit YAML**: View, upload, download, or update the YAML file. + - **Edit Service**: View the access type and set selectors and ports. + - **Edit External Access**: Edit external access method for the Service. + - **Delete**: When you delete a Service, associated resources will be displayed. If you check them, they will be deleted together with the Service. + +2. Click the name of the Service and you can go to its details page. + + - Click **More** to expand the drop-down menu which is the same as the one in the Service list. + - The Pod list provides detailed information of the Pod (status, node, Pod IP and resource usage). + - You can view the container information by clicking a Pod item. + - Click the container log icon to view output logs of the container. + - You can view the Pod details page by clicking the Pod name. + +### Resource status + +1. Click the **Resource Status** tab to view information about the Service ports, workloads, and Pods. + +2. In the **Pods** area, click icon to refresh the Pod information, and click / to display/hide the containers in each Pod. + +### Metadata + +Click the **Metadata** tab to view the labels and annotations of the Service. + +### Events + +Click the **Events** tab to view the events of the Service. \ No newline at end of file diff --git a/content/en/docs/v3.4/project-user-guide/application-workloads/statefulsets.md b/content/en/docs/v3.4/project-user-guide/application-workloads/statefulsets.md new file mode 100644 index 000000000..d5b160d06 --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/application-workloads/statefulsets.md @@ -0,0 +1,148 @@ +--- +title: "Kubernetes StatefulSet in KubeSphere" +keywords: 'KubeSphere, Kubernetes, StatefulSets, Dashboard, Service' +description: 'Learn basic concepts of StatefulSets and how to create StatefulSets on KubeSphere.' +linkTitle: "StatefulSets" +weight: 10220 +--- + +As a workload API object, a Kubernetes StatefulSet is used to manage stateful applications. It is responsible for the deploying, scaling of a set of Pods, and guarantees the ordering and uniqueness of these Pods. + +Like a Deployment, a StatefulSet manages Pods that are based on an identical container specification. Unlike a Deployment, a StatefulSet maintains a sticky identity for each of their Pods. These Pods are created from the same specification, but are not interchangeable: each has a persistent identifier that it maintains across any rescheduling. 
+ +If you want to use storage volumes to provide persistence for your workload, you can use a StatefulSet as part of the solution. Although individual Pods in a StatefulSet are susceptible to failure, the persistent Pod identifiers make it easier to match existing volumes to the new Pods that replace any that have failed. + +StatefulSets are valuable for applications that require one or more of the following. + +- Stable, unique network identifiers. +- Stable, persistent storage. +- Ordered, graceful deployment, and scaling. +- Ordered, automated rolling updates. + +For more information, see the [official documentation of Kubernetes](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/). + +## Prerequisites + +You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Create a Kubernetes StatefulSet + +In KubeSphere, a **Headless** service is also created when you create a StatefulSet. You can find the headless service in [Services](../services/) under **Application Workloads** in a project. + +### Step 1: Open the dashboard + +Log in to the console as `project-regular`. Go to **Application Workloads** of a project, select **Workloads**, and click **Create** under the **StatefulSets** tab. + +### Step 2: Enter basic information + +Specify a name for the StatefulSet (for example, `demo-stateful`), select a project, and click **Next**. + +### Step 3: Set a Pod + +1. Before you set an image, define the number of replicated Pods in **Pod Replicas** by clicking icon or icon, which is indicated by the `.spec.replicas` field in the manifest file. + + {{< notice tip >}} + +You can see the StatefulSet manifest file in YAML format by enabling **Edit YAML** in the upper-right corner. KubeSphere allows you to edit the manifest file directly to create a StatefulSet. Alternatively, you can follow the steps below to create a StatefulSet via the dashboard. + + {{}} + +2. Click **Add Container**. + +3. Enter an image name from public Docker Hub or from a [private repository](../../configuration/image-registry/) you specified. For example, enter `nginx` in the search box and press **Enter**. + + {{< notice note >}} + +- Remember to press **Enter** on your keyboard after you enter an image name in the search box. +- If you want to use your private image repository, you should [create an Image Registry Secret](../../configuration/image-registry/) first in **Secrets** under **Configuration**. + + {{}} + +4. Set requests and limits for CPU and memory resources based on your needs. For more information, see [Resource Request and Resource Limit in Container Image Settings](../container-image-settings/#add-container-image). + +5. Click **Use Default Ports** for **Port Settings** or you can customize **Protocol**, **Name** and **Container Port**. + +6. Select a policy for image pulling from the drop-down list. For more information, see [Image Pull Policy in Container Image Settings](../container-image-settings/#add-container-image). + +7. For other settings (**Health Check**, **Start Command**, **Environment Variables**, **Container Security Context** and **Synchronize Host Timezone**), you can configure them on the dashboard as well. For more information, see detailed explanations of these properties in [Pod Settings](../container-image-settings/#add-container-image). 
When you finish, click **√** in the lower-right corner to continue. + +8. Select an update strategy from the drop-down menu. It is recommended you choose **Rolling Update**. For more information, see [Update Strategy](../container-image-settings/#update-strategy). + +9. Select a Pod scheduling rule. For more information, see [Pod Scheduling Rules](../container-image-settings/#pod-scheduling-rules). + +10. Click **Next** to continue when you finish setting the container image. + +### Step 4: Mount volumes + +StatefulSets can use the volume template, but you must create it in **Storage** in advance. For more information about volumes, visit [Volumes](../../storage/volumes/#mount-a-volume). When you finish, click **Next** to continue. + +### Step 5: Configure advanced settings + +You can set a policy for node scheduling and add StatefulSet metadata in this section. When you finish, click **Create** to complete the whole process of creating a StatefulSet. + +- **Select Nodes** + + Assign Pod replicas to run on specified nodes. It is specified in the field `nodeSelector`. + +- **Add Metadata** + + Additional metadata settings for resources such as **Labels** and **Annotations**. + +## Check Kubernetes StatefulSet Details + +### Details page + +1. After a StatefulSet is created, it will be displayed in the list. You can click icon on the right to select options from the menu to modify your StatefulSet. + + - **Edit Information**: View and edit the basic information. + - **Edit YAML**: View, upload, download, or update the YAML file. + - **Re-create**: Re-create the StatefulSet. + - **Delete**: Delete the StatefulSet. + +2. Click the name of the StatefulSet and you can go to its details page. + +3. Click **More** to display what operations about this StatefulSet you can do. + + - **Roll Back**: Select the revision to roll back. + - **Edit Service**: Set the port to expose the container image and the service port. + - **Edit Settings**: Configure update strategies, containers and volumes. + - **Edit YAML**: View, upload, download, or update the YAML file. + - **Re-create**: Re-create this StatefulSet. + - **Delete**: Delete the StatefulSet, and return to the StatefulSet list page. + +4. Click the **Resource Status** tab to view the port and Pod information of a StatefulSet. + + - **Replica Status**: Click icon or icon to increase or decrease the number of Pod replicas. + - **Pods** + + - The Pod list provides detailed information of the Pod (status, node, Pod IP and resource usage). + - You can view the container information by clicking a Pod item. + - Click the container log icon to view output logs of the container. + - You can view the Pod details page by clicking the Pod name. + +### Revision records + +After the resource template of workload is changed, a new log will be generated and Pods will be rescheduled for a version update. The latest 10 versions will be saved by default. You can implement a redeployment based on the change log. + +### Metadata + +Click the **Metadata** tab to view the labels and annotations of the StatefulSet. + +### Monitoring + +1. Click the **Monitoring** tab to view the CPU usage, memory usage, outbound traffic, and inbound traffic of the StatefulSet. + +2. Click the drop-down menu in the upper-right corner to customize the time range and sampling interval. + +3. Click icon/icon in the upper-right corner to start/stop automatic data refreshing. + +4. Click icon in the upper-right corner to manually refresh the data. 
+ +### Environment variables + +Click the **Environment Variables** tab to view the environment variables of the StatefulSet. + +### Events + +Click the **Events** tab to view the events of the StatefulSet. + diff --git a/content/en/docs/v3.4/project-user-guide/application/_index.md b/content/en/docs/v3.4/project-user-guide/application/_index.md new file mode 100644 index 000000000..7e0d6b2b6 --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/application/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Applications" +weight: 10100 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/project-user-guide/application/app-template.md b/content/en/docs/v3.4/project-user-guide/application/app-template.md new file mode 100644 index 000000000..30958f0bc --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/application/app-template.md @@ -0,0 +1,33 @@ +--- +title: "App Templates" +keywords: 'Kubernetes, Chart, Helm, KubeSphere, Application Template, Repository' +description: 'Understand the concept of app templates and how they can help to deploy applications within enterprises.' +linkTitle: "App Templates" +weight: 10110 +--- + +An app template serves as a way for users to upload, deliver, and manage apps. Generally, an app is composed of one or more Kubernetes workloads (for example, [Deployments](../../../project-user-guide/application-workloads/deployments/), [StatefulSets](../../../project-user-guide/application-workloads/statefulsets/) and [DaemonSets](../../../project-user-guide/application-workloads/daemonsets/)) and [Services](../../../project-user-guide/application-workloads/services/) based on how it functions and communicates with the external environment. Apps that are uploaded as app templates are built based on a [Helm](https://helm.sh/) package. + +## How App Templates Work + +You can deliver Helm charts to the public repository of KubeSphere or import a private app repository to offer app templates. + +The public repository, also known as the App Store on KubeSphere, is accessible to every tenant in a workspace. After [uploading the Helm chart of an app](../../../workspace-administration/upload-helm-based-application/), you can deploy your app to test its functions and submit it for review. Ultimately, you have the option to release it to the App Store after it is approved. For more information, see [Application Lifecycle Management](../../../application-store/app-lifecycle-management/). + +For a private repository, only users with required permissions are allowed to [add private repositories](../../../workspace-administration/app-repository/import-helm-repository/) in a workspace. Generally, the private repository is built based on object storage services, such as MinIO. After imported to KubeSphere, these private repositories serve as application pools to provide app templates. + +{{< notice note >}} + +[For individual apps that are uploaded as Helm charts](../../../workspace-administration/upload-helm-based-application/) to KubeSphere, they are displayed in the App Store together with built-in apps after approved and released. Besides, when you select app templates from private app repositories, you can also see **Current workspace** in the list, which stores these individual apps uploaded as Helm charts. + +{{}} + +KubeSphere deploys app repository services based on [OpenPitrix](https://github.com/openpitrix/openpitrix) as a [pluggable component](../../../pluggable-components/app-store/). 
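+
+Because app templates are packaged as Helm charts, preparing one usually starts on your local machine with the Helm CLI. The commands below are a minimal sketch that assumes Helm 3 is installed and uses `my-app` as a placeholder chart name.
+
+```bash
+# Scaffold a new chart with the default template files
+helm create my-app
+
+# Edit Chart.yaml, values.yaml, and the files under templates/, then validate the chart
+helm lint my-app
+
+# Package the chart into a .tgz archive that can be uploaded to KubeSphere as an app template
+helm package my-app
+# This produces an archive such as my-app-0.1.0.tgz, named after the version in Chart.yaml
+```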
+ +## Why App Templates + +App templates enable users to deploy and manage apps in a visualized way. Internally, they play an important role as shared resources (for example, databases, middleware and operating systems) created by enterprises for the coordination and cooperation within teams. Externally, app templates set industry standards of building and delivery. Users can take advantage of app templates in different scenarios to meet their own needs through one-click deployment. + +In addition, as OpenPitrix is integrated to KubeSphere to provide application management across the entire lifecycle, the platform allows ISVs, developers and regular users to all participate in the process. Backed by the multi-tenant system of KubeSphere, each tenant is only responsible for their own part, such as app uploading, app review, release, test, and version management. Ultimately, enterprises can build their own App Store and enrich their application pools with their customized standards. As such, apps can also be delivered in a standardized fashion. + +For more information about how to use app templates, see [Deploy Apps from App Templates](../deploy-app-from-template/). \ No newline at end of file diff --git a/content/en/docs/v3.4/project-user-guide/application/compose-app.md b/content/en/docs/v3.4/project-user-guide/application/compose-app.md new file mode 100644 index 000000000..5a7e7bb27 --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/application/compose-app.md @@ -0,0 +1,96 @@ +--- +title: "Create a Microservices-based App" +keywords: 'KubeSphere, Kubernetes, service mesh, microservices' +description: 'Learn how to compose a microservice-based application from scratch.' +linkTitle: "Create a Microservices-based App" +weight: 10140 +--- + +With each microservice handling a single part of the app's functionality, an app can be divided into different components. These components have their own responsibilities and limitations, independent from each other. In KubeSphere, this kind of app is called **Composed App**, which can be built through newly created Services or existing Services. + +This tutorial demonstrates how to create a microservices-based app Bookinfo, which is composed of four Services, and set a customized domain name to access the app. + +## Prerequisites + +- You need to create a workspace, a project, and a user (`project-regular`) for this tutorial. The user needs to be invited to the project with the `operator` role. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). +- `project-admin` needs to [set the project gateway](../../../project-administration/project-gateway/) so that `project-regular` can define a domain name when creating the app. + +## Create Microservices that Compose an App + +1. Log in to the web console of KubeSphere and navigate to **Apps** in **Application Workloads** of your project. On the **Composed Apps** tab, click **Create**. + +2. Set a name for the app (for example, `bookinfo`) and click **Next**. + +3. On the **Service Settings** page, you need to create microservices that compose the app. Click **Create Service** and select **Stateless Service**. + +4. Set a name for the Service (e.g `productpage`) and click **Next**. + + {{< notice note >}} + + You can create a Service on the dashboard directly or enable **Edit YAML** in the upper-right corner to edit the YAML file. + + {{}} + +5. 
Click **Add Container** under **Containers** and enter `kubesphere/examples-bookinfo-productpage-v1:1.13.0` in the search box to use the Docker Hub image. + + {{< notice note >}} + + You must press **Enter** in your keyboard after you enter the image name. + + {{}} + +6. Click **Use Default Ports**. For more information about image settings, see [Pod Settings](../../../project-user-guide/application-workloads/container-image-settings/). Click **√** in the lower-right corner and **Next** to continue. + +7. On the **Storage Settings** page, [add a volume](../../../project-user-guide/storage/volumes/) or click **Next** to continue. + +8. Click **Create** on the **Advanced Settings** page. + +9. Similarly, add the other three microservices for the app. Here is the image information: + + | Service | Name | Image | + | --------- | --------- | ------------------------------------------------ | + | Stateless | `details` | `kubesphere/examples-bookinfo-details-v1:1.13.0` | + | Stateless | `reviews` | `kubesphere/examples-bookinfo-reviews-v1:1.13.0` | + | Stateless | `ratings` | `kubesphere/examples-bookinfo-ratings-v1:1.13.0` | + +10. When you finish adding microservices, click **Next**. + +11. On the **Route Settings** page, click **Add Routing Rule**. On the **Specify Domain** tab, set a domain name for your app (for example, `demo.bookinfo`) and select `HTTP` in the **Protocol** field. For `Paths`, select the Service `productpage` and port `9080`. Click **OK** to continue. + + {{< notice note >}} + +The button **Add Routing Rule** is not visible if the project gateway is not set. + +{{}} + +12. You can add more rules or click **Create** to finish the process. + +13. Wait for your app to reach the **Ready** status. + + +## Access the App + +1. As you set a domain name for the app, you need to add an entry in the hosts (`/etc/hosts`) file. For example, add the IP address and hostname as below: + + ```txt + 192.168.0.9 demo.bookinfo + ``` + + {{< notice note >}} + + You must add your **own** IP address and hostname. + + {{}} + +2. In **Composed Apps**, click the app you just created. + +3. In **Resource Status**, click **Access Service** under **Routes** to access the app. + + {{< notice note >}} + + Make sure you open the port in your security group. + + {{}} + +4. Click **Normal user** and **Test user** respectively to see other **Services**. + diff --git a/content/en/docs/v3.4/project-user-guide/application/deploy-app-from-appstore.md b/content/en/docs/v3.4/project-user-guide/application/deploy-app-from-appstore.md new file mode 100644 index 000000000..f9613b89c --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/application/deploy-app-from-appstore.md @@ -0,0 +1,62 @@ +--- +title: "Deploy Apps from the App Store" +keywords: 'Kubernetes, Chart, Helm, KubeSphere, Application, App Store' +description: 'Learn how to deploy an application from the App Store.' +linkTitle: "Deploy Apps from the App Store" +weight: 10130 +--- + +The [App Store](../../../application-store/) is also the public app repository on the platform, which means every tenant on the platform can view the applications in the Store regardless of which workspace they belong to. The App Store contains 16 featured enterprise-ready containerized apps and apps released by tenants from different workspaces on the platform. Any authenticated users can deploy applications from the Store. This is different from private app repositories which are only accessible to tenants in the workspace where private app repositories are imported. 
+ +This tutorial demonstrates how to quickly deploy [NGINX](https://www.nginx.com/) from the KubeSphere App Store powered by [OpenPitrix](https://github.com/openpitrix/openpitrix) and access its service through a NodePort. + +## Prerequisites + +- You have enabled [OpenPitrix (App Store)](../../../pluggable-components/app-store/). +- You need to create a workspace, a project, and a user (`project-regular`) for this tutorial. The user must be invited to the project and granted the `operator` role. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Hands-on Lab + +### Step 1: Deploy NGINX from the App Store + +1. Log in to the web console of KubeSphere as `project-regular` and click **App Store** in the upper-left corner. + + {{< notice note >}} + + You can also go to **Apps** under **Application Workloads** in your project, click **Create**, and select **From App Store** to go to the App Store. + + {{}} + +2. Search for NGINX, click it, and click **Install** on the **App Information** page. Make sure you click **Agree** in the displayed **Deployment Agreement** dialog box. + +3. Set a name and select an app version, confirm the location where NGINX will be deployed, and click **Next**. + +4. In **App Settings**, specify the number of replicas to deploy for the app and enable Ingress based on your needs. When you finish, click **Install**. + + {{< notice note >}} + + To specify more values for NGINX, use the toggle to see the app’s manifest in YAML format and edit its configurations. + + {{}} + +5. Wait until NGINX is up and running. + +### Step 2: Access NGINX + +To access NGINX outside the cluster, you need to expose the app through a NodePort first. + +1. Go to **Services** in the created project and click the service name of NGINX. + +2. On the Service details page, click **More** and select **Edit External Access** from the drop-down menu. + +3. Select **NodePort** for **Access Method** and click **OK**. For more information, see [Project Gateway](../../../project-administration/project-gateway/). + +4. Under **Ports**, view the exposed port. + +5. Access NGINX through `<NodeIP>:<NodePort>`. + + {{< notice note >}} + + You may need to open the port in your security groups and configure related port forwarding rules depending on where your Kubernetes cluster is deployed. + + {{}} \ No newline at end of file diff --git a/content/en/docs/v3.4/project-user-guide/application/deploy-app-from-template.md b/content/en/docs/v3.4/project-user-guide/application/deploy-app-from-template.md new file mode 100644 index 000000000..4793a5632 --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/application/deploy-app-from-template.md @@ -0,0 +1,92 @@ +--- +title: "Deploy Apps from App Templates" +keywords: 'Kubernetes, Chart, Helm, KubeSphere, Application, App Templates' +description: 'Learn how to deploy an application from a Helm-based template.' +linkTitle: "Deploy Apps from App Templates" +weight: 10120 +--- + +When you deploy an app, you can select the app from the App Store which contains built-in apps of KubeSphere and [apps uploaded as Helm charts](../../../workspace-administration/upload-helm-based-application/). Alternatively, you can use apps from private app repositories added to KubeSphere to provide app templates. + +This tutorial demonstrates how to quickly deploy [Grafana](https://grafana.com/) using the app template from a private repository, which is based on QingStor object storage.
+ +## Prerequisites + +- You have enabled [OpenPitrix (App Store)](../../../pluggable-components/app-store/). +- You have completed the tutorial of [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). Namely, you must have a workspace, a project and two users (`ws-admin` and `project-regular`). `ws-admin` must be granted the role of `workspace-admin` in the workspace and `project-regular` must be granted the role of `operator` in the project. + +## Hands-on Lab + +### Step 1: Add an app repository + +1. Log in to the web console of KubeSphere as `ws-admin`. In your workspace, go to **App Repositories** under **App Management**, and then click **Add**. + +2. In the displayed dialog box, enter `test-repo` for the app repository name and `https://charts.kubesphere.io/main` for the repository URL. Click **Validate** to verify the URL, set **Synchronization Interval** based on your needs, and click **OK**. + +3. Your repository is displayed in the list after being successfully imported to KubeSphere. + + {{< notice note >}} + + For more information about dashboard properties as you add a private repository, see [Import Helm Repository](../../../workspace-administration/app-repository/import-helm-repository/). + + {{}} + +### Step 2: Deploy Grafana from app templates + +1. Log out of KubeSphere and log back in as `project-regular`. In your project, go to **Apps** under **Application Workloads** and click **Create**. + +2. Select **From App Template** in the displayed dialog box. + + **From App Store**: Choose built-in apps and apps uploaded individually as Helm charts. + + **From App Templates**: Choose apps from private app repositories and the workspace app pool. + +3. Select `test-repo` from the drop-down list, which is the private app repository just added. + + {{< notice note >}} + + The option **Current workspace** in the list represents the workspace app pool, which contains apps uploaded as Helm charts. They are also part of app templates. + + {{}} + +4. Enter `grafana` in the search box to search for the app, and then click it to deploy it. + + {{< notice note >}} + + The app repository used in this tutorial is synchronized from the Google Helm repository. Some apps in it may not be deployed successfully as their Helm charts are maintained by different organizations. + + {{}} + +5. The app information and configuration files are displayed. Under **Version**, select a version number from the list and click **Install**. + +6. Set an app name and confirm the version and deployment location. Click **Next**. + +7. In **App Settings**, manually edit the manifest file or click **Install** directly. + +8. Wait for Grafana to be up and running. + +### Step 3: Expose the Grafana Service + +To access Grafana outside the cluster, you need to expose the app through a NodePort first. + +1. Go to **Services** and click the service name of Grafana. + +2. Click **More** and select **Edit External Access** from the drop-down menu. + +3. Select **NodePort** for **Access Method** and click **OK**. For more information, see [Project Gateway](../../../project-administration/project-gateway/). + +4. Under **Ports**, view the exposed port. + +### Step 4: Access Grafana + +1. To access the Grafana dashboard, you need the username and password. Go to **Secrets** under **Configuration** and click the item that has the same name as the app name. + +2. On the details page, click the eye icon to view the username and password. + +3. Access Grafana through `<NodeIP>:<NodePort>`.
+ + {{< notice note >}} + + You may need to open the port in your security groups and configure related port forwarding rules depending on where your Kubernetes cluster is deployed. + + {{}} \ No newline at end of file diff --git a/content/en/docs/v3.4/project-user-guide/configuration/_index.md b/content/en/docs/v3.4/project-user-guide/configuration/_index.md new file mode 100644 index 000000000..f23595117 --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/configuration/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Configuration" +weight: 10400 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/project-user-guide/configuration/configmaps.md b/content/en/docs/v3.4/project-user-guide/configuration/configmaps.md new file mode 100644 index 000000000..c50682ecf --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/configuration/configmaps.md @@ -0,0 +1,71 @@ +--- +title: "ConfigMaps" +keywords: 'KubeSphere, Kubernetes, ConfigMaps' +description: 'Learn how to create a ConfigMap in KubeSphere.' +linkTitle: "ConfigMaps" +weight: 10420 +--- + +A Kubernetes [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) is used to store configuration data in the form of key-value pairs. The ConfigMap resource provides a way to inject configuration data into Pods. The data stored in a ConfigMap object can be referenced in a volume of type `ConfigMap` and then consumed by containerized applications running in a Pod. ConfigMaps are often used in the following cases: + +- Set the value of environment variables. +- Set command parameters in containers. +- Create a configuration file in volumes. + +This tutorial demonstrates how to create a ConfigMap in KubeSphere. + +## Prerequisites + +You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Create a ConfigMap + +1. Log in to the console as `project-regular`. Go to **Configuration** of a project, select **ConfigMaps** and click **Create**. + +2. In the displayed dialog box, specify a name for the ConfigMap (for example, `demo-configmap`) and click **Next** to continue. + + {{< notice tip >}} + +You can see the ConfigMap manifest file in YAML format by enabling **Edit YAML** in the upper-right corner. KubeSphere allows you to edit the manifest file directly to create a ConfigMap. Alternatively, you can follow the steps below to create a ConfigMap via the dashboard. + +{{}} + +3. On the **Data Settings** tab, configure values by clicking **Add Data**. + +4. Enter a key-value pair. + + {{< notice note >}} + +- Key-value pairs are displayed under the field `data` in the manifest. + +- On the KubeSphere dashboard, you can only add key-value pairs for a ConfigMap currently. In future releases, you will be able to add a path to a directory containing configuration files to create ConfigMaps directly on the dashboard. + +{{}} + +5. Click **√** in the lower-right corner to save it and click **Add Data** again if you want to add more key-value pairs. + +6. Click **Create** to generate the ConfigMap. + +## View ConfigMap Details + +1. After a ConfigMap is created, it is displayed on the **ConfigMaps** page. You can click the icon on the right and select an operation from the drop-down list. + + - **Edit Information**: View and edit the basic information.
+ - **Edit YAML**: View, upload, download, or update the YAML file. + - **Edit Settings**: Modify the key-value pair of the ConfigMap. + - **Delete**: Delete the ConfigMap. + +2. Click the name of the ConfigMap to go to its details page. Under the tab **Data**, you can see all the key-value pairs you have added for the ConfigMap. + +3. Click **More** to display the operations you can perform on this ConfigMap. + + - **Edit YAML**: View, upload, download, or update the YAML file. + - **Edit Settings**: Modify the key-value pair of the ConfigMap. + - **Delete**: Delete the ConfigMap, and return to the list page. + +4. Click **Edit Information** to view and edit the basic information. + + +## Use a ConfigMap + +When you create workloads, [Services](../../../project-user-guide/application-workloads/services/), [Jobs](../../../project-user-guide/application-workloads/jobs/) or [CronJobs](../../../project-user-guide/application-workloads/cronjobs/), you may need to add environment variables for containers. On the **Add Container** page, check **Environment Variables** and click **From configmap** to use a ConfigMap from the list. diff --git a/content/en/docs/v3.4/project-user-guide/configuration/image-registry.md b/content/en/docs/v3.4/project-user-guide/configuration/image-registry.md new file mode 100644 index 000000000..0cce22b71 --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/configuration/image-registry.md @@ -0,0 +1,104 @@ +--- +title: "Image Registries" +keywords: 'KubeSphere, Kubernetes, docker, Secrets' +description: 'Learn how to create an image registry on KubeSphere.' +linkTitle: "Image Registries" +weight: 10430 +--- + +A Docker image is a read-only template that can be used to deploy container services. Each image has a unique identifier (for example, image name:tag). For example, an image can contain a complete package of an Ubuntu operating system environment with only Apache and a few applications installed. An image registry is used to store and distribute Docker images. + +This tutorial demonstrates how to create Secrets for different image registries. + +## Prerequisites + +You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Create a Secret + +When you create workloads, [Services](../../../project-user-guide/application-workloads/services/), [Jobs](../../../project-user-guide/application-workloads/jobs/), or [CronJobs](../../../project-user-guide/application-workloads/cronjobs/), you can select images from your private registry in addition to the public registry. To use images from your private registry, you must create a Secret for it so that the registry can be integrated into KubeSphere. + +### Step 1: Open the dashboard + +Log in to the web console of KubeSphere as `project-regular`. Go to **Configuration** of a project, select **Secrets** and click **Create**. + +### Step 2: Enter basic information + +Specify a name for the Secret (for example, `demo-registry-secret`) and click **Next** to continue. + +{{< notice tip >}} + +You can see the Secret's manifest file in YAML format by enabling **Edit YAML** in the upper-right corner. KubeSphere allows you to edit the manifest file directly to create a Secret. Alternatively, you can follow the steps below to create a Secret via the dashboard.
+ +{{}} + +### Step 3: Specify image registry information + +Select **Image registry information** for **Type**. To use images from your private registry as you create application workloads, you need to specify the following fields. + +- **Registry Address**. The address of the image registry that stores images for you to use when creating application workloads. +- **Username**. The account name you use to log in to the registry. +- **Password**. The password you use to log in to the registry. +- **Email** (optional). Your email address. + +#### Add the Docker Hub registry + +1. Before you add your image registry in [Docker Hub](https://hub.docker.com/), make sure you have an available Docker Hub account. On the **Secret Settings** page, enter `docker.io` for **Registry Address** and enter your Docker ID and password for **User Name** and **Password**. Click **Validate** to check whether the address is available. + +2. Click **Create**. Later, the Secret is displayed on the **Secrets** page. For more information about how to edit the Secret after you create it, see [Check Secret Details](../../../project-user-guide/configuration/secrets/#check-secret-details). + +#### Add the Harbor image registry + +[Harbor](https://goharbor.io/) is an open-source trusted cloud-native registry project that stores, signs, and scans content. Harbor extends the open-source Docker Distribution by adding the functionalities usually required by users such as security, identity and management. Harbor uses HTTP and HTTPS to serve registry requests. + +**HTTP** + +1. You need to modify the Docker configuration for all nodes within the cluster. For example, if there is an external Harbor registry and its IP address is `http://192.168.0.99`, then you need to add the field `--insecure-registry=192.168.0.99` to `/etc/systemd/system/docker.service.d/docker-options.conf`: + + ```bash + [Service] + Environment="DOCKER_OPTS=--registry-mirror=https://registry.docker-cn.com --insecure-registry=10.233.0.0/18 --data-root=/var/lib/docker --log-opt max-size=50m --log-opt max-file=5 \ + --insecure-registry=192.168.0.99" + ``` + + {{< notice note >}} + + - Replace the image registry address with your own registry address. + + - `Environment` represents [dockerd options](https://docs.docker.com/engine/reference/commandline/dockerd/). + + - `--insecure-registry` is required by the Docker daemon for the communication with an insecure registry. Refer to [Docker documentation](https://docs.docker.com/engine/reference/commandline/dockerd/#insecure-registries) for its syntax. + + {{}} + +2. After that, reload the configuration file and restart Docker: + + ```bash + sudo systemctl daemon-reload + ``` + + ```bash + sudo systemctl restart docker + ``` + +3. Go back to the **Data Settings** page and select **Image registry information** for **Type**. Enter your Harbor IP address for **Registry Address** and enter the username and password. + + {{< notice note >}} + + If you want to use the domain name instead of the IP address with Harbor, you may need to configure the CoreDNS and nodelocaldns within the cluster. + + {{}} + +4. Click **Create**. Later, the Secret is displayed on the **Secrets** page. For more information about how to edit the Secret after you create it, see [Check Secret Details](../../../project-user-guide/configuration/secrets/#check-secret-details). + +**HTTPS** + +For the integration of the HTTPS-based Harbor registry, refer to [Harbor Documentation](https://goharbor.io/docs/1.10/install-config/configure-https/). 
Make sure you use `docker login` to connect to your Harbor registry. + +## Use an Image Registry + +When you set images, you can select the private image registry if the Secret of it is created in advance. For example, click the arrow on the **Add Container** page to expand the registry list when you create a [Deployment](../../../project-user-guide/application-workloads/deployments/). After you choose the image registry, enter the image name and tag to use the image. + +If you use YAML to create a workload and need to use a private image registry, you need to manually add `kubesphere.io/imagepullsecrets` to `annotations` in your local YAML file, and enter the key-value pair in JSON format, where `key` must be the name of the container, and `value` must be the name of the secret, as shown in the following sample. + +![kubesphere-ecosystem](/images/docs/v3.3/project-user-guide/configurations/image-pull-secrets.png) \ No newline at end of file diff --git a/content/en/docs/v3.4/project-user-guide/configuration/secrets.md b/content/en/docs/v3.4/project-user-guide/configuration/secrets.md new file mode 100644 index 000000000..16e9dfd05 --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/configuration/secrets.md @@ -0,0 +1,121 @@ +--- +title: "Kubernetes Secrets in KubeSphere" +keywords: 'KubeSphere, Kubernetes, Secrets' +description: 'Learn how to create a Secret on KubeSphere.' +linkTitle: "Secrets" +weight: 10410 +--- + +A Kubernetes [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) is used to store and manage sensitive information, such as passwords, OAuth tokens, and ssh keys. To use a Secret, a Pod needs to reference it in one of [the following ways](https://kubernetes.io/docs/concepts/configuration/secret/#overview-of-secrets). + +- As a file in a volume mounted and consumed by containerized applications running in a Pod. +- As environment variables used by containers in a Pod. +- As image registry credentials when images are pulled for the Pod by the kubelet. + +This tutorial demonstrates how to create a Secret in KubeSphere. + +## Prerequisites + +You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Create a Kubernetes Secret + +### Step 1: Open the dashboard + +Log in to the console as `project-regular`. Go to **Configuration** of a project, select **Secrets** and click **Create**. + +### Step 2: Enter basic information + +Specify a name for the Secret (for example, `demo-secret`) and click **Next** to continue. + +{{< notice tip >}} + +You can see the Secret's manifest file in YAML format by enabling **Edit YAML** in the upper-right corner. KubeSphere allows you to edit the manifest file directly to create a Secret. Alternatively, you can follow the steps below to create a Secret via the dashboard. + +{{}} + +### Step 3: Set a Secret + +1. Under the tab **Data Settings**, you must select a Secret type. In KubeSphere, you can create the following Kubernetes Secret types, indicated by the `type` field. + + {{< notice note >}} + + For all Secret types, values for all keys under the field `data` in the manifest must be base64-encoded strings. After you specify values on the KubeSphere dashboard, KubeSphere converts them into corresponding base64 character values in the YAML file. 
For example, if you enter `password` and `hello123` for **Key** and **Value** respectively on the **Edit Data** page when you create the default type of Secret, the actual value displaying in the YAML file is `aGVsbG8xMjM=` (namely, `hello123` in base64 format), automatically created by KubeSphere. + + {{}} + + - **Default**. The type of [Opaque](https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets) in Kubernetes, which is also the default Secret type in Kubernetes. You can create arbitrary user-defined data for this type of Secret. Click **Add Data** to add key-value pairs for it. + + - **TLS information**. The type of [kubernetes.io/tls](https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets) in Kubernetes, which is used to store a certificate and its associated key that are typically used for TLS, such as TLS termination of Ingress resources. You must specify **Credential** and **Private Key** for it, indicated by `tls.crt` and `tls.key` in the YAML file respectively. + + - **Image registry information**. The type of [kubernetes.io/dockerconfigjson](https://kubernetes.io/docs/concepts/configuration/secret/#docker-config-secrets) in Kubernetes, which is used to store the credentials for accessing a Docker registry for images. For more information, see [Image Registries](../image-registry/). + + - **Username and password**. The type of [kubernetes.io/basic-auth](https://kubernetes.io/docs/concepts/configuration/secret/#basic-authentication-secret) in Kubernetes, which is used to store credentials needed for basic authentication. You must specify **Username** and **Password** for it, indicated by `username` and `password` in the YAML file respectively. + +2. For this tutorial, select the default type of Secret. Click **Add Data** and enter the **Key** (`MYSQL_ROOT_PASSWORD`) and **Value** (`123456`) to specify a Secret for MySQL. + +3. Click **√** in the lower-right corner to confirm. You can continue to add key-value pairs to the Secret or click **Create** to finish the creation. For more information about how to use the Secret, see [Compose and Deploy WordPress](../../../quick-start/wordpress-deployment/#task-3-create-an-application). + +## Check Secret Details + +1. After a Secret is created, it will be displayed in the list. You can click icon on the right and select the operation from the menu to modify it. + + - **Edit Information**: View and edit the basic information. + - **Edit YAML**: View, upload, download, or update the YAML file. + - **Edit Settings**: Modify the key-value pair of the Secret. + - **Delete**: Delete the Secret. + +2. Click the name of the Secret and you can go to its details page. Under the tab **Data**, you can see all the key-value pairs you have added for the Secret. + + {{< notice note >}} + +As mentioned above, KubeSphere automatically converts the value of a key into its corresponding base64 character value. To see the actual decoded value, click icon on the right. + +{{}} + +3. Click **More** to display what operations about this Secret you can do. + + - **Edit YAML**: View, upload, download, or update the YAML file. + - **Edit Secret**: Modify the key-value pair of the Secret. + - **Delete**: Delete the Secret, and return to the list page. 
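+
+For reference, the default (Opaque) Secret created in the steps above looks roughly like the following manifest when you open it with **Edit YAML**. This is a minimal sketch assuming a hypothetical project (namespace) named `demo-project`; `MTIzNDU2` is simply `123456` in base64, as described earlier.
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: demo-secret
+  namespace: demo-project          # hypothetical project (namespace) name
+type: Opaque
+data:
+  MYSQL_ROOT_PASSWORD: MTIzNDU2    # base64-encoded "123456"
+```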
+ + +## How to Use a Kubernetes Secret + +Generally, you need to use a Secret when you create workloads, [Services](../../../project-user-guide/application-workloads/services/), [Jobs](../../../project-user-guide/application-workloads/jobs/) or [CronJobs](../../../project-user-guide/application-workloads/cronjobs/). For example, you can select a Secret for a code repository. For more information, see [Image Registries](../image-registry/). + +Alternatively, you may need to add environment variables for containers. On the **Container Image** page, select **Environment Variables** and click **From secret** to use a Secret from the list. + +## Create the Most Common Secrets + +This section shows how to create Secrets from your Docker Hub account and GitHub account. + +### Create the Docker Hub Secret + +1. Log in to KubeSphere as `project-regular` and go to your project. Select **Secrets** from the navigation bar and click **Create** on the right. + +2. Set a name, such as `dockerhub-id`, and click **Next**. On the **Data Settings** page, fill in the following fields and click **Validate** to verify whether the information provided is valid. + + **Type**: Select **Image registry information**. + + **Registry Address**: Enter the Docker Hub registry address, such as `docker.io`. + + **Username**: Enter your Docker ID. + + **Password**: Enter your Docker Hub password. + +3. Click **Create** to finish. + +### Create the GitHub Secret + +1. Log in to KubeSphere as `project-regular` and go to your project. Select **Secrets** from the navigation bar and click **Create** on the right. + +2. Set a name, such as `github-id`, and click **Next**. On the **Data Settings** page, fill in the following fields. + + **Type**: Select **Username and password**. + + **Username**: Enter your GitHub account. + + **Password**: Enter your GitHub password. + +3. Click **Create** to finish. diff --git a/content/en/docs/v3.4/project-user-guide/configuration/serviceaccounts.md b/content/en/docs/v3.4/project-user-guide/configuration/serviceaccounts.md new file mode 100644 index 000000000..c7dca7108 --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/configuration/serviceaccounts.md @@ -0,0 +1,50 @@ +--- +title: "Service Accounts" +keywords: 'KubeSphere, Kubernetes, Service Accounts' +description: 'Learn how to create service accounts on KubeSphere.' +linkTitle: "Service Accounts" +weight: 10440 +--- + +A [service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) provides an identity for processes that run in a Pod. When accessing a cluster, a user is authenticated by the API server as a particular user account. Processes in containers inside Pods are authenticated as a particular service account when these processes contact the API server. + +This document describes how to create service accounts on KubeSphere. + +## Prerequisites + +You have created a workspace, a project, and a user (`project-regular`), invited the user to the project, and assigned it the `operator` role. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Create a Service Account + +1. Log in to the KubeSphere console as `project-regular`, and click **Projects**. + +1. Select a project where you want to create a service account. + +1. On the left navigation pane, select **Configuration** > **Service Accounts**. 
You can see a service account `default` on the **Service Accounts** page, which is automatically created when the project is created. + + {{< notice note >}} + + If you have not specified any service account when creating workloads in a project, the service account `default` in the same project is automatically assigned. + + {{}} + +2. Click **Create**. In the displayed **Create Service Account** dialog box, set the following parameters: + +- **Name** (mandatory): Specifies a unique identifier for the service account. +- **Alias**: Specifies an alias for the service account to help you better identify the service account. +- **Description**: Briefly introduces the service account. +- **Project Role**: Selects a project role from the drop-down list for the service account. Different project roles have [different permissions](../../../project-administration/role-and-member-management/#built-in-roles). + +4. Click **Create** after you finish setting the parameters. The service account created is displayed on the **Service Accounts** page. + +## View the Details Page of a Service Account + +1. On the left navigation pane, select **Configuration** > **Service Accounts**. Click the service account created to go to its details page. + +2. Click **Edit Information** to edit its basic information, or click **More** to perform the following operations: + - **Edit YAML**: Views, updates, or downloads the YAML file. + - **Change Role**: Changes the project role of the service account. + - **Delete**: Deletes the service account. + +3. On the **Resource Status** tab on the right, view details of the Secret and the kubeconfig of the service account. + diff --git a/content/en/docs/v3.4/project-user-guide/custom-application-monitoring/_index.md b/content/en/docs/v3.4/project-user-guide/custom-application-monitoring/_index.md new file mode 100644 index 000000000..5e82ef007 --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/custom-application-monitoring/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Custom Application Monitoring" +weight: 10800 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/project-user-guide/custom-application-monitoring/examples/_index.md b/content/en/docs/v3.4/project-user-guide/custom-application-monitoring/examples/_index.md new file mode 100644 index 000000000..b06c2a50e --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/custom-application-monitoring/examples/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Examples" +weight: 10811 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/project-user-guide/custom-application-monitoring/examples/monitor-mysql.md b/content/en/docs/v3.4/project-user-guide/custom-application-monitoring/examples/monitor-mysql.md new file mode 100644 index 000000000..5522caf7b --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/custom-application-monitoring/examples/monitor-mysql.md @@ -0,0 +1,72 @@ +--- +title: "Monitor MySQL" +keywords: 'monitoring, Prometheus, MySQL, MySQL Exporter' +description: 'Deploy MySQL and MySQL Exporter and create a dashboard to monitor the app.' +linkTitle: "Monitor MySQL" +weight: 10812 +--- +From the [Introduction](../../introduction#indirect-exposing) section, you know it is not feasible to instrument MySQL with Prometheus metrics directly. To expose MySQL metrics in Prometheus format, you need to deploy MySQL Exporter first. + +This tutorial demonstrates how to monitor and visualize MySQL metrics. 
+ +## Prerequisites + +- You need to [enable the App Store](../../../../pluggable-components/app-store/). MySQL and MySQL Exporter are available in the App Store. +- You need to create a workspace, a project, and a user (`project-regular`) for this tutorial. The user needs to be invited to the project with the `operator` role. For more information, see [Create Workspaces, Projects, Users and Roles](../../../../quick-start/create-workspace-and-project/). + +## Step 1: Deploy MySQL + +To begin with, you need to [deploy MySQL from the App Store](../../../../application-store/built-in-apps/mysql-app/). + +1. Go to your project and click **App Store** in the upper-left corner. + +2. Click **MySQL** to go to its details page and click **Install** on the **App Information** tab. + + {{< notice note >}} + +MySQL is a built-in app in the KubeSphere App Store, which means it can be deployed and used directly once the App Store is enabled. + +{{}} + +3. Under **Basic Information**, set a **Name** and select a **Version**. Select the project where the app is deployed under **Location** and click **Next**. + +4. Under **App Settings**, set a root password by uncommenting the `mysqlRootPassword` field and click **Install**. + +5. Wait until MySQL is up and running. + +## Step 2: Deploy MySQL Exporter + +You need to deploy MySQL Exporter in the same project on the same cluster. MySQL Exporter is responsible for querying the status of MySQL and reports the data in Prometheus format. + +1. Go to **App Store** and click **MySQL Exporter**. + +2. On the details page, click **Install**. + +3. Under **Basic Information**, set a **Name** and select a **Version**. Select the same project where MySQL is deployed under **Location** and click **Next**. + +4. Make sure `serviceMonitor.enabled` is set to `true`. The built-in MySQL Exporter sets it to `true` by default, so you don't need to manually change the value of `serviceMonitor.enabled`. + + {{< notice warning >}} +You must enable the ServiceMonitor CRD if you are using external exporter Helm charts. Those charts usually disable ServiceMonitors by default and require manual modification. + {{}} + +5. Modify MySQL connection parameters. MySQL Exporter needs to connect to the target MySQL. In this tutorial, MySQL is installed with the service name `mysql-dh3ily`. Navigate to `mysql` in the configuration file, and set `host` to `mysql-dh3ily`, `pass` to `testing`, and `user` to `root`. Note that your MySQL service may be created with **a different name**. After you finish editing the file, click **Install**. + +6. Wait until MySQL Exporter is up and running. + +## Step 3: Create a Monitoring Dashboard + +You can create a monitoring dashboard for MySQL and visualize real-time metrics. + +1. In the same project, go to **Custom Monitoring** under **Monitoring & Alerting** in the sidebar and click **Create**. + +2. In the displayed dialog box, set a name for the dashboard (for example, `mysql-overview`) and select the MySQL template. Click **Next** to continue. + +3. Save the template by clicking **Save Template** in the upper-right corner. A newly-created dashboard is displayed on the **Custom Monitoring Dashboards** page. + + {{< notice note >}} + +- The built-in MySQL template is provided by KubeSphere to help you monitor MySQL metrics. You can also add more metrics on the dashboard as needed. + +- For more information about dashboard properties, see [Visualization](../../../../project-user-guide/custom-application-monitoring/visualization/overview/). 
+ {{}} \ No newline at end of file diff --git a/content/en/docs/v3.4/project-user-guide/custom-application-monitoring/examples/monitor-sample-web.md b/content/en/docs/v3.4/project-user-guide/custom-application-monitoring/examples/monitor-sample-web.md new file mode 100644 index 000000000..1e74c3dde --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/custom-application-monitoring/examples/monitor-sample-web.md @@ -0,0 +1,72 @@ +--- +title: "Monitor a Sample Web Application" +keywords: 'monitoring, prometheus, prometheus operator' +description: 'Use a Helm chart to deploy a sample web app and create a dashboard to monitor the app.' +linkTitle: "Monitor a Sample Web Application" +weight: 10813 +--- + +This section walks you through monitoring a sample web application. The application is instrumented with the Prometheus Go client in its code. Therefore, it can expose metrics directly without the help of exporters. + +## Prerequisites + +- Please make sure you [enable the OpenPitrix system](../../../../pluggable-components/app-store/). +- You need to create a workspace, a project, and a user account for this tutorial. For more information, see [Create Workspaces, Projects, Users and Roles](../../../../quick-start/create-workspace-and-project/). The account needs to be a platform regular user and to be invited to the workspace with the `self-provisioner` role. Namely, create a user `workspace-self-provisioner` with the `self-provisioner` role, and use this account to create a project (for example, `test`). In this tutorial, you log in as `workspace-self-provisioner` and work in the project `test` in the workspace `demo-workspace`. + +- Knowledge of Helm charts and [PromQL](https://prometheus.io/docs/prometheus/latest/querying/examples/). + +## Hands-on Lab + +### Step 1: Prepare the image of a sample web application + +The sample web application exposes a user-defined metric called `myapp_processed_ops_total`. It is a counter-type metric that counts the number of operations that have been processed. The counter increases automatically by one every 2 seconds. + +This sample application exposes application-specific metrics via the endpoint `http://localhost:2112/metrics`. + +In this tutorial, you use the ready-made image `kubespheredev/prometheus-example-app`. The source code can be found in [kubesphere/prometheus-example-app](https://github.com/kubesphere/prometheus-example-app). You can also follow [Instrument A Go Application For Prometheus](https://prometheus.io/docs/guides/go-application/) in the official documentation of Prometheus. + +### Step 2: Pack the application into a Helm chart + +Pack the Deployment, Service, and ServiceMonitor YAML templates into a Helm chart for reuse. In the Deployment and Service templates, you define the sample web container and the port for the metrics endpoint. A ServiceMonitor is a custom resource defined and used by Prometheus Operator. It connects your application to the KubeSphere monitoring engine (Prometheus) so that the engine knows where and how to scrape metrics. In future releases, KubeSphere will provide a graphical user interface for easy operation. + +Find the source code in the folder `helm` in [kubesphere/prometheus-example-app](https://github.com/kubesphere/prometheus-example-app). The Helm chart package is ready-made and is named `prometheus-example-app-0.1.0.tgz`. Please download the .tgz file; you will use it in the next step. + +### Step 3: Upload the Helm chart + +1. 
Go to the workspace **Overview** page of `demo-workspace` and navigate to **App Templates** under **App Management**. + +2. Click **Create** and upload `prometheus-example-app-0.1.0.tgz`. + +### Step 4: Deploy the sample web application + +You need to deploy the sample web application into `test`. For demonstration purposes, you can simply run a test deployment. + +1. Click `prometheus-example-app`. + +2. Expand the menu and click **Install**. + +3. Make sure you deploy the sample web application in `test` and click **Next**. + +4. Make sure `serviceMonitor.enabled` is set to `true` and click **Install**. + +5. In **Workloads** of the project `test`, wait until the sample web application is up and running. + +### Step 5: Create a monitoring dashboard + +This section guides you on how to create a dashboard from scratch. You will create a text chart showing the total number of processed operations and a line chart for displaying the operation rate. + +1. Navigate to **Custom Monitoring Dashboards** and click **Create**. + +2. Set a name (for example, `sample-web`) and click **Next**. + +3. Enter a title in the upper-left corner (for example, `Sample Web Overview`). + +4. Click icon on the left column to create a text chart. + +5. Type the PromQL expression `myapp_processed_ops_total` in the field **Monitoring Metric** and give a chart name (for example, `Operation Count`). Click **√** in the lower-right corner to continue. + +6. Click **Add Monitoring Item**, select **Line Chart**, and click **OK**. + +7. Enter the PromQL expression `irate(myapp_processed_ops_total[3m])` for **Monitoring Metric** and name the chart `Operation Rate`. To improve the appearance, you can set **Metric Name** to `{{service}}`. It will name each line with the value of the metric label `service`. Next, set **Decimal Places** to `2` so that the result will be truncated to two decimal places. Click **√** in the lower-right corner to continue. + +8. Click **Save Template** to save it. diff --git a/content/en/docs/v3.4/project-user-guide/custom-application-monitoring/introduction.md b/content/en/docs/v3.4/project-user-guide/custom-application-monitoring/introduction.md new file mode 100644 index 000000000..04581f603 --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/custom-application-monitoring/introduction.md @@ -0,0 +1,53 @@ +--- +title: "Introduction to Custom Application Monitoring" +keywords: 'monitoring, prometheus, prometheus operator' +description: 'Introduce the KubeSphere custom monitoring feature and metric exposing, including exposing methods and ServiceMonitor CRD.' +linkTitle: "Introduction" +weight: 10810 +--- + +Custom monitoring allows you to monitor and visualize custom application metrics in KubeSphere. The application can be either a third-party application, such as MySQL, Redis, and Elasticsearch, or your own application. This section introduces the workflow of this feature. + +The KubeSphere monitoring engine is powered by Prometheus and Prometheus Operator. To integrate custom application metrics into KubeSphere, you need to go through the following steps in general. + +- [Expose Prometheus-formatted metrics](#step-1-expose-prometheus-formatted-metrics) of your application. +- [Apply ServiceMonitor CRD](#step-2-apply-servicemonitor-crd) to hook up your application with the monitoring target. +- [Visualize metrics](#step-3-visualize-metrics) on the dashboard to view the trend of custom metrics. 
+ +### Step 1: Expose Prometheus-formatted metrics + +First of all, your application must expose Prometheus-formatted metrics. The Prometheus exposition format is the de facto standard in the realm of cloud-native monitoring. Prometheus uses a [text-based exposition format](https://prometheus.io/docs/instrumenting/exposition_formats/). Depending on your application and use case, there are two ways to expose metrics: + +#### Direct exposing + +Directly exposing Prometheus metrics from applications is a common practice among cloud-native applications. It requires developers to import Prometheus client libraries in their code and expose metrics at a specific endpoint. Many applications, such as etcd, CoreDNS, and Istio, adopt this method. + +The Prometheus community offers client libraries for most programming languages. Find your language on the [Prometheus Client Libraries](https://prometheus.io/docs/instrumenting/clientlibs/) page. For Go developers, read [Instrumenting a Go application](https://prometheus.io/docs/guides/go-application/) to learn how to write a Prometheus-compliant application. + +The [sample web application](../examples/monitor-sample-web/) is an example demonstrating how an application exposes Prometheus-formatted metrics directly. + +#### Indirect exposing + +If you don’t want to modify your code or you cannot do so because the application is provided by a third party, you can deploy an exporter that serves as an agent to scrape metric data and translate it into Prometheus format. + +For most third-party applications, such as MySQL, the Prometheus community provides production-ready exporters. Refer to [Exporters and Integrations](https://prometheus.io/docs/instrumenting/exporters/) for available exporters. In KubeSphere, it is recommended to [enable OpenPitrix](../../../pluggable-components/app-store/) and deploy exporters from the App Store. Exporters for MySQL, Elasticsearch, and Redis are all built-in apps in the App Store. + +Please read [Monitor MySQL](../examples/monitor-mysql/) to learn how to deploy a MySQL exporter and monitor MySQL metrics. + +Writing an exporter is essentially the same as instrumenting an application with Prometheus client libraries. The only difference is that exporters need to connect to applications and translate application metrics into Prometheus format. + +### Step 2: Apply ServiceMonitor CRD + +In the previous step, you exposed metric endpoints in a Kubernetes Service object. Next, you need to inform the KubeSphere monitoring engine of your new changes. + +The ServiceMonitor CRD is defined by [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator). A ServiceMonitor contains information about the metrics endpoints. With ServiceMonitor objects, the KubeSphere monitoring engine knows where and how to scrape metrics. For each monitoring target, you apply a ServiceMonitor object to hook your application (or exporters) up to KubeSphere. + +In KubeSphere 3.3, you need to pack a ServiceMonitor with your applications (or exporters) into a Helm chart for reuse. In future releases, KubeSphere will provide graphical interfaces for easy operation. + +Please read [Monitor a Sample Web Application](../examples/monitor-sample-web/) to learn how to pack a ServiceMonitor with your application. A minimal sketch of a ServiceMonitor manifest is also provided below. + +### Step 3: Visualize Metrics + +After around two minutes, the KubeSphere monitoring engine starts to scrape and store metrics. Then you can use PromQL to query metrics and design panels and dashboards.
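+
+As a reference for Step 2, a ServiceMonitor is a short manifest like the one below. This is a minimal sketch with hypothetical names, a hypothetical namespace, and a hypothetical port name; the `port` value must match the name of the port defined in your application's Service, and the label selector must match the Service labels.
+
+```yaml
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: sample-app                 # hypothetical name
+  namespace: demo-project          # hypothetical project (namespace) name
+  labels:
+    app: sample-app
+spec:
+  selector:
+    matchLabels:
+      app: sample-app              # must match the labels of the Service
+  endpoints:
+    - port: metrics                # name of the Service port exposing metrics
+      path: /metrics
+      interval: 1m
+```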
+ +Please read [Querying](../visualization/querying/) to learn how to write a PromQL expression. For dashboard features, please read [Visualization](../visualization/overview/). diff --git a/content/en/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/_index.md b/content/en/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/_index.md new file mode 100644 index 000000000..b5f6f3290 --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Visualization" +weight: 10814 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/overview.md b/content/en/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/overview.md new file mode 100644 index 000000000..51ad6ee56 --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/overview.md @@ -0,0 +1,71 @@ +--- +title: "Monitoring Dashboard — Overview" +keywords: 'monitoring, prometheus, prometheus operator' +description: 'Understand the general steps of creating a monitoring dashboard as well as its layout.' +linkTitle: "Overview" +weight: 10815 +--- + +This section introduces monitoring dashboard features. You will learn how to visualize metric data in KubeSphere for your custom apps. If you do not know how to integrate your app metrics into the KubeSphere monitoring system, read [Introduction](../../introduction/) first. + +## Create a Monitoring Dashboard + +To create new dashboards for your app metrics, navigate to **Custom Monitoring** under **Monitoring & Alerting** in a project. There are three ways to create monitoring dashboards: built-in templates, blank templates for customization and YAML files. + +Built-in templates include MySQL, Elasticsearch, Redis, and more. These templates are for demonstration purposes and are updated with KubeSphere releases. Besides, you can choose to customize monitoring dashboards. + +A KubeSphere custom monitoring dashboard can be seen as simply a YAML configuration file. The data model is heavily inspired by [Grafana](https://github.com/grafana/grafana), an open-source tool for monitoring and observability. Please find KubeSphere monitoring dashboard data model design in [kubesphere/monitoring-dashboard](https://github.com/kubesphere/monitoring-dashboard). The configuration file is portable and sharable. You are welcome to contribute dashboard templates to the KubeSphere community via [Monitoring Dashboards Gallery](https://github.com/kubesphere/monitoring-dashboard/tree/master/contrib/gallery). + +### From a built-in template + +To help you quickly get started, KubeSphere provides built-in templates for MySQL, Elasticsearch, and Redis. If you want to create dashboards from built-in templates, select a template and then click **Next**. + +### From a blank template + +To start with a blank template, click **Next**. + +### From a YAML file + +Turn on **Edit YAML** in the upper-right corner and then paste your dashboard YAML file. + +## Dashboard Layout + +The monitoring dashboard is composed of four parts. Global settings are on the top of the page. The left-most column is for text-based charts showing the current value of metrics. The middle column contains chart collections for visualizing metrics over a specific period. The right-most column presents detailed information in charts. 
+ +### Top bar + +On the top bar, you can configure the following settings: title, theme, time range, and refresh interval. + +### Text chart column + +You can add new text charts in the left-most column. + +### Chart display column + +You can view charts in the middle column. + +### Detail column + +You can view chart details in the right-most column. It shows the **max**, **min**, **avg**, and **last** value of metrics within the specified period. + +## Edit the Monitoring Dashboard + +You can modify an existing template by clicking **Edit Template** in the upper-right corner. + +### Add a chart + +To add text charts, click ➕ in the left column. To add charts in the middle column, click **Add Monitoring Item** in the lower-right corner. + +### Add a monitoring group + +To group monitoring items, you can click the icon to drag and drop an item into the target group. To add a new group, click **Add Monitoring Group**. If you want to change the position of a group, hover over the group and click the up or down arrow on the right. + +{{< notice note >}} + +The order of groups on the right is consistent with the order of charts in the middle. In other words, if you change the order of groups, the position of their respective charts also changes accordingly. + +{{}} + +## Dashboard Templates + +Find and share dashboard templates in [Monitoring Dashboards Gallery](https://github.com/kubesphere/monitoring-dashboard/tree/master/contrib/gallery). It is a place for KubeSphere community users to contribute their masterpieces. + diff --git a/content/en/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/panel.md b/content/en/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/panel.md new file mode 100644 index 000000000..1dd9703d8 --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/panel.md @@ -0,0 +1,34 @@ +--- +title: "Charts" +keywords: 'monitoring, Prometheus, Prometheus Operator' +description: 'Explore dashboard properties and chart metrics.' +linkTitle: "Charts" +weight: 10816 +--- + +KubeSphere currently supports two kinds of charts: text charts and graphs. + +## Text Chart + +A text chart is preferable for displaying a single metric value. The editing window for the text chart is composed of two parts. The upper part displays the real-time metric value, and the lower part is for editing. You can enter a PromQL expression to fetch a single metric value. + +- **Chart Name**: The name of the text chart. +- **Unit**: The metric data unit. +- **Decimal Places**: Accept an integer. +- **Monitoring Metric**: Specify a monitoring metric from the drop-down list of available Prometheus metrics. + +## Graph Chart + +A graph chart is preferable for displaying multiple metric values. The editing window for the graph is composed of three parts. The upper part displays real-time metric values. The left part is for setting the graph theme. The right part is for editing metrics and chart descriptions. + +- **Chart Types**: Support basic charts and bar charts. +- **Graph Types**: Support basic charts and stacked charts. +- **Chart Colors**: Change line colors. +- **Chart Name**: The name of the chart. +- **Description**: The chart description. +- **Add**: Add a new query editor. +- **Metric Name**: Legend for the line. It supports variables. For example, `{{pod}}` means using the value of the Prometheus metric label `pod` to name this line. +- **Interval**: The step value between two data points.
+- **Monitoring Metric**: A list of available Prometheus metrics.
+- **Unit**: The metric data unit.
+- **Decimal Places**: Accepts an integer.
diff --git a/content/en/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/querying.md b/content/en/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/querying.md
new file mode 100644
index 000000000..c9f6f40d4
--- /dev/null
+++ b/content/en/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/querying.md
@@ -0,0 +1,13 @@
+---
+title: "Querying"
+keywords: 'monitoring, Prometheus, Prometheus Operator, querying'
+description: 'Learn how to specify monitoring metrics.'
+linkTitle: "Querying"
+weight: 10817
+---
+
+In the query editor, enter PromQL expressions in **Monitoring Metrics** to process and fetch metrics. To learn how to write PromQL, read [Query Examples](https://prometheus.io/docs/prometheus/latest/querying/examples/).
+
+![query-editor-1](/images/docs/v3.3/project-user-guide/custom-application-monitoring/visualization/querying/query-editor-1.png)
+
+![query-editor-2](/images/docs/v3.3/project-user-guide/custom-application-monitoring/visualization/querying/query-editor-2.png)
\ No newline at end of file
diff --git a/content/en/docs/v3.4/project-user-guide/grayscale-release/_index.md b/content/en/docs/v3.4/project-user-guide/grayscale-release/_index.md
new file mode 100644
index 000000000..f86106d9d
--- /dev/null
+++ b/content/en/docs/v3.4/project-user-guide/grayscale-release/_index.md
@@ -0,0 +1,7 @@
+---
+linkTitle: "Grayscale Release"
+weight: 10500
+
+_build:
+  render: false
+---
diff --git a/content/en/docs/v3.4/project-user-guide/grayscale-release/blue-green-deployment.md b/content/en/docs/v3.4/project-user-guide/grayscale-release/blue-green-deployment.md
new file mode 100644
index 000000000..4f5abe9a4
--- /dev/null
+++ b/content/en/docs/v3.4/project-user-guide/grayscale-release/blue-green-deployment.md
@@ -0,0 +1,74 @@
+---
+title: "Kubernetes Blue-Green Deployment on KubeSphere"
+keywords: 'KubeSphere, Kubernetes, Service Mesh, Istio, Grayscale Release, Blue-Green deployment'
+description: 'Learn how to release a blue-green deployment on KubeSphere.'
+linkTitle: "Blue-Green Deployment with Kubernetes"
+weight: 10520
+---
+
+
+A blue-green release provides zero-downtime deployment, which means the new version can be deployed with the old one preserved. At any time, only one of the versions is active and serves all the traffic, while the other one remains idle. If a problem occurs with the new version while it is running, you can quickly roll back to the old version.
+
+![blue-green-0](/images/docs/v3.3/project-user-guide/grayscale-release/blue-green-deployment/blue-green-0.png)
+
+
+## Prerequisites
+
+- You need to enable [KubeSphere Service Mesh](../../../pluggable-components/service-mesh/).
+- You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/).
+- You need to enable **Application Governance** and have an available app so that you can implement the blue-green deployment for it. The sample app used in this tutorial is Bookinfo. For more information, see [Deploy Bookinfo and Manage Traffic](../../../quick-start/deploy-bookinfo-to-k8s/).
+
+## Create a Blue-green Deployment Job
+
+1. Log in to KubeSphere as `project-regular` and go to **Grayscale Release**.
Under **Release Modes**, click **Create** on the right of **Blue-Green Deployment**.
+
+2. Set a name for it and click **Next**.
+
+3. On the **Service Settings** tab, select your app from the drop-down list and the Service for which you want to implement the blue-green deployment. If you also use the sample app Bookinfo, select **reviews** and click **Next**.
+
+4. On the **New Version Settings** tab, add another version (for example, `kubesphere/examples-bookinfo-reviews-v2:1.16.2`) and click **Next**.
+
+5. On the **Strategy Settings** tab, to allow the app version `v2` to take over all the traffic, select **Take Over** and click **Create**.
+
+6. The blue-green deployment job created is displayed under the **Release Jobs** tab. Click it to view details.
+
+7. Wait for a while and you can see that all the traffic goes to the version `v2`.
+
+8. The new **Deployment** is created as well.
+
+9. You can get the virtual service to identify the weight by running the following command:
+
+   ```bash
+   kubectl -n demo-project get virtualservice -o yaml
+   ```
+
+   {{< notice note >}}
+
+   - When you run the command above, replace `demo-project` with your own project (namely, namespace) name.
+   - If you want to run the command from the web kubectl on the KubeSphere console, you need to use the user `admin`.
+
+   {{</ notice >}}
+
+10. Expected output:
+
+    ```yaml
+    ...
+    spec:
+      hosts:
+      - reviews
+      http:
+      - route:
+        - destination:
+            host: reviews
+            port:
+              number: 9080
+            subset: v2
+          weight: 100
+    ...
+    ```
+
+## Take a Job Offline
+
+After you implement the blue-green deployment and the result meets your expectations, you can click **Delete** to take the job offline and remove the version `v1`.
+
+
diff --git a/content/en/docs/v3.4/project-user-guide/grayscale-release/canary-release.md b/content/en/docs/v3.4/project-user-guide/grayscale-release/canary-release.md
new file mode 100644
index 000000000..0aaa85250
--- /dev/null
+++ b/content/en/docs/v3.4/project-user-guide/grayscale-release/canary-release.md
@@ -0,0 +1,120 @@
+---
+title: "Canary Release"
+keywords: 'KubeSphere, Kubernetes, Canary Release, Istio, Service Mesh'
+description: 'Learn how to deploy a canary service on KubeSphere.'
+linkTitle: "Canary Release"
+weight: 10530
+---
+
+Backed by [Istio](https://istio.io/), KubeSphere provides users with the necessary control to deploy canary services. In a canary release, you introduce a new version of a service and test it by sending a small percentage of traffic to it. At the same time, the old version is responsible for handling the rest of the traffic. If everything goes well, you can gradually increase the traffic sent to the new version, while simultaneously phasing out the old version. If any issues occur, KubeSphere allows you to roll back to the previous version as you change the traffic percentage.
+
+This method serves as an efficient way to test the performance and reliability of a service. It can help detect potential problems in the actual environment while not affecting the overall system stability.
+
+![canary-release-0](/images/docs/v3.3/project-user-guide/grayscale-release/canary-release/canary-release-0.png)
+
+## Prerequisites
+
+- You need to enable [KubeSphere Service Mesh](../../../pluggable-components/service-mesh/).
+- You need to enable [KubeSphere Logging](../../../pluggable-components/logging/) so that you can use the Tracing feature.
+- You need to create a workspace, a project and a user (`project-regular`).
The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/).
+- You need to enable **Application Governance** and have an available app so that you can implement the canary release for it. The sample app used in this tutorial is Bookinfo. For more information, see [Deploy and Access Bookinfo](../../../quick-start/deploy-bookinfo-to-k8s/).
+
+## Step 1: Create a Canary Release Job
+
+1. Log in to KubeSphere as `project-regular` and navigate to **Grayscale Release**. Under **Release Modes**, click **Create** on the right of **Canary Release**.
+
+2. Set a name for it and click **Next**.
+
+3. On the **Service Settings** tab, select your app from the drop-down list and the Service for which you want to implement the canary release. If you also use the sample app Bookinfo, select **reviews** and click **Next**.
+
+4. On the **New Version Settings** tab, add another version of it (for example, `kubesphere/examples-bookinfo-reviews-v2:1.16.2`; change `v1` to `v2`) and click **Next**.
+
+5. You can send traffic to these two versions (`v1` and `v2`) either by a specific percentage or by request content such as `Http Header`, `Cookie`, and `URI`. Select **Specify Traffic Distribution** and move the slider to the middle to change the percentage of traffic sent to these two versions respectively (for example, set 50% for either one). When you finish, click **Create**.
+
+## Step 2: Verify the Canary Release
+
+Now that you have two available app versions, access the app to verify the canary release.
+
+1. Visit the Bookinfo website and refresh your browser repeatedly. You can see that the **Book Reviews** section switches between v1 and v2 at a rate of 50%.
+
+2. The created canary release job is displayed under the tab **Release Jobs**. Click it to view details.
+
+3. You can see that half of the traffic goes to each version.
+
+4. The new Deployment is created as well.
+
+5. You can directly get the virtual service to identify the weight by executing the following command:
+
+   ```bash
+   kubectl -n demo-project get virtualservice -o yaml
+   ```
+
+   {{< notice note >}}
+
+   - When you execute the command above, replace `demo-project` with your own project (namely, namespace) name.
+   - If you want to execute the command from the web kubectl on the KubeSphere console, you need to use the user `admin`.
+
+   {{</ notice >}}
+
+6. Expected output:
+
+   ```yaml
+   ...
+   spec:
+     hosts:
+     - reviews
+     http:
+     - route:
+       - destination:
+           host: reviews
+           port:
+             number: 9080
+           subset: v1
+         weight: 50
+       - destination:
+           host: reviews
+           port:
+             number: 9080
+           subset: v2
+         weight: 50
+   ...
+   ```
+
+## Step 3: View Network Topology
+
+1. Execute the following command on the machine where KubeSphere runs to simulate access to Bookinfo every 0.5 seconds and bring in real traffic.
+
+   ```bash
+   watch -n 0.5 "curl http://productpage.demo-project.192.168.0.2.nip.io:32277/productpage?u=normal"
+   ```
+
+   {{< notice note >}}
+   Make sure you replace the hostname and port number in the above command with your own.
+   {{</ notice >}}
+
+2. In **Traffic Monitoring**, you can see the communication, dependencies, health, and performance among different microservices.
+
+3. Click a component (for example, **reviews**) and you can see the information of traffic monitoring on the right, displaying real-time data of **Traffic**, **Success rate**, and **Duration**.
+
+## Step 4: View Tracing Details
+
+KubeSphere provides the distributed tracing feature based on [Jaeger](https://www.jaegertracing.io/), which is used to monitor and troubleshoot microservices-based distributed applications.
+
+1. On the **Tracing** tab, you can see all phases and internal calls of requests, as well as the time spent in each phase.
+
+2. Click any item, and you can even drill down to see request details and where this request is being processed (which machine or container).
+
+## Step 5: Take Over All Traffic
+
+If everything runs smoothly, you can bring all the traffic to the new version.
+
+1. In **Release Jobs**, click the canary release job.
+
+2. In the displayed dialog box, click the icon on the right of **reviews v2** and select **Take Over**. This means 100% of the traffic will be sent to the new version (v2).
+
+   {{< notice note >}}
+   If anything goes wrong with the new version, you can roll back to the previous version v1 anytime.
+   {{</ notice >}}
+
+3. Access Bookinfo again and refresh the browser several times. You can see that it only shows the result of **reviews v2** (i.e. ratings with black stars).
+
diff --git a/content/en/docs/v3.4/project-user-guide/grayscale-release/overview.md b/content/en/docs/v3.4/project-user-guide/grayscale-release/overview.md
new file mode 100644
index 000000000..cf48a86f9
--- /dev/null
+++ b/content/en/docs/v3.4/project-user-guide/grayscale-release/overview.md
@@ -0,0 +1,39 @@
+---
+title: "Grayscale Release — Overview"
+keywords: 'Kubernetes, KubeSphere, grayscale release, overview, service mesh'
+description: 'Understand the basic concept of grayscale release.'
+linkTitle: "Overview"
+weight: 10510
+---
+
+Modern, cloud-native applications are often composed of a group of independently deployable components, also known as microservices. In a microservices architecture, developers can adjust individual services with great flexibility without affecting the rest of the network of services, each of which performs a specific function. This network of microservices making up an application is also called a **service mesh**.
+
+A KubeSphere service mesh, built on the open-source project [Istio](https://istio.io/), controls how different parts of an app interact with one another. Among other capabilities, grayscale release strategies are an important way for users to test and release new app versions without affecting the communication among microservices.
+
+## Grayscale Release Strategies
+
+A grayscale release in KubeSphere ensures a smooth transition as you upgrade your apps to a new version. The specific strategy adopted may differ, but the ultimate goal is the same: identify potential problems in advance without affecting your apps running in the production environment. This not only minimizes the risks of a version upgrade but also tests the performance of new app builds.
+
+KubeSphere provides users with three grayscale release strategies.
+
+### [Blue-green Deployment](../blue-green-deployment/)
+
+A blue-green deployment provides an efficient method of releasing new versions with zero downtime and no outages, as it creates an identical standby environment where the new app version runs. With this approach, KubeSphere routes all the traffic to either version. Namely, only one environment is live at any given time. In the case of any issues with the new build, it allows you to immediately roll back to the previous version.
+ +### [Canary Release](../canary-release/) + +A canary deployment reduces the risk of version upgrades to a minimum as it slowly rolls out changes to a small subset of users. More specifically, you have the option to expose a new app version to a portion of production traffic, which is defined by yourself on the highly responsive dashboard. Besides, KubeSphere gives you a visualized view of real-time traffic as it monitors requests after you implement a canary deployment. During the process, you can analyze the behavior of the new app version and choose to gradually increase the percentage of traffic sent to it. Once you are confident of the build, you can route all the traffic to it. + +### [Traffic Mirroring](../traffic-mirroring/) + +Traffic mirroring copies live production traffic and sends it to a mirrored service. By default, KubeSphere mirrors all the traffic while you can also manually define the percentage of traffic to be mirrored by specifying a value. Common use cases include: + +- Test new app versions. You can compare the real-time output of mirrored traffic and production traffic. +- Test clusters. You can use production traffic of instances for cluster testing. +- Test databases. You can use an empty database to store and load data. + +{{< notice note >}} + +The current KubeSphere version does not support grayscale release strategies for multi-cluster apps. + +{{}} diff --git a/content/en/docs/v3.4/project-user-guide/grayscale-release/traffic-mirroring.md b/content/en/docs/v3.4/project-user-guide/grayscale-release/traffic-mirroring.md new file mode 100644 index 000000000..7d7568fd4 --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/grayscale-release/traffic-mirroring.md @@ -0,0 +1,81 @@ +--- +title: "Traffic Mirroring" +keywords: 'KubeSphere, Kubernetes, Traffic Mirroring, Istio' +description: 'Learn how to conduct a traffic mirroring job on KubeSphere.' +linkTitle: "Traffic Mirroring" +weight: 10540 +--- + +Traffic mirroring, also called shadowing, is a powerful, risk-free method of testing your app versions as it sends a copy of live traffic to a service that is being mirrored. Namely, you implement a similar setup for acceptance test so that problems can be detected in advance. As mirrored traffic happens out of band of the critical request path for the primary service, your end users will not be affected during the whole process. + +## Prerequisites + +- You need to enable [KubeSphere Service Mesh](../../../pluggable-components/service-mesh/). +- You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). +- You need to enable **Application Governance** and have an available app so that you can mirror the traffic of it. The sample app used in this tutorial is Bookinfo. For more information, see [Deploy Bookinfo and Manage Traffic](../../../quick-start/deploy-bookinfo-to-k8s/). + +## Create a Traffic Mirroring Job + +1. Log in to KubeSphere as `project-regular` and go to **Grayscale Release**. Under **Release Modes**, click **Create** on the right of **Traffic Mirroring**. + +2. Set a name for it and click **Next**. + +3. On the **Service Settings** tab, select your app from the drop-down list and the Service of which you want to mirror the traffic. If you also use the sample app Bookinfo, select **reviews** and click **Next**. + +4. 
On the **New Version Settings** tab, add another version of it (for example, `kubesphere/examples-bookinfo-reviews-v2:1.16.2`; change `v1` to `v2`) and click **Next**. + +5. On the **Strategy Settings** tab, click **Create**. + +6. The traffic mirroring job created is displayed under the **Release Jobs** tab. Click it to view details. + +7. You can see the traffic is being mirrored to `v2` with real-time traffic displayed in the line chart. + +8. The new **Deployment** is created as well. + +9. You can get the virtual service to view `mirror` and `weight` by running the following command: + + ```bash + kubectl -n demo-project get virtualservice -o yaml + ``` + + {{< notice note >}} + + - When you run the command above, replace `demo-project` with your own project (namely, namespace) name. + - If you want to run the command from the web kubectl on the KubeSphere console, you need to use the user `admin`. + + {{}} + +10. Expected output: + + ```bash + ... + spec: + hosts: + - reviews + http: + - route: + - destination: + host: reviews + port: + number: 9080 + subset: v1 + weight: 100 + mirror: + host: reviews + port: + number: 9080 + subset: v2 + ... + ``` + + This route rule sends 100% of the traffic to `v1`. The `mirror` field specifies that you want to mirror to the service `reviews v2`. When traffic gets mirrored, the requests are sent to the mirrored service with their Host/Authority headers appended with `-shadow`. For example, `cluster-1` becomes `cluster-1-shadow`. + + {{< notice note >}} + +These requests are mirrored as “fire and forget”, which means that the responses are discarded. You can specify the `weight` field to mirror a fraction of the traffic, instead of mirroring all requests. If this field is absent, for compatibility with older versions, all traffic will be mirrored. For more information, see [Mirroring](https://istio.io/v1.5/pt-br/docs/tasks/traffic-management/mirroring/). + +{{}} + +## Take a Job Offline + +You can remove the traffic mirroring job by clicking **Delete**, which does not affect the current app version. diff --git a/content/en/docs/v3.4/project-user-guide/image-builder/_index.md b/content/en/docs/v3.4/project-user-guide/image-builder/_index.md new file mode 100644 index 000000000..d10a9e339 --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/image-builder/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Image Builder" +weight: 10600 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/project-user-guide/image-builder/binary-to-image.md b/content/en/docs/v3.4/project-user-guide/image-builder/binary-to-image.md new file mode 100644 index 000000000..63b377a2b --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/image-builder/binary-to-image.md @@ -0,0 +1,141 @@ +--- +title: "Binary to Image: Publish an Artifact to Kubernetes" +keywords: "KubeSphere, Kubernetes, Docker, B2I, Binary-to-Image" +description: "Use B2I to import an artifact and push it to a target repository." +linkTitle: "Binary to Image: Publish an Artifact to Kubernetes" +weight: 10620 +--- + +Binary-to-Image (B2I) is a toolkit and workflow for building reproducible container images from binary executables such as Jar, War, and binary packages. More specifically, you upload an artifact and specify a target repository such as Docker Hub or Harbor where you want to push the image. If everything runs successfully, your image will be pushed to the target repository and your application will be automatically deployed to Kubernetes if you create a Service in the workflow. 
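+
+Pushing the image requires registry credentials, which KubeSphere reads from a Secret in your project (see the prerequisites and Step 1 below). Creating that Secret through the console is the documented path; it is, in essence, a standard `kubernetes.io/dockerconfigjson` Secret, so an equivalent manifest would look roughly like the following sketch, where the name, namespace, and encoded credentials are placeholders:
+
+```yaml
+# A sketch of the image registry Secret that B2I pushes with.
+# The name and namespace are placeholders; the console normally creates
+# this Secret for you (see Step 1 below).
+apiVersion: v1
+kind: Secret
+metadata:
+  name: dockerhub-secret
+  namespace: demo-project
+type: kubernetes.io/dockerconfigjson
+data:
+  .dockerconfigjson: <base64-encoded Docker config with your registry, username, and password>
+```
+
+If you prefer the command line, `kubectl create secret docker-registry` produces the same kind of Secret; either way, you select it later as the **Target Image Registry**.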
+
+In a B2I workflow, you do not need to write any Dockerfile. This not only reduces learning costs but also improves release efficiency, allowing users to focus more on their business.
+
+This tutorial demonstrates two different ways to build an image based on an artifact in a B2I workflow. Ultimately, the image will be released to Docker Hub.
+
+For demonstration and testing purposes, here are some example artifacts you can use to implement the B2I workflow:
+
+| Artifact Package | GitHub Repository |
+| ------------------------------------------------------------ | ------------------------------------------------------------ |
+| [b2i-war-java8.war](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-war-java8.war) | [spring-mvc-showcase](https://github.com/spring-projects/spring-mvc-showcase) |
+| [b2i-war-java11.war](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-war-java11.war) | [springmvc5](https://github.com/kubesphere/s2i-java-container/tree/master/tomcat/examples/springmvc5) |
+| [b2i-binary](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-binary) | [devops-go-sample](https://github.com/runzexia/devops-go-sample) |
+| [b2i-jar-java11.jar](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-jar-java11.jar) | [java-maven-example](https://github.com/kubesphere/s2i-java-container/tree/master/java/examples/maven) |
+| [b2i-jar-java8.jar](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-jar-java8.jar) | [devops-maven-sample](https://github.com/kubesphere/devops-maven-sample) |
+
+## Prerequisites
+
+- You have enabled the [KubeSphere DevOps System](../../../pluggable-components/devops/).
+- You need to create a [Docker Hub](https://www.dockerhub.com/) account. GitLab and Harbor are also supported.
+- You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/).
+- Set a dedicated CI node for building images. This is not mandatory but recommended for development and production environments as it caches dependencies and reduces build time. For more information, see [Set a CI Node for Dependency Caching](../../../devops-user-guide/how-to-use/devops-settings/set-ci-node/).
+
+## Create a Service Using Binary-to-Image (B2I)
+
+The steps below show how to upload an artifact, build an image, and release it to Kubernetes by creating a Service in a B2I workflow.
+
+![service-build](/images/docs/v3.3/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/service-build.png)
+
+### Step 1: Create a Docker Hub Secret
+
+You must create a Docker Hub Secret so that the Docker image created through B2I can be pushed to Docker Hub. Log in to KubeSphere as `project-regular`, go to your project, and create a Secret for Docker Hub. For more information, see [Create the Most Common Secrets](../../../project-user-guide/configuration/secrets/#create-the-most-common-secrets).
+
+### Step 2: Create a Service
+
+1. In the same project, navigate to **Services** under **Application Workloads** and click **Create**.
+
+2. Scroll down to **Create Service from Artifact** and select **WAR**. This tutorial uses the [spring-mvc-showcase](https://github.com/spring-projects/spring-mvc-showcase) project as a sample and uploads a WAR artifact to KubeSphere.
Set a name, such as `b2i-war-java8`, and click **Next**.
+
+3. On the **Build Settings** page, provide the following information accordingly and click **Next**.
+
+   **Service Type**: Select **Stateless Service** for this example. For more information about different Services, see [Service Type](../../../project-user-guide/application-workloads/services/#service-type).
+
+   **Artifact File**: Upload the WAR artifact ([b2i-war-java8](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-war-java8.war)).
+
+   **Build Environment**: Select **kubesphere/tomcat85-java8-centos7:v2.1.0**.
+
+   **Image Name**: Enter `<Docker Hub username>/<image name>` or `<Harbor project name>/<image name>` as the image name.
+
+   **Image Tag**: The image tag. Enter `latest`.
+
+   **Target Image Registry**: Select the Docker Hub Secret as the image is pushed to Docker Hub.
+
+4. On the **Pod Settings** page, scroll down to **Port Settings** to set the access policy for the container. Select **HTTP** for **Protocol**, customize the name (for example, `http-port`), and enter `8080` for both **Container Port** and **Service Port**. Click **Next** to continue.
+
+   {{< notice note >}}
+
+   For more information about how to set other parameters on the **Container Settings** page, see [Pod Settings](../../../project-user-guide/application-workloads/container-image-settings/).
+
+   {{</ notice >}}
+
+5. On the **Storage Settings** page, you can add a volume for the container. For more information, see [Volumes](../../../project-user-guide/storage/volumes/). Click **Next** to continue.
+
+6. On the **Advanced Settings** page, select **External Access** and choose **NodePort** as the access method. Click **Create** to finish the whole process.
+
+7. Click **Image Builders** from the navigation bar and you can see that the example image is being built.
+
+### Step 3: Check results
+
+1. Wait for a while and you can see that the status of the image builder has reached **Successful**.
+
+2. Click this image builder to go to its details page. Under **Job Records**, click the icon on the right of a record to see building logs. You can see `Build completed successfully` at the end of the log if everything runs normally.
+
+3. Go back to the **Services**, **Deployments**, and **Jobs** pages, and you can see that the corresponding Service, Deployment, and Job of the image have all been created successfully.
+
+4. In your Docker Hub repository, you can see that KubeSphere has pushed the image to the repository with the expected tag.
+
+### Step 4: Access the B2I Service
+
+1. On the **Services** page, click the B2I Service to go to its details page, where you can see that the port number has been exposed.
+
+2. Access the Service at `http://<node IP>:<NodePort>/<artifact name>/`.
+
+   {{< notice note >}}
+
+   You may need to open the port in your security groups and configure port forwarding rules depending on your deployment environment.
+
+   {{</ notice >}}
+
+## Use the Image Builder to Build an Image
+
+The example above implements the entire workflow of B2I by creating a Service. Alternatively, you can use the Image Builder directly to build an image based on an artifact, but this method does not publish the image to Kubernetes.
+
+![build-binary](/images/docs/v3.3/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/build-binary.png)
+
+{{< notice note >}}
+
+Make sure you have created a Secret for Docker Hub. For more information, see [Create the Most Common Secrets](../../../project-user-guide/configuration/secrets/#create-the-most-common-secrets).
+
+{{</ notice >}}
+
+### Step 1: Upload an artifact
+
+1.
Log in to KubeSphere as `project-regular` and go to your project. + +2. Select **Image Builders** from the navigation bar and click **Create**. + +3. In the displayed dialog box, select **Binary** and click **Next**. + +4. On the **Build Settings** page, provide the following information accordingly and click **Create**. + + **Artifact File**: Download [b2i-binary](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-binary) and upload it to KubeSphere. + + **Build Environment**: Select **kubesphere/s2i-binary:v2.1.0**. + + **Image Name**: Customize an image name. + + **Image Tag**: The image tag. Enter `latest`. + + **Target Image Registry**: Select the Docker Hub Secret as the image is pushed to Docker Hub. + +5. On the **Image Builder** page, you can see that the image is being built. + +### Step 2: Check results + +1. Wait for a while and you can see the status of the image builder has reached **Successful**. + +2. Click this image builder to go to its details page. Under **Job Records**, click icon on the right of a record to see building logs. You can see `Build completed successfully` at the end of the log if everything runs normally. + +3. Go to the **Jobs** page, and you can see the corresponding Job of the image has been created successfully. + +4. In your Docker Hub repository, you can see that KubeSphere has pushed the image to the repository with the expected tag. + diff --git a/content/en/docs/v3.4/project-user-guide/image-builder/s2i-and-b2i-webhooks.md b/content/en/docs/v3.4/project-user-guide/image-builder/s2i-and-b2i-webhooks.md new file mode 100644 index 000000000..3b89fba20 --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/image-builder/s2i-and-b2i-webhooks.md @@ -0,0 +1,81 @@ +--- +title: "Configure S2I and B2I Webhooks" +keywords: 'KubeSphere, Kubernetes, S2I, Source-to-Image, B2I, Binary-to-Image, Webhook' +description: 'Learn how to configure S2I and B2I webhooks.' +linkTitle: "Configure S2I and B2I Webhooks" +weight: 10650 +--- + +KubeSphere provides Source-to-Image (S2I) and Binary-to-Image (B2I) features to automate image building and pushing and application deployment. In KubeSphere v3.1.x and later versions, you can configure S2I and B2I webhooks so that your Image Builder can be automatically triggered when there is any relevant activity in your code repository. + +This tutorial demonstrates how to configure S2I and B2I webhooks. + +## Prerequisites + +- You need to enable the [KubeSphere DevOps System](../../../pluggable-components/devops/). +- You need to create a workspace, a project (`demo-project`) and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create an S2I Image Builder and a B2I Image Builder. For more information, refer to [Source to Image: Publish an App without a Dockerfile](../source-to-image/) and [Binary to Image: Publish an Artifact to Kubernetes](../binary-to-image/). + +## Configure an S2I Webhook + +### Step 1: Expose the S2I trigger Service + +1. Log in to the KubeSphere web console as `admin`. Click **Platform** in the upper-left corner and then select **Cluster Management**. + +2. In **Services** under **Application Workloads**, select **kubesphere-devops-system** from the drop-down list and click **s2ioperator-trigger-service** to go to its details page. + +3. 
Click **More** and select **Edit External Access**.
+
+4. In the displayed dialog box, select **NodePort** from the drop-down list for **Access Method** and then click **OK**.
+
+   {{< notice note >}}
+
+   This tutorial selects **NodePort** for demonstration purposes. You can also select **LoadBalancer** based on your needs.
+
+   {{</ notice >}}
+
+5. You can view the **NodePort** on the details page. It is going to be included in the S2I webhook URL.
+
+### Step 2: Configure an S2I webhook
+
+1. Log out of KubeSphere and log back in as `project-regular`. Go to `demo-project`.
+
+2. In **Image Builders**, click the S2I Image Builder to go to its details page.
+
+3. You can see an auto-generated link shown in **Remote Trigger**. Copy `/s2itrigger/v1alpha1/general/namespaces/demo-project/s2ibuilders/felixnoo-s2i-sample-latest-zhd/` as it is going to be included in the S2I webhook URL.
+
+4. Log in to your GitHub account and go to the source code repository used for the S2I Image Builder. Go to **Webhooks** under **Settings** and then click **Add webhook**.
+
+5. In **Payload URL**, enter `http://<node IP>:<NodePort>/s2itrigger/v1alpha1/general/namespaces/demo-project/s2ibuilders/felixnoo-s2i-sample-latest-zhd/`. You can select trigger events based on your needs and then click **Add webhook**. This tutorial selects **Just the push event** for demonstration purposes.
+
+   {{< notice note >}}
+
+   `<node IP>` is your own IP address, `<NodePort>` is the NodePort you get in step 1, and `/s2itrigger/v1alpha1/general/namespaces/demo-project/s2ibuilders/felixnoo-s2i-sample-latest-zhd/` is from the S2I remote trigger link. Make sure you use your own IP, Service NodePort, and S2I remote trigger link. You may also need to configure necessary port forwarding rules and open the port in your security groups depending on where your Kubernetes cluster is deployed.
+
+   {{</ notice >}}
+
+6. Once the webhook is added, you can click the webhook to view delivery details in **Recent Deliveries**. You can see a green tick if the Payload URL is valid.
+
+7. After you finish all the above operations, the S2I Image Builder will be automatically triggered if there is a push event to the source code repository.
+
+## Configure a B2I Webhook
+
+You can follow the same steps to configure a B2I webhook.
+
+1. Expose the S2I trigger Service.
+
+2. View the **Remote Trigger** on the details page of your B2I Image Builder.
+
+3. Add the payload URL in the source code repository. The B2I payload URL format is the same as that of the S2I payload URL.
+
+   {{< notice note >}}
+
+   You may need to configure necessary port forwarding rules and open the port in your security groups depending on where your Kubernetes cluster is deployed.
+
+   {{</ notice >}}
+
+4. The B2I Image Builder will be automatically triggered if a relevant event occurs in the source code repository.
+
+
+
+
diff --git a/content/en/docs/v3.4/project-user-guide/image-builder/s2i-introduction.md b/content/en/docs/v3.4/project-user-guide/image-builder/s2i-introduction.md
new file mode 100644
index 000000000..329670856
--- /dev/null
+++ b/content/en/docs/v3.4/project-user-guide/image-builder/s2i-introduction.md
@@ -0,0 +1,39 @@
+---
+title: "S2I Workflow and Logic"
+keywords: 'KubeSphere, Kubernetes, Docker, S2I, Source-to-Image'
+description: 'Understand how S2I works and why it works in the expected way.'
+linkTitle: "S2I Workflow and Logic"
+weight: 10630
+---
+
+Source-to-Image (S2I) is an automation tool for building images from source code.
S2I injects source code into an Image Builder for compiling and then automatically packages the compiled code into a Docker image. + +For more information about how to use S2I in KubeSphere, refer to [Source to Image: Publish an App without a Dockerfile](../source-to-image/). Besides, you can refer to the code repositories [S2IOperator](https://github.com/kubesphere/s2ioperator#source-to-image-operator) and [S2IRun](https://github.com/kubesphere/s2irun#s2irun) for more details. + +## S2I Workflow and Logic + +### Image Builder + +For interpreted languages like Python and Ruby, the build-time and runtime environments for an application are typically the same. For example, a Ruby-based Image Builder usually contains Bundler, Rake, Apache, GCC, and other packages needed to set up a runtime environment. The following diagram describes the build workflow. + +![s2i-builder](/images/docs/v3.3/project-user-guide/image-builder/s2i-intro/s2i-builder.png) + +### How S2I works + +S2I performs the following steps: + +1. Start a container from the Image Builder with the application source code injected into a specified directory. +2. Execute the `assemble` script from the Image Builder to build that source code into a ready-to-run application by installing dependencies and moving the source code into a working directory. +3. Set the `run` script provided by the Image Builder as the image entrypoint for starting the container, and then commit a new image as the application image to meet user needs. + +See the S2I workflow chart as below. + +![s2i-flow](/images/docs/v3.3/project-user-guide/image-builder/s2i-intro/s2i-flow.png) + +### Runtime Image + +For compiled languages like Go, C, C++, or Java, the dependencies necessary for compiling will increase the size of resulting images. To build slimmer images, S2I uses a phased build workflow with unnecessary files removed from images. An artifact, which is an executable like a Jar file or binary file, will be extracted when building finishes in the Image Builder, and then injected into a Runtime Image for execution. + +See the building workflow as below. + +![s2i-runtime-build](/images/docs/v3.3/project-user-guide/image-builder/s2i-intro/s2i-runtime-build.png) \ No newline at end of file diff --git a/content/en/docs/v3.4/project-user-guide/image-builder/s2i-templates.md b/content/en/docs/v3.4/project-user-guide/image-builder/s2i-templates.md new file mode 100644 index 000000000..d013fcdd8 --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/image-builder/s2i-templates.md @@ -0,0 +1,324 @@ +--- +title: "Customize S2I Templates" +keywords: 'KubeSphere, Kubernetes, Docker, S2I, Source-to-Image' +description: 'Customize S2I templates and understand different template parameters.' +linkTitle: "Customize S2I Templates" +weight: 10640 +--- + +Once you have understood the workflow and logic of Source-to-Image (S2I), you can customize Image Builder templates (i.e. S2I/B2I templates) based on your projects to extend S2I capabilities. KubeSphere provides several common Image Builder templates based on different languages, such as [Python](https://github.com/kubesphere/s2i-python-container/) and [Java](https://github.com/kubesphere/s2i-java-container/). + +This tutorial demonstrates how to create an Image Builder that contains an NGINX service. If you need to use runtime images in your project, refer to [this document](https://github.com/kubesphere/s2irun/blob/master/docs/runtime_image.md) for more information about how to create a runtime image. 
+ +## Prerequisites + +S2I template customization can be divided into two parts. + +- Part 1: S2I Image Builder customization + - assemble (required): the `assemble` script that builds application artifacts from source code. + - run (required): the `run` script that executes an application. + - save-artifacts (optional): the `save-artifacts` script that manages all dependencies in an incremental building process. + - usage (optional): the script that provides instructions. + - test (optional): the script for testing. +- Part 2: definition of S2I template + +You need to have the required elements for S2I template customization ready in advance. + +{{< notice note >}} + +The Image Builder is compatible with that of OpenShift, and you can reuse it in KubeSphere. For more information about S2I Image Builders, refer to [S2IRun](https://github.com/kubesphere/s2irun/blob/master/docs/builder_image.md#s2i-builder-image-requirements). + +{{}} + +## Create an Image Builder + +### Step 1: Prepare an S2I directory + +1. The [S2I command line tool](https://github.com/openshift/source-to-image/releases) provides an easy-to-use command to initialize a base directory structure required by the Builder. Run the following commands to install the S2I CLI. + + ```bash + $ wget https://github.com/openshift/source-to-image/releases/download/v1.1.14/source-to-image-v1.1.14-874754de-linux-386.tar.gz + $ tar -xvf source-to-image-v1.1.14-874754de-linux-386.tar.gz + $ ls + s2i source-to-image-v1.1.14-874754de-linux-386.tar.gz sti + $ cp s2i /usr/local/bin + ``` + +2. This tutorial uses `nginx-centos7` as the name of the Image Builder. Run the `s2i create` command to initialize the base directory structure. + + ```bash + s2i create nginx-centos7 s2i-builder-docs + ``` + +3. The directory structure is initialized as follows. + + ``` + s2i-builder-docs/ + Dockerfile - a standard Dockerfile to define the Image Builder + Makefile - a script for testing and building the Image Builder + test/ + run - a script that runs the application to test whether the Image Builder is working properly + test-app/ - directory of the test application + s2i/bin + assemble - a script that builds the application + run - a script that runs the application + usage - a script that prints the usage of the Image Builder + ``` + +### Step 2: Modify the Dockerfile + +A Dockerfile installs all of the necessary tools and libraries that are needed to build and run an application. This file will also copy the S2I scripts into the output image. + +Modify the Dockerfile as follows to define the Image Builder. + +#### Dockerfile + +``` +# nginx-centos7 +FROM kubespheredev/s2i-base-centos7:1 + +# Here you can specify the maintainer for the image that you're building +LABEL maintainer="maintainer name " + +# Define the current version of the application +ENV NGINX_VERSION=1.6.3 + +# Set the labels that are used for KubeSphere to describe the Image Builder. 
+LABEL io.k8s.description="Nginx Webserver" \ + io.k8s.display-name="Nginx 1.6.3" \ + io.kubesphere.expose-services="8080:http" \ + io.kubesphere.tags="builder,nginx,html" + +# Install the nginx web server package and clean the yum cache +RUN yum install -y epel-release && \ + yum install -y --setopt=tsflags=nodocs nginx && \ + yum clean all + +# Change the default port for nginx +RUN sed -i 's/80/8080/' /etc/nginx/nginx.conf +RUN sed -i 's/user nginx;//' /etc/nginx/nginx.conf + +# Copy the S2I scripts to /usr/libexec/s2i in the Image Builder +COPY ./s2i/bin/ /usr/libexec/s2i + +RUN chown -R 1001:1001 /usr/share/nginx +RUN chown -R 1001:1001 /var/log/nginx +RUN chown -R 1001:1001 /var/lib/nginx +RUN touch /run/nginx.pid +RUN chown -R 1001:1001 /run/nginx.pid +RUN chown -R 1001:1001 /etc/nginx + +USER 1001 + +# Set the default port for applications built using this image +EXPOSE 8080 + +# Modify the usage script in your application dir to inform the user how to run this image. +CMD ["/usr/libexec/s2i/usage"] +``` + +{{< notice note >}} + +S2I scripts will use the flags defined in the Dockerfile as parameters. If you need to use a base image different from those provided by KubeSphere, refer to [S2I Scripts](https://github.com/kubesphere/s2irun/blob/master/docs/builder_image.md#s2i-scripts). + +{{}} + +### Step 3: Create S2I Scripts + +1. Create an `assemble` script as follows to copy the configuration file and static contents to the target container. + + ```bash + #!/bin/bash -e + + if [[ "$1" == "-h" ]]; then + exec /usr/libexec/s2i/usage + fi + + echo "---> Building and installing application from source..." + if [ -f /tmp/src/nginx.conf ]; then + mv /tmp/src/nginx.conf /etc/nginx/nginx.conf + fi + + if [ "$(ls -A /tmp/src)" ]; then + mv /tmp/src/* /usr/share/nginx/html/ + fi + ``` + + {{< notice note >}} + + By default, `s2i build` places the application source code in `/tmp/src`. The above commands copy the application source code to the working directory `/opt/app-root/src` defined by `kubespheredev/s2i-base-centos7:1`. + + {{}} + +2. Create a `run` script as follows. In this tutorial, it only starts the `nginx` server. + + ```bash + #!/bin/bash -e + + exec /usr/sbin/nginx -g "daemon off;" + ``` + + {{< notice note >}} + + This tutorial uses the `exec` command to execute the host process of `nginx` server to let all signals sent from `docker` be received by `nginx` while `nginx` can use the standard input and output streams of the container. Besides, the `save-artifacts` script allows a new build to reuse content from a previous version of application image. The `save-artifacts` script can be deleted because this tutorial does not implement incremental building. + + {{}} + +3. Create a `usage` script as follows. It prints out instructions on how to use the image. 
+ + ```bash + #!/bin/bash -e + cat < 48f8574c05df + Step 2/17 : LABEL maintainer="Runze Xia " + ---> Using cache + ---> d60ebf231518 + Step 3/17 : ENV NGINX_VERSION=1.6.3 + ---> Using cache + ---> 5bd34674d1eb + Step 4/17 : LABEL io.k8s.description="Nginx Webserver" io.k8s.display-name="Nginx 1.6.3" io.kubesphere.expose-services="8080:http" io.kubesphere.tags="builder,nginx,html" + ---> Using cache + ---> c837ad649086 + Step 5/17 : RUN yum install -y epel-release && yum install -y --setopt=tsflags=nodocs nginx && yum clean all + ---> Running in d2c8fe644415 + + ………… + ………… + ………… + + Step 17/17 : CMD ["/usr/libexec/s2i/usage"] + ---> Running in c24819f6be27 + Removing intermediate container c24819f6be27 + ---> c147c86f2cb8 + Successfully built c147c86f2cb8 + Successfully tagged kubespheredev/nginx-centos7-s2ibuilder-sample:latest + ``` + +3. With the Image Builder created, run the following command to create an application image. + + ```bash + $ s2i build ./test/test-app kubespheredev/nginx-centos7-s2ibuilder-sample:latest sample-app + ---> Building and installing application from source... + Build completed successfully + ``` + + {{< notice note >}} + + Following the logic defined in the `assemble` script, S2I creates an application image using the Image Builder as a base and injecting the source code from the `test/test-app` directory. + + {{}} + +4. Run the following command to run the application image. + + ```bash + docker run -p 8080:8080 sample-app + ``` + + You can access the Nginx application at `http://localhost:8080`. + +### Step 5: Push the image and create an S2I template + +Once you finish testing the S2I Image Builder locally, you can push the image to your custom image repository. You also need to create a YAML file as the S2I Builder template as follows. + +#### s2ibuildertemplate.yaml + +```yaml +apiVersion: devops.kubesphere.io/v1alpha1 +kind: S2iBuilderTemplate +metadata: + labels: + controller-tools.k8s.io: "1.0" + builder-type.kubesphere.io/s2i: "s2i" + name: nginx-demo +spec: + containerInfo: + - builderImage: kubespheredev/nginx-centos7-s2ibuilder-sample + codeFramework: nginx # type of code framework + defaultBaseImage: kubespheredev/nginx-centos7-s2ibuilder-sample # default Image Builder (can be replaced by a customized image) + version: 0.0.1 # Builder template version + description: "This is an S2I builder template for NGINX builds whose result can be run directly without any further application server." # Builder template description +``` + +### Step 6: Use the S2I template on KubeSphere + +1. Run the following command to submit the S2I template created above to KubeSphere. + + ```bash + $ kubectl apply -f s2ibuildertemplate.yaml + s2ibuildertemplate.devops.kubesphere.io/nginx created + ``` + +2. You can find the customized S2I template available in **Build Environment** when you create an S2I build on KubeSphere. + +## S2I Template Parameters Definition + +Refer to the following detailed descriptions of S2I template labels passed as parameters for frontend classification. + +| Label Name | Option | Definition | +| ------------------------------------- | ---------------------- | ------------------------------------------------------------ | +| builder-type.kubesphere.io/s2i: "s2i" | "s2i" | The type of this template is S2I, which builds images based on application source code. | +| builder-type.kubesphere.io/b2i | "b2i" | The type of this template is B2I, which builds images based on binary files or other artifacts. 
| +| binary-type.kubesphere.io | "jar", "war", "binary" | This type is complementary to the type of B2I and will be required when B2I is selected. For example, select the type of "jar" when a JAR package is provided. In KubeSphere v2.1.1 and later, it is also allowed to customize B2I templates. | + +Refer to the following detailed descriptions of S2I template parameters. The required parameters are marked with an asterisk. + +| Parameter | Type | Definition | +| ------------------------------------------ | -------- | ------------------------------------------------------------ | +| *containerInfo | []struct | The information about Image Builder. | +| *containerInfo.builderImage | string | S2I Image Builder, such as kubesphere/java-8-centos7:v2.1.0. | +| containerInfo.runtimeImage | string | S2I Runtime Image, such as kubesphere/java-8-runtime:v2.1.0. | +| containerInfo.buildVolumes | []string | The information about mounted volumes. The format is "volume_name:mount_path", such as ["s2i_java_cache:/tmp/artifacts","test_cache:test_path"]. | +| containerInfo.runtimeArtifacts | []struct | The list of original path and target path for the output artifact; only add it for phased building. | +| containerInfo.runtimeArtifacts.source | string | The original path of artifact in Image Builder. | +| containerInfo.runtimeArtifacts.destination | string | The target path of artifact in Runtime Image. | +| containerInfo.runtimeArtifacts.keep | bool | Whether to keep the data in the output image. | +| *defaultBaseImage | string | The default Image Builder. | +| *codeFramework | string | The code framework type, such as Java and Ruby. | +| environment | []struct | The list of environment variables in the building process. | +| environment.key | string | The name of environment variables. | +| environment.type | string | The type of environment variable keys. | +| environment.description | string | The description of environment variables. | +| environment.optValues | []string | The list of parameters for environment variables. | +| environment.required | bool | Whether the environment variable is required to be set. | +| environment.defaultValue | string | The default value of environment variables. | +| environment.value | string | The value of environment variables. | +| iconPath | string | The application name. | +| version | string | The version of S2I template. | +| description | string | The description of the template's functions and usage. | + diff --git a/content/en/docs/v3.4/project-user-guide/image-builder/source-to-image.md b/content/en/docs/v3.4/project-user-guide/image-builder/source-to-image.md new file mode 100644 index 000000000..859c7c0d1 --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/image-builder/source-to-image.md @@ -0,0 +1,113 @@ +--- +title: "Source to Image: Publish an App without a Dockerfile" +keywords: 'KubeSphere, Kubernetes, Docker, S2I, Source-to-Image' +description: 'Use S2I to import a Java sample project in KubeSphere, create an image and publish it to Kubernetes.' +linkTitle: "Source to Image: Publish an App without a Dockerfile" +weight: 10610 +--- + +Source-to-Image (S2I) is a toolkit and workflow for building reproducible container images from source code. S2I produces ready-to-run images by injecting source code into a container image and letting the container prepare that source code for execution. KubeSphere integrates S2I to automatically build images and publish them to Kubernetes without any Dockerfile. 
+
+This tutorial demonstrates how to use S2I to import source code of a Java sample project into KubeSphere by creating a Service. Based on the source code, the KubeSphere Image Builder will create a Docker image, push it to a target repository, and publish it to Kubernetes.
+
+![build-process](/images/docs/v3.3/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/build-process.png)
+
+## Prerequisites
+
+- You need to enable the [KubeSphere DevOps System](../../../pluggable-components/devops/) as S2I is integrated into it.
+- You need to create a [GitHub](https://github.com/) account and a [Docker Hub](https://www.dockerhub.com/) account. GitLab and Harbor are also supported. This tutorial uses a GitHub repository to provide the source code for building and pushes an image to Docker Hub.
+- You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/).
+- Set a dedicated CI node for building images. This is not mandatory but recommended for development and production environments as it caches dependencies and reduces build time. For more information, see [Set a CI Node for Dependency Caching](../../../devops-user-guide/how-to-use/devops-settings/set-ci-node/).
+
+## Use Source-to-Image (S2I)
+
+### Step 1: Fork the example repository
+
+Log in to GitHub and fork the GitHub repository [devops-maven-sample](https://github.com/kubesphere/devops-maven-sample) to your personal GitHub account.
+
+### Step 2: Create Secrets
+
+Log in to KubeSphere as `project-regular`. Go to your project and create a Secret for Docker Hub and GitHub respectively. For more information, see [Create the Most Common Secrets](../../../project-user-guide/configuration/secrets/#create-the-most-common-secrets).
+
+{{< notice note >}}
+
+You do not need to create the GitHub Secret if your forked repository is open to the public.
+
+{{</ notice >}}
+
+### Step 3: Create a Service
+
+1. In the same project, navigate to **Services** under **Application Workloads** and click **Create**.
+
+2. Choose **Java** under **Create Service from Source Code**, name it `s2i-demo`, and click **Next**.
+
+   {{< notice note >}}
+
+   KubeSphere has integrated common S2I templates such as Java, Node.js, and Python. If you want to use other languages or customize your S2I templates, see [Customize S2I Templates](../s2i-templates/).
+
+   {{</ notice >}}
+
+3. On the **Build Settings** page, provide the following information accordingly and click **Next**.
+
+   **Service Type**: Select **Stateless Service** for this example. For more information about different Services, see [Service Type](../../../project-user-guide/application-workloads/services/#service-type).
+
+   **Build Environment**: Select **kubesphere/java-8-centos7:v2.1.0**.
+
+   **Code Repository URL**: The source code repository address (currently, only Git is supported). You can specify the code branch and the relative path in the source code repository. The URL supports HTTP and HTTPS. Paste the forked repository URL (your own repository address) into this field.
+
+   **Code Repository Branch**: The branch that is used for image building. Enter `master` for this tutorial. You can enter `dependency` for a cache test.
+
+   **Code Repository Key**: You do not need to provide any Secret for a public repository. Select the GitHub Secret if you want to use a private repository.
+
+   **Image Name**: Customize an image name. As this tutorial will push an image to Docker Hub, enter `dockerhub_username/s2i-sample`. `dockerhub_username` is your Docker ID; make sure it has permission to push and pull images.
+
+   **Image Tag**: The image tag. Enter `latest`.
+
+   **Target Image Registry**: Select the Docker Hub Secret as the image is pushed to Docker Hub.
+
+   **Advanced Settings**: You can define the code relative path. Use the default `/` for this field.
+
+4. On the **Pod Settings** page, scroll down to **Port Settings** to set the access policy for the container. Select **HTTP** for **Protocol**, customize the name (for example, `http-1`), and enter `8080` for both **Container Port** and **Service Port**.
+
+5. Scroll down to **Health Check** and select it. Set **Readiness Check** by filling out the following parameters. Click **√** when you finish setting the probe and then click **Next** to continue.
+
+   **HTTP Request**: Select **HTTP** as the protocol, enter `/` as the path (root path in this tutorial), and enter `8080` as the port exposed.
+
+   **Initial Delay (s)**: The number of seconds after the container has started before the probe is initiated. Enter `30` for this field.
+
+   **Timeout (s)**: The number of seconds after which the probe times out. Enter `10` for this field.
+
+   For other fields, use the default value directly. For more information about how to configure probes and set other parameters on the **Container Settings** page, see [Pod Settings](../../../project-user-guide/application-workloads/container-image-settings/).
+
+6. On the **Storage Settings** page, you can add a volume for the container. For more information, see [Volumes](../../../project-user-guide/storage/volumes/). Click **Next** to continue.
+
+7. On the **Advanced Settings** page, select **External Access** and select **NodePort** as the access method. Click **Create** to finish the whole process.
+
+8. Click **Image Builders** from the navigation bar and you can see that the example image is being built.
+
+### Step 4: Check results
+
+1. Wait for a while and you can see that the status of the image builder has reached **Successful**.
+
+2. Click this image builder to go to its details page. Under **Job Records**, click the icon on the right of a record to see building logs. You can see `Build completed successfully` at the end of the log if everything runs normally.
+
+3. Go back to the **Services**, **Deployments**, and **Jobs** pages, and you can see that the corresponding Service, Deployment, and Job of the image have all been created successfully.
+
+4. In your Docker Hub repository, you can see that KubeSphere has pushed the image to the repository with the expected tag.
+
+### Step 5: Access the S2I Service
+
+1. On the **Services** page, click the S2I Service to go to its details page.
+
+2. To access the Service, you can either use the endpoint with the `curl` command or visit `<node IP>:<NodePort>`. For example:
+
+   ```bash
+   $ curl 10.10.131.44:8080
+   Really appreciate your star, that is the power of our life.
+   ```
+
+   {{< notice note >}}
+
+   If you want to access the Service outside the cluster, you may need to open the port in your security groups and configure port forwarding rules depending on your deployment environment.
+ + {{}} \ No newline at end of file diff --git a/content/en/docs/v3.4/project-user-guide/storage/_index.md b/content/en/docs/v3.4/project-user-guide/storage/_index.md new file mode 100644 index 000000000..2f56225bd --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/storage/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Volume Management" +weight: 10300 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/project-user-guide/storage/volume-snapshots.md b/content/en/docs/v3.4/project-user-guide/storage/volume-snapshots.md new file mode 100644 index 000000000..835a8aba4 --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/storage/volume-snapshots.md @@ -0,0 +1,78 @@ +--- +title: "Volume Snapshots" +keywords: 'KubeSphere, Kubernetes, Volume, Snapshots' +description: 'Learn how to manage a snapshot of a persistent volume on KubeSphere.' +linkTitle: "Volume Snapshots" +weight: 10320 +--- + +Many storage systems provide the ability to create a snapshot of a persistent volume. A snapshot represents a point-in-time copy of a volume. It can be used either to provision a new volume (pre-populated with the snapshot data) or to restore the existing volume to a previous state (represented by the snapshot). For more information, see [the Kubernetes documentation](https://kubernetes.io/docs/concepts/storage/volume-snapshots/). + +This tutorial demonstrates how to create and use a volume snapshot. + +## Prerequisites + +- You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +- You need to install Kubernetes 1.17 or higher. + +- Your underlying storage plugin supports snapshots. + +- You have an available PVC so that you can create a snapshot for it. For more information, see [Volumes](../volumes/). + +- You need to create a [volume snapshot class](../../../cluster-administration/snapshotclass/). + +## Create a Volume Snapshot + +You can create a volume snapshot using either of the following ways. + +### Method 1: From the Volume Snapshot Page + +1. Log in to the web console of KubeSphere as `project-regular`. On the navigation pane on the left, choose **Storage > Volume Snapshots**. + +2. On the **Volume Snapshots** page on the right, click **Create**. + +3. On the **Create Snapshot** page that is displayed, select a persistent volume claim that supports snapshots, enter a snapshot name, select a snapshot volume class, and then click **OK**. You can view the created volume snapshot in the volume snapshot list. + +4. Click the Volume Snapshot Content tab, and you can view details of the volume snapshot, such as its status, capacity, and volume snapshot class. +### Method 2: From the Persistent Volume Claims Page + +1. Log in to the web console of KubeSphere as `project-regular`. On the **Persistent Volume Claims** page of your project, select a volume for which you want to create a snapshot. + +2. On the details page, choose **More > Create Snapshot**. + +3. In the displayed dialog box, set a name for the snapshot which serves as a unique identifier and select a **Volume Snapshot Class**. Click **OK** to finish. You can view the created volume snapshot in the volume snapshot list. + +## Use a Snapshot to Create a PVC + +There are two ways for you to use a snapshot to create a PVC. + +### From the Snapshot Details Page + +1. 
Log in to the web console of KubeSphere as `project-regular`. On a snapshot's details page, click **Create Volume** to use the snapshot. Generally, the steps are the same as creating a PVC directly. + +2. In the displayed dialog box, set a name for the PVC. Click **Next** to continue. + + {{< notice note >}} + + The resource you create is a Persistent Volume Claim (PVC). + + {{}} + +3. On the **Storage Settings** tab, select an access mode and click **Next**. + +4. On the **Advanced Settings** tab, add metadata for the PVC, such as labels and annotations. Click **Create** to finish. + + The created PVC is displayed on the **Persistent Storage Claims** page. + +### From the Persistent Storage Claims Page + +1. Log in to the web console of KubeSphere as `project-regular`. On the **Persistent Storage Claims** page of a project, click **Create**. + +2. In the displayed dialog box, set a name for the volume. Click **Next** to continue. + +3. On the **Storage Settings** tab, select **From Volume Snapshot** under the **Creation Method** section. Select a snapshot and an access mode, and click **Next** to continue. + +4. On the **Advanced Settings** tab, add metadata for the PVC, such as labels and annotations. Click **Create** to finish creating the PVC. + + The PVC created is displayed on the **Persistent Storage Claims** page. \ No newline at end of file diff --git a/content/en/docs/v3.4/project-user-guide/storage/volumes.md b/content/en/docs/v3.4/project-user-guide/storage/volumes.md new file mode 100644 index 000000000..3ec63ef55 --- /dev/null +++ b/content/en/docs/v3.4/project-user-guide/storage/volumes.md @@ -0,0 +1,239 @@ +--- +title: "Persistent Volume Claims" +keywords: 'Kubernetes, Persistent Volumes, Persistent Volume Claims, Volume Clone, Volume Snapshot, Volume Expansion' +description: 'Learn how to create, edit, and mount a PVC on KubeSphere.' +linkTitle: "Persistent Volume Claims" +weight: 10310 +--- + +When you create an application workload in a project, you can create a [Persistent Volume Claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) (PVC) for it. A PVC allows you to create a storage request, further provisioning persistent storage to applications. More specifically, persistent storage is managed by persistent volume resources. + +Cluster administrators configure Persistent Volumes (PVs) using storage classes. In other words, to create a PVC in a project, your cluster must have an available storage class. If no customized storage class is configured when you install KubeSphere, [OpenEBS](https://openebs.io/) is installed in your cluster by default to provide local persistent volumes. However, it does not support dynamic volume provisioning. In a production environment, it is recommended you configure storage classes in advance to provide persistent storage services for your apps. + +This tutorial demonstrates how to create a PVC, mount a PVC, and use PVC features from its details page. + +## Prerequisites + +- You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +- If you want to dynamically provision a volume, you need to [configure a storage class](../../../cluster-administration/storageclass/) that supports dynamic provisioning. 
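+
+A quick way to confirm which storage classes are available for dynamic provisioning, assuming you have `kubectl` access to the cluster:
+
+```bash
+# List storage classes; the PROVISIONER column shows the backing storage plugin.
+kubectl get storageclass
+```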
+ +## Create a PVC + +KubeSphere binds a PVC to a PV that satisfies the request you set for the PVC, such as capacity and access mode. When you create an application workload, you can select the desired PVC and mount it to your workload. + +1. Log in to the web console of KubeSphere as `project-regular` and go to a project. Click **Persistent Volume Claims** under **Storage** from the navigation bar, and you see all PVCs that have been mounted to workloads in the project. + +2. To create a PVC, click **Create** on the **Persistent Volume Claims** page. + +3. In the displayed dialog box, set a name (for example, `demo-volume`) for the PVC, select a project, and click **Next**. + + {{< notice note >}} + + You can see the PVC's manifest file in YAML format by enabling **Edit YAML** in the upper-right corner. You can edit the manifest file directly to create a PVC. Alternatively, you can follow the steps below to create a PVC using the console. + + {{}} + +4. On the **Storage Settings** page, select a method to create a PVC. + + - **From Storage Class**. You can configure storage classes both [before](../../../installing-on-linux/persistent-storage-configurations/understand-persistent-storage/) and [after](../../../cluster-administration/storageclass/) the installation of KubeSphere. + + - **From Volume Snapshot**. To use a snapshot to create a PVC, you must create a volume snapshot first. + + Select **From Storage Class** in this example. For more information about how to create a PVC by snapshot, see [Volume Snapshots](../volume-snapshots/). + +5. Select a storage class from the drop-down list. This tutorial uses `csi-standard`, a standard storage class provided by QingCloud Platform. You can select your own storage class. + +6. Depending on the storage class you select, you may see different access modes in this section as some PVs only support specific access modes. In total, there are three access modes. + + - **ReadWriteOnce**: The volume can be mounted as read-write by a single node. + - **ReadOnlyMany**: The volume can be mounted as read-only by many nodes. + - **ReadWriteMany**: The volume can be mounted as read-write by many nodes. + + Select the desired access mode. + +7. Under **Volume Capacity**, specify the size of the PVC. Click **Next** to continue. + +8. On the **Advanced Settings** page, you can add metadata to the PVC, such as **Labels** and **Annotations**. They can be used as identifiers to search for and schedule resources. + +9. Click **Create** to finish creating a PVC. + +10. A created PVC displays on the **Persistent Volume Claims** page in a project. After it is mounted to a workload, it will turn to **Mounted** under the **Mount Status** column. + + {{< notice note >}} + +Newly-created PVCs are also displayed on the **Persistent Volume Claims** page in **Cluster Management**. Project users such as `project-regular` can view PVs under the **Persistent Volumes** column. Cluster administrators have the responsibility to view and keep track of created PVCs in a project. Conversely, if a cluster administrator creates a PVC for a project in **Cluster Management**, the PVC is also displayed on the **Persistent Volume Claims** page in a project. 
+ +{{}} + +## Mount a PVC + +When you create application workloads, such as [Deployments](../../../project-user-guide/application-workloads/deployments/), [StatefulSets](../../../project-user-guide/application-workloads/statefulsets/) and [DaemonSets](../../../project-user-guide/application-workloads/daemonsets/), you can mount PVCs to them. + +{{< notice note >}} + +This tutorial does not explain how to create workloads. For more information, see related guides in [Application Workloads](../../application-workloads/deployments/). + +{{}} + +On the **Storage Settings** page, you can see there are different volumes that you can mount to your workload. + +- **Add Persistent Volume Claim Template** (Only available to [StatefulSets](../../../project-user-guide/application-workloads/statefulsets/)): A PVC template is used to dynamically create a PVC. Mount the PVC of the StorageClass type to the Pod by setting the name, storage class, access mode, capacity and path, which are all indicated by the field `volumeClaimTemplates`. + +- **Mount Volume**: Support emptyDir volumes and PVCs. + + In **Mount Volume**, there are three kinds of volumes: + + - **Persistent Volume**: Use a PVC to mount. + + Persistent volumes can be used to save users' persistent data. You need to create PVCs in advance so that you can choose an existing PVC from the list. + + - **Temporary Volume**: Use an emptyDir volume to mount. + + The temporary volume represents [emptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir), which is first created when a Pod is assigned to a node, and exists as long as that Pod is running on that node. An emptyDir volume offers an empty directory from which containers in the Pod can read and write. Depending on your deployment environment, an emptyDir volume can be stored on any medium that is backing the node, which could be a disk or SSD. When the Pod is removed from the node for any reason, the data in the emptyDir is deleted forever. + + - **HostPath Volume**: Use a hostPath volume to mount. + + A HostPath volume mounts a file or directory from the host node's filesystem into your Pod. This is not something that most Pods will need, but it offers a powerful escape hatch for some applications. For more information, refer to [the Kubernetes documentation](https://kubernetes.io/docs/concepts/storage/volumes/#hostpath). + +- **Mount ConfigMap or Secret**: Support key-value pairs of [ConfigMaps](../../../project-user-guide/configuration/configmaps/) or [Secrets](../../../project-user-guide/configuration/secrets/). + + A [Secret](https://kubernetes.io/docs/concepts/storage/volumes/#secret) volume is used to provide sensitive information, such as passwords, OAuth tokens, and SSH keys, for Pods. Secret volumes are backed by tmpfs (a RAM-backed filesystem) so they are never written to non-volatile storage. + + A [ConfigMap](https://kubernetes.io/docs/concepts/storage/volumes/#configmap) is used to store configuration data in the form of key-value pairs. The ConfigMap resource provides a way to inject configuration data into Pods. The data stored in a ConfigMap object can be referenced in a volume of type `configMap` and then consumed by containerized applications running in a Pod. ConfigMaps are often used in the following cases: + + - Set the value of environment variables. + - Set command parameters in containers. + - Create a configuration file in volumes. 
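+
+For reference, the mount options above correspond to ordinary Kubernetes volume definitions in the workload manifest. The following is a minimal sketch rather than the exact YAML the console generates: `demo-volume` is the PVC created earlier in this tutorial, while the image and mount paths are placeholders.
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: demo-deployment
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: demo
+  template:
+    metadata:
+      labels:
+        app: demo
+    spec:
+      containers:
+        - name: app
+          image: nginx:1.25            # placeholder image
+          volumeMounts:
+            - name: data               # persistent volume backed by a PVC
+              mountPath: /var/lib/demo
+            - name: cache              # temporary volume (emptyDir)
+              mountPath: /tmp/cache
+      volumes:
+        - name: data
+          persistentVolumeClaim:
+            claimName: demo-volume     # the PVC created in "Create a PVC"
+        - name: cache
+          emptyDir: {}
+```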
+ +## View and Manage PVCs + +After a PVC is created, you can see detailed information of it, edit it, or leverage PVC features. To view PVC details, click a PVC on the **Persistent Volume Claims** page. + +### View Details of a PVC + +On the **Persistent Volume Claims** page, click a PVC to view its details. + +1. Click the **Resource Status** tab, view the PVC usage and mounted pods. + +2. Click the **Metadata** tab, view the labels and annotations of the PVC. + +3. Click the **Events** tab, view the events of the PVC. + +4. Click the **Snapshots** tab, view the snapshots of the PVC. +### Edit a PVC + +On the details page, you can click **Edit Information** to change its basic information. Click **More**, and you can edit its YAML file or delete this PVC. + +To delete a PVC, make sure the PVC is not mounted to any workload. To unmount a PVC, go to the details page of a workload. From the **More** drop-down list, click **Edit Settings**. On the **Edit Settings** dialog box, click **Storage**. Hover your mouse on the PVC, and click the dustbin icon to unmount it. + +If the status of a PVC remains **Terminating** for a long time after you clicked **Delete**, manually delete it by using the following command: + +```bash +kubectl patch pvc -p '{"metadata":{"finalizers":null}}' +``` + +### Use PVC Features + +From the **More** drop-down menu, there are other additional options provided by KubeSphere based on the underlying storage plugin, also known as `Storage Capability`. PVC features include the following: + +- **Clone**: Create a same PVC. +- **Create Snapshot**: Create a volume snapshot which can be used to create PVCs. For more information, see [Volume Snapshots](../volume-snapshots/). +- **Expand**: Increase the size of a PVC. Keep in mind that you cannot reduce the size of a PVC on the console due to possible data loss. + +For more information about `Storage Capability`, see [Design Documentation](https://github.com/kubesphere/community/blob/master/sig-storage/concepts-and-designs/storage-capability-interface.md). + +{{< notice note >}} + +Some in-tree or special CSI plugins may not be covered by `Storage Capability`. If KubeSphere does not display the correct features in your cluster, you can make adjustments according to [this guide](https://github.com/kubesphere/kubesphere/issues/2986). + +{{}} + +### Monitor PVCs + +KubeSphere retrieves metric data of PVCs with `Filesystem` mode from Kubelet to monitor PVCs including capacity usage and inode usage. + +For more information about PVC monitoring, see [Research on Volume Monitoring](https://github.com/kubesphere/kubesphere/issues/2921). + +## View the PV List and Manage PVs + ### View the PV List + +1. Click the **Persistent Volumes** tab on the **Persistent Volume Claims** page to view the PV list page that provides the following information: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+   | Parameter | Description |
+   | --- | --- |
+   | Name | Name of the PV. It is specified by the field `.metadata.name` in the manifest file of the PV. |
+   | Status | Current status of the PV. It is specified by the field `.status.phase` in the manifest file of the PV, including:<br>**Available**: The PV is available and not yet bound to a PVC.<br>**Bound**: The PV is bound to a PVC.<br>**Deleting**: The PV is being deleted.<br>**Failed**: The PV is unavailable. |
+   | Capacity | Capacity of the PV. It is specified by the field `.spec.capacity.storage` in the manifest file of the PV. |
+   | Access Mode | Access mode of the PV. It is specified by the field `.spec.accessModes` in the manifest file of the PV, including:<br>**RWO**: The PV can be mounted as read-write by a single node.<br>**ROX**: The PV can be mounted as read-only by multiple nodes.<br>**RWX**: The PV can be mounted as read-write by multiple nodes. |
+   | Reclaim Policy | Reclaim policy of the PV. It is specified by the field `.spec.persistentVolumeReclaimPolicy` in the manifest file of the PV, including:<br>**Retain**: When a PVC is deleted, the PV still exists and requires manual reclamation.<br>**Delete**: Remove both the PV and the associated storage assets in the volume plugin infrastructure.<br>**Recycle**: Erase the data on the PV and make it available again for a new PVC. |
+   | Creation Time | Time when the PV was created. |
+ +2. Click icon on the right of a PV, and you can perform the following: + + - **Edit Information**: Edit information of the PV. + - **Edit YAML**: Edit the YAML file of the PV. + - **Delete**: Delete the PV. A PV in the **Bound** status cannot be deleted. + +### View the PV Details Page + +1. Click the name of a PV to go to its details page. + +2. On the details page, click **Edit Information** to edit the basic information of the PV. + +3. Click **More**, and you can perform the following: + + - **View YAML**: View the YAML file of the PV. + - **Delete**: Delete the PV and return to the list page. A PV in the **Bound** status cannot be deleted. + +4. Click the **Resource Status** tab to view the PVC to which the PV is bound. + +5. Click the **Metadata** tab to view the labels and annotations of the PV. + +6. Click the **Events** tab to view the events of the PV. \ No newline at end of file diff --git a/content/en/docs/v3.4/quick-start/_index.md b/content/en/docs/v3.4/quick-start/_index.md new file mode 100644 index 000000000..5441c5300 --- /dev/null +++ b/content/en/docs/v3.4/quick-start/_index.md @@ -0,0 +1,16 @@ +--- +title: "Quickstarts" +description: "Help you to better understand KubeSphere with detailed graphics and contents" +layout: "second" + +linkTitle: "Quickstarts" + +weight: 2000 + +icon: "/images/docs/v3.3/docs.svg" + +--- + +Quickstarts include six hands-on lab exercises that help you quickly get started with KubeSphere. It is highly recommended that you go though all of these tutorials to explore the basic feature of KubeSphere. + + diff --git a/content/en/docs/v3.4/quick-start/all-in-one-on-linux.md b/content/en/docs/v3.4/quick-start/all-in-one-on-linux.md new file mode 100644 index 000000000..586d4ffad --- /dev/null +++ b/content/en/docs/v3.4/quick-start/all-in-one-on-linux.md @@ -0,0 +1,259 @@ +--- +title: "All-in-One Installation of Kubernetes and KubeSphere on Linux" +keywords: 'KubeSphere, Kubernetes, All-in-One, Installation' +description: 'Install KubeSphere on Linux with a minimal installation package. The tutorial serves as a basic kick-starter for you to understand the container platform, paving the way for learning the following guides.' +linkTitle: "All-in-One Installation on Linux" +weight: 2100, +showSubscribe: true +--- + +For those who are new to KubeSphere and looking for a quick way to discover the [container platform](https://kubesphere.io/), the all-in-one mode is your best choice to get started. It features rapid deployment and hassle-free configurations with KubeSphere and Kubernetes all provisioned on your machine. + +## Video Demonstration + +{{< youtube PtVQZVb3AgE >}} + +## Step 1: Prepare a Linux Machine + +To get started with all-in-one installation, you only need to prepare one host according to the following requirements for hardware and operating system. + +### Hardware recommendations + + + + + + + + + + + + + + + + + + + + + + + + + +
+| OS | Minimum Requirements |
+| --- | --- |
+| Ubuntu 16.04, 18.04, 20.04, 22.04 | 2 CPU cores, 4 GB memory, and 40 GB disk space |
+| Debian Buster, Stretch | 2 CPU cores, 4 GB memory, and 40 GB disk space |
+| CentOS 7.x | 2 CPU cores, 4 GB memory, and 40 GB disk space |
+| Red Hat Enterprise Linux 7 | 2 CPU cores, 4 GB memory, and 40 GB disk space |
+| SUSE Linux Enterprise Server 15 / openSUSE Leap 15.2 | 2 CPU cores, 4 GB memory, and 40 GB disk space |
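+
+A quick way to check whether your machine meets these minimums, assuming a standard Linux shell:
+
+```bash
+# Number of CPU cores, available memory, and free disk space on the root filesystem.
+nproc
+free -h
+df -h /
+```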
+ +{{< notice note >}} + +The preceding system requirements and the following instructions are for the default minimal installation without any pluggable components enabled. If your machine has at least 8 CPU cores and 16 GB memory, it is recommended that you enable all components. For more information, see [Enable Pluggable Components](../../pluggable-components/). + +{{}} + +### Node requirements + +- The node can be accessed through `SSH`. +- `sudo`/`curl`/`openssl`/`tar` should be used. + +### Container runtimes + +Your cluster must have an available container runtime. If you use KubeKey to set up a cluster, KubeKey installs the latest version of Docker by default. Alternatively, you can manually install Docker or other container runtimes before you create a cluster. + + + + + + + + + + + + + + + + + + + + + + +
+| Supported Container Runtime | Version |
+| --- | --- |
+| Docker | 19.3.8+ |
+| containerd | Latest |
+| CRI-O (experimental, not fully tested) | Latest |
+| iSula (experimental, not fully tested) | Latest |
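+
+If a container runtime is already installed, you can verify that it meets the version requirement above. For example, assuming Docker is in use:
+
+```bash
+# Print the installed Docker client and server versions.
+docker version
+```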
+ +### Dependency requirements + +KubeKey can install Kubernetes and KubeSphere together. The dependency that needs to be installed may be different based on the Kubernetes version to be installed. You can refer to the following list to see if you need to install relevant dependencies on your node in advance. + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| Dependency | Kubernetes Version ≥ 1.18 | Kubernetes Version < 1.18 |
+| --- | --- | --- |
+| socat | Required | Optional but recommended |
+| conntrack | Required | Optional but recommended |
+| ebtables | Optional but recommended | Optional but recommended |
+| ipset | Optional but recommended | Optional but recommended |
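+
+If any required dependency is missing, you can usually install it with the node's package manager before running KubeKey. A minimal sketch, assuming an Ubuntu or Debian node (use `yum` or `dnf` on CentOS/RHEL):
+
+```bash
+# Install the dependencies listed above on each node.
+sudo apt-get update
+sudo apt-get install -y socat conntrack ebtables ipset
+```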
+ +{{< notice info >}} + +Developed in Go, KubeKey represents a brand-new installation tool as a replacement for the ansible-based installer used before. KubeKey provides users with flexible installation choices, as they can install KubeSphere and Kubernetes separately or install them at one time, which is convenient and efficient. + +{{}} + +### Network and DNS requirements + +- Make sure the DNS address in `/etc/resolv.conf` is available. Otherwise, it may cause some issues of DNS in the cluster. +- If your network configuration uses firewall rules or security groups, you must ensure infrastructure components can communicate with each other through specific ports. It is recommended that you turn off the firewall. For more information, see [Port Requirements](../../installing-on-linux/introduction/port-firewall/). +- Supported CNI plugins: Calico and Flannel. Others (such as Cilium and Kube-OVN) may also work but note that they have not been fully tested. + +{{< notice tip >}} + +- It is recommended that your OS be clean (without any other software installed). Otherwise, there may be conflicts. +- It is recommended that a registry mirror (a booster) be prepared if you have trouble downloading images from `dockerhub.io`. For more information, see [Configure a Booster for Installation](../../faq/installation/configure-booster/). + +{{}} + +## Step 2: Download KubeKey + +Perform the following steps to download KubeKey. + +{{< tabs >}} + +{{< tab "Good network connections to GitHub/Googleapis" >}} + +Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or run the following command: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "Poor network connections to GitHub/Googleapis" >}} + +Run the following command first to make sure you download KubeKey from the correct zone. + +```bash +export KKZONE=cn +``` + +Run the following command to download KubeKey: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +After you download KubeKey, if you transfer it to a new machine also with poor network connections to Googleapis, you must run `export KKZONE=cn` again before you proceed with the following steps. + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +The commands above download the latest release of KubeKey. You can change the version number in the command to download a specific version. + +{{}} + +Make `kk` executable: + +```bash +chmod +x kk +``` + +## Step 3: Get Started with Installation + +You only need to run one command for all-in-one installation. The template is as follows: + +```bash +./kk create cluster [--with-kubernetes version] [--with-kubesphere version] +``` + +To create a Kubernetes cluster with KubeSphere installed, refer to the following command as an example: + +```bash +./kk create cluster --with-kubernetes v1.22.12 --with-kubesphere v3.3.2 +``` + +{{< notice note >}} + +- Recommended Kubernetes versions for KubeSphere 3.3: v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and * v1.24.x. For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatability. Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x. If you do not specify a Kubernetes version, KubeKey installs Kubernetes v1.23.10 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../installing-on-linux/introduction/kubekey/#support-matrix). 
+- For all-in-one installation, you do not need to change any configuration. +- If you do not add the flag `--with-kubesphere` in the command in this step, KubeSphere will not be deployed. KubeKey will install Kubernetes only. If you add the flag `--with-kubesphere` without specifying a KubeSphere version, the latest version of KubeSphere will be installed. +- KubeKey will install [OpenEBS](https://openebs.io/) to provision LocalPV for the development and testing environment by default, which is convenient for new users. For other storage classes, see [Persistent Storage Configurations](../../installing-on-linux/persistent-storage-configurations/understand-persistent-storage/). + +{{}} + +After you run the command, you will see a table for environment check. For details, see [Node requirements](#node-requirements) and [Dependency requirements](#dependency-requirements). Type `yes` to continue. + +## Step 4: Verify the Installation + +Run the following command to check the result. + +```bash +kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f +``` + +The output displays the IP address and port number of the web console, which is exposed through `NodePort 30880` by default. Now, you can access the console at `:30880` with the default account and password (`admin/P@88w0rd`). + +```bash +##################################################### +### Welcome to KubeSphere! ### +##################################################### + +Console: http://192.168.0.2:30880 +Account: admin +Password: P@88w0rd + +NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. + +##################################################### +https://kubesphere.io 20xx-xx-xx xx:xx:xx +##################################################### +``` + +{{< notice note >}} + +You may need to configure port forwarding rules and open the port in your security group so that external users can access the console. + +{{}} + +After logging in to the console, you can check the status of different components in **System Components**. You may need to wait for some components to be up and running if you want to use related services. You can also use `kubectl get pod --all-namespaces` to inspect the running status of KubeSphere workloads. + +## Enable Pluggable Components (Optional) + +This guide is used only for the minimal installation by default. For more information about how to enable other components in KubeSphere, see [Enable Pluggable Components](../../pluggable-components/). + +## Code Demonstration + diff --git a/content/en/docs/v3.4/quick-start/create-workspace-and-project.md b/content/en/docs/v3.4/quick-start/create-workspace-and-project.md new file mode 100644 index 000000000..329e47215 --- /dev/null +++ b/content/en/docs/v3.4/quick-start/create-workspace-and-project.md @@ -0,0 +1,239 @@ +--- +title: "Create Workspaces, Projects, Users and Roles" +keywords: 'KubeSphere, Kubernetes, Multi-tenant, Workspace, User, Role, Project' +description: 'Take advantage of the multi-tenant system of KubeSphere for fine-grained access control at different levels.' 
+linkTitle: "Create Workspaces, Projects, Users and Roles" +weight: 2300 +--- + +This quickstart demonstrates how to create workspaces, roles and users which are required for other tutorials. Meanwhile, you will learn how to create projects and DevOps projects within your workspace where your workloads are running. After reading this tutorial, you will become familiar with the multi-tenant management system of KubeSphere. + +## Prerequisites + +KubeSphere needs to be installed in your machine. + +## Architecture + +The multi-tenant system of KubeSphere features **three** levels of hierarchical structure which are cluster, workspace, and project. A project in KubeSphere is a Kubernetes [namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/). + +You are required to create a new [workspace](../../workspace-administration/what-is-workspace/) to work with instead of using the system workspace where system resources are running and most of them are viewable only. In addition, it is strongly recommended different tenants work with corresponding roles in a workspace for security considerations. + +You can create multiple workspaces within a KubeSphere cluster. Under each workspace, you can also create multiple projects. Each level has multiple built-in roles. Besides, KubeSphere allows you to create roles with customized authorization as well. The KubeSphere hierarchy is applicable for enterprise users with different teams or groups, and different roles within each team. + +## Hands-on Lab + +### Step 1: Create a user + +After KubeSphere is installed, you need to add different users with varied roles to the platform so that they can work at different levels on various resources. Initially, you only have one default user, which is `admin`, granted the role `platform-admin`. In the first step, you create a sample user `ws-manager`. + +1. Log in to the web console as `admin` with the default user and password (`admin/P@88w0rd`). + + {{< notice tip >}} + For account security, it is highly recommended that you change your password the first time you log in to the console. To change your password, select **User Settings** in the drop-down list in the upper-right corner. In **Password Settings**, set a new password. You also can change the console language in **User Settings**. + {{}} + +2. Click **Platform** in the upper-left corner, and then select **Access Control**. In the left nevigation pane, select **Platform Roles**. The built-in roles are shown in the following table. + + + + + + + + + + + + + + + + + + + + +
+   | Built-in Roles | Description |
+   | --- | --- |
+   | `platform-self-provisioner` | Create workspaces and become the admin of the created workspaces. |
+   | `platform-regular` | Has no access to any resources before joining a workspace or cluster. |
+   | `platform-admin` | Manage all resources on the platform. |
+ + {{< notice note >}} + Built-in roles are created automatically by KubeSphere and cannot be edited or deleted. + {{}} + +3. In **Users**, click **Create**. In the displayed dialog box, provide all the necessary information (marked with *) and select `platform-self-provisioner` for **Platform Role**. + + Click **OK** after you finish. The new user will display on the **Users** page. + + {{< notice note >}} + If you have not specified a platform role, the created user cannot perform any operations. In this case, you need to create a workspace and invite the created user to the workspace. + {{}} + +4. Repeat the previous steps to create other users that will be used in other tutorials. + + {{< notice tip >}} + - To log out, click your username in the upper-right corner and select **Log Out**. + - The following usernames are for example only. You can change them as needed. + {{}} + + + + + + + + + + + + + + + + + + + + + + +
+   | User | Assigned Platform Role | User Permissions |
+   | --- | --- | --- |
+   | `ws-admin` | `platform-regular` | Manage all resources in a workspace after being invited to the workspace (this user is used to invite new members to a workspace in this example). |
+   | `project-admin` | `platform-regular` | Create and manage projects and DevOps projects, and invite new members to the projects. |
+   | `project-regular` | `platform-regular` | `project-regular` will be invited to a project or DevOps project by `project-admin`. This user will be used to create workloads, pipelines and other resources in a specified project. |
+ +5. On **Users** page, view the created users. + + {{< notice note >}} + + You can click the icon on the right of the username to enable or disable the user. Additionally, you can batch disable and enable users. + + {{}} +### Step 2: Create a workspace + +As the basic logic unit for the management of projects, DevOps projects and organization members, workspaces underpin the multi-tenant system of KubeSphere. + +1. In the navigation pane on the left, click **Workspaces**. You can see there is only one default workspace `system-workspace`, where system-related components and services run. Deleting this workspace is not allowed. + +2. On the **Workspaces** page on the right, click **Create**, set a name for the new workspace (for example, `demo-workspace`) and set user `ws-admin` as the workspace manager. + +3. Click **Create** after you finish. + + {{< notice note >}} + + If you have enabled the [multi-cluster feature](../../multicluster-management/), you need to [assign an available cluster](../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/#select-available-clusters-when-you-create-a-workspace) (or multiple clusters) to the workspace so that projects can be created on the cluster(s) later. + + {{}} + +4. Log out of the console and log back in as `ws-admin`. In **Workspace Settings**, select **Workspace Members** and click **Invite**. + +5. Invite both `project-admin` and `project-regular` to the workspace. Assign them the role `workspace-self-provisioner` and `workspace-viewer` respectively and click **OK**. + + {{< notice note >}} +The actual role name follows a naming convention: `-`. For example, in this workspace named `demo-workspace`, the actual role name of the role `viewer` is `demo-workspace-viewer`. + {{}} + +5. After you add both `project-admin` and `project-regular` to the workspace, click **OK**. In **Workspace Members**, you can see three members listed. + + + + + + + + + + + + + + + + + + + + + + + +
+   | User | Assigned Workspace Role | Role Permissions |
+   | --- | --- | --- |
+   | `ws-admin` | `demo-workspace-admin` | Manage all resources under the workspace (use this user to invite new members to the workspace). |
+   | `project-admin` | `demo-workspace-self-provisioner` | Create and manage projects and DevOps projects, and invite new members to join the projects. |
+   | `project-regular` | `demo-workspace-viewer` | `project-regular` will be invited by `project-admin` to join a project or DevOps project. The user can be used to create workloads, pipelines, etc. |
+ +### Step 3: Create a project + +In this step, you create a project using user `project-admin` created in the previous step. A project in KubeSphere is the same as a namespace in Kubernetes, which provides virtual isolation for resources. For more information, see [Namespaces](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/). + +1. Log in to KubeSphere as `project-admin`. In **Projects**, click **Create**. + +2. Enter the project name (for example, `demo-project`) and click **OK**. You can also add an alias and description for the project. + +3. In **Projects**, click the project created just now to view its detailed information. + +4. On the **Overview** page of the project, the project quota remains unset by default. You can click **Edit Quotas** and specify [resource requests and limits](../../workspace-administration/project-quotas/) as needed (for example, 1 core for CPU and 1000Gi for memory). + +5. Invite `project-regular` to this project and grant this user role `operator`. + + {{< notice info >}} + The user granted role `operator` is a project maintainer who can manage resources other than users and roles in the project. + {{}} + +6. Before creating a [Route](../../project-user-guide/application-workloads/routes/) which is [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) in Kubernetes, you need to enable a gateway for this project. The gateway is an [NGINX Ingress controller](https://github.com/kubernetes/ingress-nginx) running in the project. To set a gateway, go to **Gateway Settings** in **Project Settings** and click **Enable Gateway**. User `project-admin` is still used in this step. + +7. Select the access method **NodePort** and click **OK**. + +8. Under **Project Gateway**, you can obtain the Gateway Address and the NodePort of http and https in the list. + + {{< notice note >}} + If you want to expose services using the type `LoadBalancer`, you need to use the LoadBalancer plugin of cloud providers. If your Kubernetes cluster is running in a bare metal environment, it is recommended that you use [OpenELB](https://github.com/kubesphere/openelb) as the LoadBalancer plugin. + {{}} + +### Step 4: Create a role + +After you finish the above steps, you know that users can be granted different roles at different levels. The roles used in previous steps are all built-in ones created by KubeSphere. In this step, you will learn how to define a customized role to meet the needs in your work. + +1. Log in to the KubeSphere web console as `admin` again and go to **Access Control**. + +2. Click **Platform Roles** on the left navigation pane, and then click **Create** on the right. + + {{< notice note >}} + + The preset roles on the **Platform Roles** page cannot be edited and deleted. + + {{}} + +3. In the **Create Platform Role** dialog box, set the name (for example, `clusters-admin`), alias, and description of the role, and click **Edit Permissions**. + + {{< notice note >}} + + This example demonstrates how to create a role responsible for cluster management. + + {{}} + +4. In the **Edit Permissions** dialog box, set the role permissions (for example, select **Cluster Management**) and click **OK**. + + {{< notice note >}} + + * In this example, the role `clusters-admin` contains the permissions **Cluster Management** and **Cluster Viewing**. + * Some permissions are interdependent. The dependency is specified by the **Depends on** field under each permission. 
+ * When a permission is selected, the permission it depends on is automatically selected. + * To deselect a permission, you need to deselect its subordinate permissions first. + + {{}} + +5. On the **Platform Roles** page, you can click the name of the created role to view the role details and click icon to edit the role, edit the role permissions, or delete the role. + +6. On the **Users** page, you can assign the role to a user when you create a user or edit an existing user. + + +### Step 5: Create a DevOps project (Optional) + +{{< notice note >}} + +To create a DevOps project, you must install the KubeSphere DevOps system in advance, which is a pluggable component providing CI/CD pipelines, Binary-to-image, Source-to-image, and more. For more information about how to enable DevOps, see [KubeSphere DevOps System](../../pluggable-components/devops/). + +{{}} + +1. Log in to the console as `project-admin`. In **DevOps Projects**, click **Create**. + +2. Enter the DevOps project name (for example, `demo-devops`) and click **OK**. You can also add an alias and description for the project. + +3. In **DevOps Projects**, click the project created just now to view its detailed information. + +4. Go to **Project Management** and select **Project Members**. Click **Invite** to invite user `project-regular` and grant the role `operator`, who is allowed to create pipelines and credentials. + +You are now familiar with the multi-tenant management system of KubeSphere. In other tutorials, user `project-regular` will also be used to demonstrate how to create applications and resources in a project or DevOps project. diff --git a/content/en/docs/v3.4/quick-start/deploy-bookinfo-to-k8s.md b/content/en/docs/v3.4/quick-start/deploy-bookinfo-to-k8s.md new file mode 100644 index 000000000..2d058ba5a --- /dev/null +++ b/content/en/docs/v3.4/quick-start/deploy-bookinfo-to-k8s.md @@ -0,0 +1,94 @@ +--- +title: "Deploy and Access Bookinfo" +keywords: 'KubeSphere, Kubernetes, Bookinfo, Istio' +description: 'Explore the basics of KubeSphere service mesh by deploying an example application Bookinfo.' +linkTitle: "Deploy and Access Bookinfo" +weight: 2400 +--- + +[Istio](https://istio.io/), as an open-source service mesh solution, provides powerful features of traffic management for microservices. The introduction of traffic management from the official website of [Istio](https://istio.io/latest/docs/concepts/traffic-management/) is as follows: + +*Istio’s traffic routing rules let you easily control the flow of traffic and API calls between services. Istio simplifies configuration of service-level properties like circuit breakers, timeouts, and retries, and makes it easy to set up important tasks like A/B testing, canary rollouts, and staged rollouts with percentage-based traffic splits. It also provides out-of-box failure recovery features that help make your application more robust against failures of dependent services or the network.* + +To provide consistent user experiences of managing microservices, KubeSphere integrates Istio on the container platform. This tutorial demonstrates how to deploy a sample application Bookinfo composed of four separate microservices and access it through a NodePort. + +## Prerequisites + +- You need to enable [KubeSphere Service Mesh](../../pluggable-components/service-mesh/). + +- You need to finish all tasks in [Create Workspaces, Projects, Users and Roles](../create-workspace-and-project/). + +- You need to enable **Application Governance**. 
For more information, see [Set a Gateway](../../project-administration/project-gateway/#set-a-gateway). + + {{< notice note >}} + + You need to enable **Application Governance** so that you can use the Tracing feature. Once it is enabled, check whether an annotation (for example, `nginx.ingress.kubernetes.io/service-upstream: true`) is added for your Route (Ingress) if the Route is inaccessible. + {{}} + +## What is Bookinfo + +Bookinfo is composed of the following four separate microservices. There are three versions of the **reviews** microservice. + +- The **productpage** microservice calls the **details** and **reviews** microservices to populate the page. +- The **details** microservice contains book information. +- The **reviews** microservice contains book reviews. It also calls the **ratings** microservice. +- The **ratings** microservice contains book ranking information that accompanies a book review. + +The following figure shows the end-to-end architecture of the application. For more information, see [Bookinfo Application](https://istio.io/latest/docs/examples/bookinfo/). + +![bookinfo](/images/docs/v3.3/quickstart/deploy-bookinfo-to-k8s/bookinfo.png) + +## Hands-on Lab + +### Step 1: Deploy Bookinfo + +1. Log in to the console as `project-regular` and go to your project (`demo-project`). Go to **Apps** under **Application Workloads**, and then click **Deploy Sample App** on the right of the page. + +2. Click **Next** in the displayed dialog box where required fields are pre-populated and relevant components are already set. You do not need to change the settings and just click **Create** on the final page. + + {{< notice note >}} + +KubeSphere creates the hostname automatically. To change the hostname, hover over the default route rule and click icon to edit it. For more information, see [Create a Microservices-based App](../../project-user-guide/application/compose-app/). + +{{}} + +1. In **Workloads**, verify that the status of all four Deployments is `Running`, indicating that the app has been created successfully. + + {{< notice note >}}It may take a few minutes before the Deployments are up and running. +{{}} + +### Step 2: Access Bookinfo + +1. In **Apps**, go to **Composed Apps** and click the app `bookinfo` to see its details page. + + {{< notice note >}}If you do not see the app in the list, refresh your page. + {{}} + +2. On the details page, record the hostname and port number of the app, which will be used to access Bookinfo. + +3. As the app will be accessed outside the cluster through a NodePort, you need to open the port in your security group for outbound traffic and set port forwarding rules if necessary. + +4. Edit your local host file (`/etc/hosts`) by adding an entry in it to map the hostname to the IP address. For example: + + ```bash + 139.198.179.20 productpage.demo-project.192.168.0.2.nip.io + ``` + + {{< notice note >}} + +Do not copy the preceding content to your local host file. Replace it with your own IP address and hostname. + {{}} + +5. When you finish, click **Access Service** to access the app. + +6. On the app details page, click **Normal user** in the lower-left corner. + +7. In the following figure, you can notice that only **Reviewer1** and **Reviewer2** are displayed without any stars in the **Book Reviews** section. This is the status of this app version. To explore more features of traffic management, you can implement a [canary release](../../project-user-guide/grayscale-release/canary-release/) for this app. 
+ + ![ratings-page](/images/docs/v3.3/quickstart/deploy-bookinfo-to-k8s/ratings-page.png) + + {{< notice note >}} + +KubeSphere provides three kinds of grayscale strategies based on Istio, including [blue-green deployment](../../project-user-guide/grayscale-release/blue-green-deployment/), [canary release](../../project-user-guide/grayscale-release/canary-release/) and [traffic mirroring](../../project-user-guide/grayscale-release/traffic-mirroring/). + {{}} + diff --git a/content/en/docs/v3.4/quick-start/enable-pluggable-components.md b/content/en/docs/v3.4/quick-start/enable-pluggable-components.md new file mode 100644 index 000000000..84a3e0369 --- /dev/null +++ b/content/en/docs/v3.4/quick-start/enable-pluggable-components.md @@ -0,0 +1,146 @@ +--- +title: "Enable Pluggable Components" +keywords: 'KubeSphere, Kubernetes, pluggable, components' +description: 'Install pluggable components of KubeSphere so that you can explore the container platform in an all-around way. Pluggable components can be enabled both before and after the installation.' +linkTitle: "Enable Pluggable Components" +weight: 2600 +--- + +This tutorial demonstrates how to enable pluggable components of KubeSphere both before and after the installation. Refer to the table below for all pluggable components of KubeSphere. + +| Configuration | Component | Description | +| ---------------- | ------------------------------------- | ------------------------------------------------------------ | +| `alerting` | KubeSphere Alerting System | You can customize alerting policies for workloads and nodes. After an alerting policy is triggered, alert messages can be sent to your recipients through different channels (for example, email and Slack). | +| `auditing` | KubeSphere Auditing Log System | Provide a security-relevant chronological set of records, recording the sequence of activities that happen in the platform, initiated by different tenants. | +| `devops` | KubeSphere DevOps System | Provide an out-of-box CI/CD system based on Jenkins, and automated workflow tools including Source-to-Image and Binary-to-Image. | +| `events` | KubeSphere Events System | Provide a graphical web console for the exporting, filtering and alerting of Kubernetes events in multi-tenant Kubernetes clusters. | +| `logging` | KubeSphere Logging System | Provide flexible logging functions for log query, collection and management in a unified console. Additional log collectors can be added, such as Elasticsearch, Kafka and Fluentd. | +| `metrics_server` | HPA | The Horizontal Pod Autoscaler automatically scales the number of Pods based on needs. | +| `networkpolicy` | Network policy | Allow network isolation within the same cluster, which means firewalls can be set up between certain instances (Pods). | +| `kubeedge` | KubeEdge | Add edge nodes to your cluster and run workloads on them. | +| `openpitrix` | KubeSphere App Store | Provide an app store for Helm-based applications and allow users to manage apps throughout the entire lifecycle. | +| `servicemesh` | KubeSphere Service Mesh (Istio-based) | Provide fine-grained traffic management, observability and tracing, and visualized traffic topology. | +| `ippool` | Pod IP Pool | Create Pod IP pools and assign IP addresses from the Pools to your Pods. | +| `topology` | Service Topology | Integrate [Weave Scope](https://www.weave.works/oss/scope/) to view service-to-service communication (topology) of your apps and containers. 
| + +For more information about each component, see [Overview of Enable Pluggable Components](../../pluggable-components/overview/). + +{{< notice note >}} + +- `multicluster` is not covered in this tutorial. If you want to enable this feature, you need to set a corresponding value for `clusterRole`. For more information, see [Multi-cluster Management](../../multicluster-management/). +- Make sure your machine meets the hardware requirements before the installation. Here is the recommendation if you want to enable all pluggable components: CPU ≥ 8 Cores, Memory ≥ 16 G, Disk Space ≥ 100 G. + +{{}} + +## Enable Pluggable Components Before Installation + +For most of the pluggable components, you can follow the steps below to enable them. If you need to enable [KubeEdge](../../pluggable-components/kubeedge/), [Pod IP Pools](../../pluggable-components/pod-ip-pools/) and [Service Topology](../../pluggable-components/service-topology/), refer to the corresponding tutorials directly. + +### Installing on Linux + +When you implement multi-node installation of KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components. + +1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file `config-sample.yaml`. Modify the file by executing the following command: + + ```bash + vi config-sample.yaml + ``` + + {{< notice note >}} +If you adopt [All-in-one Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a `config-sample.yaml` file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable pluggable components in this mode (for example, for testing purpose), refer to the [following section](#enable-pluggable-components-after-installation) to see how pluggable components can be installed after installation. + {{}} + +2. In this file, enable the pluggable components you want to install by changing `false` to `true` for `enabled`. Here is [the complete file](https://github.com/kubesphere/kubekey/blob/release-2.2/docs/config-example.md) for your reference. Save the file after you finish. + +3. Create a cluster using the configuration file: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +### Installing on Kubernetes + +When you install KubeSphere on Kubernetes, you need to use [ks-installer](https://github.com/kubesphere/ks-installer/) by applying two YAML files as below. + +1. First download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) and edit it. + + ```bash + vi cluster-configuration.yaml + ``` + +2. To enable the pluggable component you want to install, change `false` to `true` for `enabled` under the component in this file. + +3. Save this local file and execute the following commands to start the installation. + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f cluster-configuration.yaml + ``` + +Whether you install KubeSphere on Linux or on Kubernetes, you can check the status of the components you have enabled in the web console of KubeSphere after installation. Go to **System Components**, and you can see the component status. 
+ +## Enable Pluggable Components After Installation + +The KubeSphere web console provides a convenient way for users to view and operate on different resources. To enable pluggable components after installation, you only need to make few adjustments on the console directly. For those who are accustomed to the Kubernetes command-line tool, kubectl, they will have no difficulty in using KubeSphere as the tool is integrated into the console. + +{{< notice note >}} + +If you need to enable [KubeEdge](../../pluggable-components/kubeedge/), [Pod IP Pools](../../pluggable-components/pod-ip-pools/) and [Service Topology](../../pluggable-components/service-topology/), refer to the corresponding tutorials directly. + +{{}} + +1. Log in to the console as `admin`. Click **Platform** in the top-left corner and select **Cluster Management**. + +2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detail page. + + {{< notice info >}} +A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects. + {{}} + +3. In **Custom Resources**, click the three dots on the right of `ks-installer` and select **Edit YAML**. + +4. In this YAML file, enable the pluggable components you want to install by changing `false` to `true` for `enabled`. After you finish, click **OK** to save the configuration. + +5. You can use the web kubectl to check the installation process by executing the following command: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + {{< notice tip >}} + +You can find the web kubectl tool by clicking the hammer icon in the bottom-right corner of the console. + +{{}} + +6. The output will display a message as below if the component is successfully installed. + + ```yaml + ##################################################### + ### Welcome to KubeSphere! ### + ##################################################### + + Console: http://192.168.0.2:30880 + Account: admin + Password: P@88w0rd + + NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. + + ##################################################### + https://kubesphere.io 20xx-xx-xx xx:xx:xx + ##################################################### + ``` + +7. In **System Components**, you can see the status of different components. + + {{< notice tip >}} + +If you do not see relevant components in the above image, some Pods may not be ready yet. You can execute `kubectl get pod --all-namespaces` through kubectl to see the status of Pods. + {{}} diff --git a/content/en/docs/v3.4/quick-start/minimal-kubesphere-on-k8s.md b/content/en/docs/v3.4/quick-start/minimal-kubesphere-on-k8s.md new file mode 100644 index 000000000..d65388114 --- /dev/null +++ b/content/en/docs/v3.4/quick-start/minimal-kubesphere-on-k8s.md @@ -0,0 +1,62 @@ +--- +title: "Minimal KubeSphere on Kubernetes" +keywords: 'KubeSphere, Kubernetes, Minimal, Installation' +description: 'Install KubeSphere on an existing Kubernetes cluster with a minimal installation package. Your Kubernetes cluster can be hosted on cloud or on-premises.' 
+linkTitle: "Minimal KubeSphere on Kubernetes"
+weight: 2200
+showSubscribe: true
+---
+
+In addition to installing KubeSphere on a Linux machine, you can also deploy it on existing Kubernetes clusters. This tutorial demonstrates the general steps of completing a minimal KubeSphere installation on Kubernetes. For more information, see [Installing on Kubernetes](../../installing-on-kubernetes/).
+
+## Prerequisites
+
+- To install KubeSphere 3.3 on Kubernetes, your Kubernetes version must be v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and * v1.24.x. For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatibility. Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x.
+- Make sure your machine meets the minimal hardware requirements: CPU > 1 Core, Memory > 2 GB.
+- A **default** Storage Class in your Kubernetes cluster needs to be configured before the installation.
+
+{{< notice note >}}
+
+- The CSR signing feature is activated in `kube-apiserver` when it is started with the `--cluster-signing-cert-file` and `--cluster-signing-key-file` parameters. See [RKE installation issue](https://github.com/kubesphere/kubesphere/issues/1925#issuecomment-591698309).
+- For more information about the prerequisites of installing KubeSphere on Kubernetes, see [Prerequisites](../../installing-on-kubernetes/introduction/prerequisites/).
+
+{{</ notice >}}
+
+## Video Demonstration
+
+{{< youtube 6wdOBD4gyg4 >}}
+
+## Deploy KubeSphere
+
+After you make sure your machine meets the conditions, perform the following steps to install KubeSphere.
+
+1. Run the following commands to start installation:
+
+   ```bash
+   kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml
+
+   kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml
+   ```
+
+2. After KubeSphere is successfully installed, you can run the following command to view the installation logs:
+
+   ```bash
+   kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f
+   ```
+
+3. Use `kubectl get pod --all-namespaces` to see whether all Pods are running normally in relevant namespaces of KubeSphere. If they are, check the port (`30880` by default) of the console by running the following command:
+
+   ```bash
+   kubectl get svc/ks-console -n kubesphere-system
+   ```
+
+4. Make sure port `30880` is opened in your security group and access the web console through the NodePort (`IP:30880`) with the default account and password (`admin/P@88w0rd`).
+
+5. After logging in to the console, you can check the status of different components in **System Components**. You may need to wait for some components to be up and running if you want to use related services.
+
+## Enable Pluggable Components (Optional)
+
+This guide is used only for the minimal installation by default. For more information about how to enable other components in KubeSphere, see [Enable Pluggable Components](../../pluggable-components/).
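+
+As a quick sketch of what that tutorial covers, you can enable a component after this minimal installation by editing the `ks-installer` ClusterConfiguration object and setting the component's `enabled` field to `true`. The commands below assume `kubectl` access to the cluster and use DevOps only as an example.
+
+```bash
+# Open the ClusterConfiguration consumed by ks-installer.
+kubectl -n kubesphere-system edit clusterconfiguration ks-installer
+
+# In the editor, switch the component you want from false to true, for example:
+#   devops:
+#     enabled: true
+
+# ks-installer picks up the change; follow its logs to watch the progress.
+kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f
+```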
+ +## Code Demonstration + diff --git a/content/en/docs/v3.4/quick-start/wordpress-deployment.md b/content/en/docs/v3.4/quick-start/wordpress-deployment.md new file mode 100644 index 000000000..8eab4069b --- /dev/null +++ b/content/en/docs/v3.4/quick-start/wordpress-deployment.md @@ -0,0 +1,135 @@ +--- +title: "Compose and Deploy WordPress" +keywords: 'KubeSphere, Kubernetes, app, WordPress' +description: 'Learn the entire process of deploying an example application in KubeSphere, including credential creation, volume creation, and component settings.' +linkTitle: "Compose and Deploy WordPress" +weight: 2500 +--- + +## WordPress Introduction + +WordPress is a free and open-source content management system written in PHP, allowing users to build their own websites. A complete WordPress application includes the following Kubernetes objects with MySQL serving as the backend database. + +![WordPress](/images/docs/v3.3/quickstart/wordpress-deployment/WordPress.png) + +## Objective + +This tutorial demonstrates how to create an application (WordPress as an example) in KubeSphere and access it outside the cluster. + +## Prerequisites + +An account `project-regular` is needed with the role of `operator` assigned in one of your projects (the user has been invited to the project). For more information, see [Create Workspaces, Projects, Users and Roles](../create-workspace-and-project/). + +## Estimated Time + +About 15 minutes. + +## Hands-on Lab + +### Step 1: Create Secrets + +#### Create a MySQL Secret + +The environment variable `WORDPRESS_DB_PASSWORD` is the password to connect to the database in WordPress. In this step, you need to create a Secret to store the environment variable that will be used in the MySQL Pod template. + +1. Log in to the KubeSphere console using the account `project-regular`. Go to the detail page of `demo-project` and navigate to **Configuration**. In **Secrets**, click **Create** on the right. + +2. Enter the basic information (for example, name it `mysql-secret`) and click **Next**. On the next page, select **Default** for **Type** and click **Add Data** to add a key-value pair. Enter the Key (`MYSQL_ROOT_PASSWORD`) and Value (`123456`) and click **√** in the lower-right corner to confirm. When you finish, click **Create** to continue. + +#### Create a WordPress Secret + +Follow the same steps above to create a WordPress Secret `wordpress-secret` with the key `WORDPRESS_DB_PASSWORD` and value `123456`. Secrets created display in the list. + +### Step 2: Create a PVC + +1. Go to **Persistent Volume Claims** under **Storage** and click **Create**. + +2. Enter the basic information of the Persistent Volume Claims (PVC), for example, `wordpress-pvc`, and click **Next**. + +3. In **Storage Settings**, you need to choose an available **Storage Class**, and set **Access Mode** and **Volume Capacity**. You can use the default value directly. Click **Next** to continue. + +4. For **Advanced Settings**, you do not need to add extra information for this step and click **Create** to finish. + +### Step 3: Create an application + +#### Add MySQL backend components + +1. Navigate to **Apps** under **Application Workloads**, select **Composed Apps** and click **Create**. + +2. Enter the basic information (for example, `wordpress` for **Name**) and click **Next**. + +3. In **Service Settings**, click **Create Service** to create a service in the app. + +4. Select **Stateful Service** to define the service type. + +5. 
Enter the name for the stateful service (for example, **mysql**) and click **Next**. + +6. In **Containers**, click **Add Container**. + +7. Enter `mysql:5.6` in the search box, press **Enter** and click **Use Default Ports**. After that, do not click **√** in the lower-right corner as the setting is not finished yet. + + {{< notice note >}} + +In **Advanced Settings**, make sure the memory limit is no less than 1000 Mi or MySQL may fail to start due to a lack of memory. + +{{}} + +1. Scroll down to **Environment Variables** and click **From secret**. Enter the name `MYSQL_ROOT_PASSWORD` and choose the resource `mysql-secret` and the key `MYSQL_ROOT_PASSWORD` created in the previous step. Click **√** after you finish and **Next** to continue. + +2. Click **Add Persistent Volume Claim Template** under **Storage Settings**. Enter the PVC name prefix (`mysql`) and **Mount Path** (mode: `ReadAndWrite`, path: `/var/lib/mysql`). + + Click **√** after you finish and click **Next** to continue. + +3. In **Advanced Settings**, you can click **Create** directly or set other options based on your needs. + +#### Add the WordPress frontend component + +12. In **Services** under **Application Workloads**, click **Create** again and select **Stateless Service** this time. Enter the name `wordpress` and click **Next**. + +13. Similar to previous steps, click **Add Container**, enter `wordpress:4.8-apache` in the search box, press **Enter** and click **Use Default Ports**. + +14. Scroll down to **Environment Variables** and click **From secret**. Two environment variables need to be added here. Enter the values as follows. + + - For `WORDPRESS_DB_PASSWORD`, choose `wordpress-secret` and `WORDPRESS_DB_PASSWORD` created in Task 1. + + - Click **Add Environment Variable**, and enter `WORDPRESS_DB_HOST` and `mysql` for the key and value. + + {{< notice warning >}} + +For the second environment variable added here, the value must be the same as the name you set for MySQL in step 5. Otherwise, WordPress cannot connect to the corresponding database of MySQL. + +{{}} + + Click **√** to save it and **Next** to continue. + +1. Under **Storage Settings**, click **Mount Volume**, and then click **Select Persistent Volume Claim**. + +2. Select `wordpress-pvc` created in the previous step, set the mode as `ReadAndWrite`, and enter `/var/www/html` as its mount path. Click **√** to save it, and then click **Next** to continue. + +3. In **Advanced Settings**, you can click **Create** directly or set other options based on your needs. + +4. The frontend component is also set now. Click **Next** to continue. + +5. You can set route rules (Ingress) here or click **Create** directly. + +6. The app will display in the list after you create it. + +### Step 4: Verify resources + +In **Workloads**, check the status of `wordpress-v1` and `mysql-v1` in **Deployments** and **StatefulSets** respectively. If they are running properly, it means WordPress has been created successfully. + +### Step 5: Access WordPress using NodePort + +1. To access the Service outside the cluster, in the navigation pane on the left, click **Application Workloads > Services** first. Click the three dots on the right of `wordpress` and select **Edit External Access**. + +2. Select `NodePort` for **Access Method** and click **OK**. + +3. Click the Service and you can see the port is exposed. + +4. Access this application at `{Node IP}:{NodePort}`. + + {{< notice note >}} + +Make sure the port is opened in your security groups before you access the Service. 
+ +{{}} \ No newline at end of file diff --git a/content/en/docs/v3.4/reference/_index.md b/content/en/docs/v3.4/reference/_index.md new file mode 100644 index 000000000..a30065be7 --- /dev/null +++ b/content/en/docs/v3.4/reference/_index.md @@ -0,0 +1,14 @@ +--- +title: "Reference" +description: "The glossary used by KubeSphere and how to use the KubeSphere API to build your own application" +layout: "second" + +linkTitle: "Reference" + +weight: 17000 + +icon: "/images/docs/v3.3/docs.svg" + +--- + +This chapter contains the glossary that is often used in KubeSphere and the information about the KubeSphere API. diff --git a/content/en/docs/v3.4/reference/api-changes/_index.md b/content/en/docs/v3.4/reference/api-changes/_index.md new file mode 100644 index 000000000..9df9ff639 --- /dev/null +++ b/content/en/docs/v3.4/reference/api-changes/_index.md @@ -0,0 +1,12 @@ +--- +title: "API Changes" +description: "API Change Overview" +layout: "single" + +linkTitle: "API Changes" + +weight: 17300 + +icon: "/images/docs/v3.3/docs.svg" + +--- diff --git a/content/en/docs/v3.4/reference/api-changes/logging.md b/content/en/docs/v3.4/reference/api-changes/logging.md new file mode 100644 index 000000000..2158ff499 --- /dev/null +++ b/content/en/docs/v3.4/reference/api-changes/logging.md @@ -0,0 +1,27 @@ +--- +title: "Logging" +keywords: 'Kubernetes, KubeSphere, API, Logging' +description: 'The API changes of the component **logging** in KubeSphere 3.3.' +linkTitle: "Logging" +weight: 17310 +--- + +The API changes of the component **logging** in KubeSphere 3.3. + +## Time Format + +The time format of query parameters must be Unix timestamps (the number of seconds that has elapsed since the Unix epoch). Milliseconds are no longer allowed. The change affects the parameters `start_time` and `end_time`. + +## Deprecated APIs + +The following APIs are removed: + +- GET /workspaces/{workspace} +- GET /namespaces/{namespace} +- GET /namespaces/{namespace}/workloads/{workload} +- GET /namespaces/{namespace}/pods/{pod} +- The whole log setting API group + +## Fluent Operator + +In KubeSphere 3.3, the whole log setting APIs are removed from the KubeSphere core since the project Fluent Operator is refactored in an incompatible way. Please refer to [Fluent Operator docs](https://github.com/kubesphere/fluentbit-operator) for how to configure log collection in KubeSphere 3.3. \ No newline at end of file diff --git a/content/en/docs/v3.4/reference/api-changes/monitoring.md b/content/en/docs/v3.4/reference/api-changes/monitoring.md new file mode 100644 index 000000000..fea133380 --- /dev/null +++ b/content/en/docs/v3.4/reference/api-changes/monitoring.md @@ -0,0 +1,110 @@ +--- +title: "Monitoring" +keywords: 'Kubernetes, KubeSphere, API, Monitoring' +description: 'The API changes of the component **monitoring** in KubeSphere v3.3.2.' +linkTitle: "Monitoring" +weight: 17320 +--- + +## API Version + +The monitoring API version is bumped to `v1alpha3`. + +## Time Format + +The time format of query parameters must be in Unix timestamps (the number of seconds that has elapsed since the Unix epoch). Decimals are no longer allowed. The change affects the parameters `start`, `end` and `time`. + +## Deprecated Metrics + +In KubeSphere 3.3, the metrics on the left have been renamed to the ones on the right. 
+ +|V2.0|V3.3| +|---|---| +|workload_pod_cpu_usage | workload_cpu_usage| +|workload_pod_memory_usage| workload_memory_usage| +|workload_pod_memory_usage_wo_cache | workload_memory_usage_wo_cache| +|workload_pod_net_bytes_transmitted | workload_net_bytes_transmitted| +|workload_pod_net_bytes_received | workload_net_bytes_received| + +The following metrics have been deprecated and removed. + +|Deprecated Metrics| +|---| +|cluster_workspace_count| +|cluster_account_count| +|cluster_devops_project_count| +|coredns_up_sum| +|coredns_cache_hits| +|coredns_cache_misses| +|coredns_dns_request_rate| +|coredns_dns_request_duration| +|coredns_dns_request_duration_quantile| +|coredns_dns_request_by_type_rate| +|coredns_dns_request_by_rcode_rate| +|coredns_panic_rate| +|coredns_proxy_request_rate| +|coredns_proxy_request_duration| +|coredns_proxy_request_duration_quantile| +|prometheus_up_sum| +|prometheus_tsdb_head_samples_appended_rate| + +New metrics are introduced in KubeSphere 3.3. + +|New Metrics| +|---| +|kubesphere_workspace_count| +|kubesphere_user_count| +|kubesphere_cluser_count| +|kubesphere_app_template_count| + +## Response Fields + +In KubeSphere 3.3, the response fields `metrics_level`, `status` and `errorType` are removed. + +In addition, the field name `resource_name` has been replaced with the specific resource type names. These types are `node`, `workspace`, `namespace`, `workload`, `pod`, `container` and `persistentvolumeclaim`. For example, instead of `resource_name: node1`, you will get `node: node1`. See the example response below: + +```json +{ + "results":[ + { + "metric_name":"node_cpu_utilisation", + "data":{ + "resultType":"vector", + "result":[ + { + "metric":{ + "__name__":"node:node_cpu_utilisation:avg1m", + "node":"master" + }, + "value":[ + 1588841175.979, + "0.04587499999997817" + ] + }, + { + "metric":{ + "__name__":"node:node_cpu_utilisation:avg1m", + "node":"node1" + }, + "value":[ + 1588841175.979, + "0.06379166666670245" + ] + }, + { + "metric":{ + "__name__":"node:node_cpu_utilisation:avg1m", + "node":"node2" + }, + "value":[ + 1588841175.979, + "0.19008333333367772" + ] + } + ] + } + } + ] +} + +``` diff --git a/content/en/docs/v3.4/reference/api-changes/notification.md b/content/en/docs/v3.4/reference/api-changes/notification.md new file mode 100644 index 000000000..692ec3dcf --- /dev/null +++ b/content/en/docs/v3.4/reference/api-changes/notification.md @@ -0,0 +1,15 @@ +--- +title: "Notification" +keywords: 'Kubernetes, KubeSphere, API, Notification' +description: 'The API changes of the component **notification** in KubeSphere v3.1.0.' +linkTitle: "Notification" +weight: 17330 +--- + +## API Version + +The notification API version is bumped to `v2alpha1`. + +## Deprecated API + +In KubeSphere 3.1.0, the APIs of version `v1` are deprecated:. diff --git a/content/en/docs/v3.4/reference/api-docs.md b/content/en/docs/v3.4/reference/api-docs.md new file mode 100644 index 000000000..e4e084d25 --- /dev/null +++ b/content/en/docs/v3.4/reference/api-docs.md @@ -0,0 +1,122 @@ +--- +title: "KubeSphere API" +keywords: 'Kubernetes, KubeSphere, API' +description: 'The REST API is the fundamental fabric of KubeSphere. This guide shows you how to access the KubeSphere API server.' +linkTitle: "KubeSphere API" +weight: 17200 +--- + +## Architecture + +The KubeSphere API server validates and configures data for API objects. The API Server services REST operations and provides the frontend to the cluster's shared state through which all other components interact. 
+
+![ks-apiserver](/images/docs/v3.3/reference/kubesphere-api/ks-apiserver.png)
+
+## Use the KubeSphere API
+
+KubeSphere v3.0 moves the functionalities of **ks-apigateway** and **ks-account** into **ks-apiserver** to make the architecture more compact and clear. In order to use the KubeSphere API, you need to expose **ks-apiserver** to your client.
+
+
+### Step 1: Expose the KubeSphere API service
+
+If you are going to access KubeSphere inside the cluster, you can skip the following section and just use the KubeSphere API server endpoint **`http://ks-apiserver.kubesphere-system.svc`**.
+
+Otherwise, you need to expose the KubeSphere API server endpoint outside the cluster first.
+
+There are many ways to expose a Kubernetes service. For demonstration purposes, this example uses `NodePort`. Change the service type of `ks-apiserver` to `NodePort` by using the following command.
+
+```bash
+$ kubectl -n kubesphere-system patch service ks-apiserver -p '{"spec":{"type":"NodePort"}}'
+$ kubectl -n kubesphere-system get svc
+NAME           TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
+etcd           ClusterIP   10.233.34.220   <none>        2379/TCP       44d
+ks-apiserver   NodePort    10.233.15.31    <none>        80:31407/TCP   49d
+ks-console     NodePort    10.233.3.45     <none>        80:30880/TCP   49d
+```
+
+Now, you can access `ks-apiserver` outside the cluster through a URL like `http://[node ip]:31407`, where `[node ip]` means the IP address of any node in your cluster.
+
+### Step 2: Generate a token
+
+You need to identify yourself before making any call to the API server. The following example uses the password `P#$$w0rd`. The user needs to issue a request to generate a token as below:
+
+```bash
+curl -X POST -H 'Content-Type: application/x-www-form-urlencoded' \
+  'http://[node ip]:31407/oauth/token' \
+  --data-urlencode 'grant_type=password' \
+  --data-urlencode 'username=admin' \
+  --data-urlencode 'password=P#$$w0rd' \
+  --data-urlencode 'client_id=kubesphere' \
+  --data-urlencode 'client_secret=kubesphere'
+```
+
+{{< notice note >}}
+
+Replace `[node ip]` with your actual IP address. You can configure client credentials in `ClusterConfiguration`; by default, both the `client_id` and the `client_secret` are `kubesphere`.
+
+{{</ notice >}}
+
+If the identity is correct, the server will respond as shown in the following output. `access_token` is the token to access the KubeSphere API Server.
+ +```json +{ + "access_token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6ImFkbWluIiwidWlkIjoiYTlhNjJmOTEtYWQ2Yi00MjRlLWIxNWEtZTFkOTcyNmUzNDFhIiwidG9rZW5fdHlwZSI6ImFjY2Vzc190b2tlbiIsImV4cCI6MTYwMDg1MjM5OCwiaWF0IjoxNjAwODQ1MTk4LCJpc3MiOiJrdWJlc3BoZXJlIiwibmJmIjoxNjAwODQ1MTk4fQ.Hcyf-CPMeq8XyQQLz5PO-oE1Rp1QVkOeV_5J2oX1hvU", + "token_type": "Bearer", + "refresh_token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6ImFkbWluIiwidWlkIjoiYTlhNjJmOTEtYWQ2Yi00MjRlLWIxNWEtZTFkOTcyNmUzNDFhIiwidG9rZW5fdHlwZSI6InJlZnJlc2hfdG9rZW4iLCJleHAiOjE2MDA4NTk1OTgsImlhdCI6MTYwMDg0NTE5OCwiaXNzIjoia3ViZXNwaGVyZSIsIm5iZiI6MTYwMDg0NTE5OH0.PerssCLVXJD7BuCF3Ow8QUNYLQxjwqC8m9iOkRRD6Tc", + "expires_in": 7200 +} +``` + +### Step 3: Make the call + +If you have everything you need to access the KubeSphere API server, make the call using the access token acquired above as shown in the following example to get the node list: + +```bash +$ curl -X GET -H "Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6ImFkbWluIiwidWlkIjoiYTlhNjJmOTEtYWQ2Yi00MjRlLWIxNWEtZTFkOTcyNmUzNDFhIiwidG9rZW5fdHlwZSI6ImFjY2Vzc190b2tlbiIsImV4cCI6MTYwMDg1MjM5OCwiaWF0IjoxNjAwODQ1MTk4LCJpc3MiOiJrdWJlc3BoZXJlIiwibmJmIjoxNjAwODQ1MTk4fQ.Hcyf-CPMeq8XyQQLz5PO-oE1Rp1QVkOeV_5J2oX1hvU" \ + -H 'Content-Type: application/json' \ + 'http://[node ip]:31407/kapis/resources.kubesphere.io/v1alpha3/nodes' + +{ + "items": [ + { + "metadata": { + "name": "node3", + "selfLink": "/api/v1/nodes/node3", + "uid": "dd8c01f3-76e8-4695-9e54-45be90d9ec53", + "resourceVersion": "84170589", + "creationTimestamp": "2020-06-18T07:36:41Z", + "labels": { + "a": "a", + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/os": "linux", + "gitpod.io/theia.v0.4.0": "available", + "gitpod.io/ws-sync": "available", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "node3", + "kubernetes.io/os": "linux", + "kubernetes.io/role": "new", + "node-role.kubernetes.io/worker": "", + "topology.disk.csi.qingcloud.com/instance-type": "Standard", + "topology.disk.csi.qingcloud.com/zone": "ap2a" + }, + "annotations": { + "csi.volume.kubernetes.io/nodeid": "{\"disk.csi.qingcloud.com\":\"i-icjxhi1e\"}", + "kubeadm.alpha.kubernetes.io/cri-socket": "/var/run/dockershim.sock", + "node.alpha.kubernetes.io/ttl": "0", + .... +``` + +{{< notice note >}} + +Replace `[node ip]` with your actual IP address. + +{{}} + +## API Reference + +The KubeSphere API swagger JSON file can be found in the repository https://github.com/kubesphere/kubesphere/tree/release-3.3/api. + +- KubeSphere specified the API [swagger json](https://github.com/kubesphere/kubesphere/blob/release-3.1/api/ks-openapi-spec/swagger.json) file. It contains all the APIs that are only applied to KubeSphere. +- KubeSphere specified the CRD [swagger json](https://github.com/kubesphere/kubesphere/blob/release-3.1/api/openapi-spec/swagger.json) file. It contains all the generated CRDs API documentation. It is same as Kubernetes API objects. + +You can explore the KubeSphere API document from [here](https://kubesphere.io/api/kubesphere) as well. diff --git a/content/en/docs/v3.4/reference/environment-requirements.md b/content/en/docs/v3.4/reference/environment-requirements.md new file mode 100644 index 000000000..9362efa76 --- /dev/null +++ b/content/en/docs/v3.4/reference/environment-requirements.md @@ -0,0 +1,37 @@ +--- +title: "Environment Requirements" +keywords: 'KubeSphere, Kubernetes, docker, cluster, configuration' +description: 'Understand the environment requirements for KubeSphere.' 
+linkTitle: "Environment Requirements"
+weight: 17500
+---
+
+This page summarizes the requirements for installing and using KubeSphere.
+
+## System Requirements
+
+{{< content "common/system-requirements.md" >}}
+
+## Dependency Requirements
+
+{{< content "common/dependency-requirements.md" >}}
+
+## Container Runtime Requirements
+
+{{< content "common/container-runtime-requirements.md" >}}
+
+## Network Requirements
+
+{{< content "common/network-requirements.md" >}}
+
+## Supported Kubernetes Versions
+
+{{< content "common/kubernetes-versions.md" >}}
+
+## Supported CSI Plugins
+
+{{< content "common/csi-plugins.md" >}}
+
+## Supported Web Browsers for Accessing the Console
+
+![console-browser](/images/docs/v3.3/reference/environment-requirements/console-browser.png)
\ No newline at end of file
diff --git a/content/en/docs/v3.4/reference/glossary.md b/content/en/docs/v3.4/reference/glossary.md
new file mode 100644
index 000000000..2337d3b2f
--- /dev/null
+++ b/content/en/docs/v3.4/reference/glossary.md
@@ -0,0 +1,160 @@
+---
+title: "Glossary"
+keywords: 'Kubernetes, KubeSphere, devops, docker, helm, jenkins, istio, prometheus, glossary'
+description: 'The glossary used in KubeSphere.'
+linkTitle: "Glossary"
+weight: 17100
+---
+
+This glossary includes general terms and technical terms that are specific to KubeSphere.
+
+## General
+
+- **Workspace**
+ A logical unit to organize a tenant's workload projects (i.e. Kubernetes namespaces) and DevOps projects. It also features access control of different resources and allows team members to share information. +- **System workspace**
A special place to organize system projects of KubeSphere, Kubernetes and optional components such as App Store, service mesh and DevOps. +- **Workspace member**
The users that are invited to a workspace who have certain permissions to work in the workspace. +- **Project**
+ A project in KubeSphere is a Kubernetes namespace. +- **Multi-cluster project**
+ A project whose workloads are deployed across multiple clusters. +- **Project member**
+ The users that are invited to a project who have certain permissions to work in the project. +- **Workbench**
The landing page for a tenant. It displays authorized resources that the tenant can access such as workspaces and projects. +- **Volume**
+ A KubeSphere Volume is a Kubernetes PersistentVolumeClaim (PVC). +- **Public cluster**
+ Cluster administrators can set cluster visibility so that a cluster is available to certain workspaces. A public cluster means all platform users can access the cluster, in which they are able to create and schedule resources. +- **KubeKey**
+ A brand-new installation tool developed in Go. It is able to install KubeSphere and Kubernetes together or install Kubernetes only. It supports the deployment of cloud-native add-ons (YAML or Chart) as it creates a cluster. It can also be used to scale and upgrade a cluster. +- **ks-installer**
+ The package to deploy KubeSphere on existing Kubernetes clusters. + +## Applications and Workloads + +- **OpenPitrix**
+ An open-source system to package, deploy and manage different types of apps. + +- **App template**
+ A template for a specific application that tenants can use to deploy new application instances. + +- **App repository**
+ A web accessible repository that hosts different app templates. + +- **App Store**
+ A public place for different tenants to share various applications. + +- **Deployment**
You use a Deployment to describe a desired state. The Kubernetes Deployment controller changes the actual state to the desired state at a controlled rate. In other words, a Deployment runs multiple replicas of an application and replaces any instances if they fail. For more information, see [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/). + +- **StatefulSet**
A StatefulSet is the workload object used to manage stateful applications, such as MySQL. For more information, see [StatefulSets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/). + +- **DaemonSet**
A DaemonSet ensures that all (or some) nodes run a copy of a Pod, such as Fluentd and Logstash. For more information, see [DaemonSets](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/). + +- **Job**
A Job creates one or more Pods and ensures that a specified number of them successfully terminate. For more information, see [Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/). + +- **CronJob**
A CronJob creates Jobs on a time-based schedule. A CronJob object is like one line of a crontab (cron table) file. It runs a Job periodically on a given schedule. For more information, see [CronJob](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/). + +- **Service**
A Kubernetes Service is an abstraction object which defines a logical set of Pods and a policy by which to access them - sometimes called a microservice. For more information, see [Service](https://kubernetes.io/docs/concepts/services-networking/service/). + +## DevOps + +- **DevOps project**
+ A specific project for DevOps where you manage pipelines and credentials. + +- **SCM**
+ Source Control Management, such as GitHub and Gitlab. + +- **In-SCM**
+ The pipeline based on a Jenkinsfile that is hosted in SCM. + +- **Out-of-SCM**
+ The pipeline created through graphical editing panels without a Jenkinsfile. + +- **CI node**
+ A specific node for pipelines, S2I jobs or B2I jobs. Generally, applications often need to pull various dependencies during the building process. It might cause some issues like long pulling time, or unstable network causing failure. To build robust pipelines and speed up the building by using caches, you configure one or a set of CI nodes to which KubeSphere schedules the tasks of CI/CD pipelines and S2I/B2I. + +- **B2I**
+ Binary-to-Image. B2I is a toolkit and workflow for building reproducible container images from binary executables such as Jar, War, and binary packages. + +- **S2I**
Source-to-Image. S2I is a toolkit and workflow for building reproducible container images from source code. S2I produces ready-to-run images by injecting source code into a container image and letting the container prepare that source code for execution. + + +## Logging, Events and Auditing + +- **Exact query**
+ The method to search results that perfectly match the keyword entered. + +- **Fuzzy query**
The method to search results that partially match the keyword entered. + +- **Audit policy**
An audit policy defines a set of rules about what events should be recorded and what data they should include. + +- **Audit rule**
+ An auditing rule defines how to process auditing logs. + +- **Audit webhook**
+ The webhook that the Kubernetes auditing logs will be sent to. + +## Monitoring, Alert and Notification + +- **Cluster Status Monitoring**
+ The monitoring of related metrics such as node status, component status, CPU, memory, network, and disk of the cluster. + +- **Application Resource Monitoring**
+ The monitoring of application resources across the platform, such as the number of projects and DevOps projects, as well as the number of workloads and services of a specific type. + +- **Allocated CPU**
+ The metric is calculated based on the total CPU requests of Pods, for example, on a node. It represents the amount of CPU reserved for workloads on this node, even if workloads are using fewer CPU resources. + +- **Allocated Memory**
+ The metric is calculated based on the total memory requests of Pods, for example, on a node. It represents the amount of memory reserved for workloads on this node, even if workloads are using fewer memory resources. + +- **Log Collection**
+ The Log Collection function allows the system to collect container logs saved on volumes and send the logs to standard output. + +- **Notification Receiver**
+ The channel to receive notifications, such as email, DingTalk, WeCom, Slack, and webhook. + +## Network + +- **Route**
+ A KubeSphere Route is a Kubernetes Ingress. + +- **Gateway**
+ Before creating a route, you need to enable the Internet access gateway which forwards requests to the corresponding backend service. + +## Service Mesh + +- **Canary release**
+ A graceful application release method that introduces a new version of a service and tests it by sending a small percentage of traffic to it. At the same time, the old version is responsible for handling the rest of the traffic. If everything goes well, you can gradually increase the traffic sent to the new version, while simultaneously phasing out the old version. In the case of any occurring issues, it allows you to roll back to the previous version as you change the traffic percentage. + +- **Blue-green release/deployment**
+ A zero downtime application deployment where the new version can be deployed with the old one preserved. At any time, only one of the versions is active serving all the traffic, while the other one remains idle. If there is a problem with running, you can quickly roll back to the old version. + +- **Traffic mirroring**
+ A risk-free method of testing your app versions as it sends a copy of live traffic to a service that is being mirrored. It is also called shadowing. + +- **Application governance**
+ A switch to control the tracing of your application within a project. + +## Multi-cluster Management + +- **Host Cluster** **(H Cluster)**
+ The cluster that manages Member Clusters. The multi-cluster control plane is deployed on the Host Cluster. + +- **Member Cluster** **(M Cluster)**
+ A cluster serving as a member managed by the Host Cluster in a multi-cluster architecture. + +- **Direct connection**
+ A way to connect the Host Cluster and the Member Cluster when the kube-apiserver address of the Member Cluster is accessible on any node of the Host Cluster. + +- **Agent connection**
+ A way to connect the Host Cluster and the Member Cluster when the Host Cluster cannot access the Member Cluster directly. + +- **jwtSecret**
+ The secret needed for the Host Cluster and the Member Cluster to communicate with each other. + +- **Tower**
+ When you use agent connection, there is a proxy component installed on the Host Cluster and agent installed on the Member Cluster. Tower consists of both the proxy and the agent. + +- **Proxy service address**
+ The communication service address of the Host Cluster required by the tower agent in the Member Cluster when agent connection is adopted. diff --git a/content/en/docs/v3.4/reference/storage-system-installation/_index.md b/content/en/docs/v3.4/reference/storage-system-installation/_index.md new file mode 100644 index 000000000..1750262d2 --- /dev/null +++ b/content/en/docs/v3.4/reference/storage-system-installation/_index.md @@ -0,0 +1,12 @@ +--- +title: "Storage System Installation" +description: "Storage System Installation" +layout: "single" + +linkTitle: "Storage System Installation" + +weight: 17400 + +icon: "/images/docs/v3.3/docs.svg" + +--- diff --git a/content/en/docs/v3.4/reference/storage-system-installation/glusterfs-server.md b/content/en/docs/v3.4/reference/storage-system-installation/glusterfs-server.md new file mode 100644 index 000000000..3a2a6acc5 --- /dev/null +++ b/content/en/docs/v3.4/reference/storage-system-installation/glusterfs-server.md @@ -0,0 +1,516 @@ +--- + title: "Set up a GlusterFS Server" +keywords: 'Kubernetes, KubeSphere, GlusterFS' +description: 'How to set up a GlusterFS Server' +linkTitle: "Set up a GlusterFS Server" +weight: 17420 +--- + +As an open-source distributed file system, [GlusterFS](https://kubernetes.io/docs/concepts/storage/volumes/#glusterfs) allows you to mount `glusterfs` volumes to your Pods. If a `glusterfs` volume is pre-populated with data, they can be shared among your Pods in a Kubernetes cluster. + +This tutorial demonstrates how to configure GlusterFS on three server machines and install [Heketi](https://github.com/heketi/heketi) to manage your GlusterFS cluster. + +Once you have GlusterFS and Heketi set up, you can install GlusterFS on your client machine and use KubeKey to create a KubeSphere cluster with GlusterFS as a storage class. + +## Prepare GlusterFS Nodes + +There are three server machines of Ubuntu 16.04 in this example with each having one attached disk. + +| Hostname | IP Address | Operating System | Device | +| -------- | ----------- | ------------------------------------- | --------------- | +| server1 | 192.168.0.2 | Ubuntu 16.04, 4 Cores, 4 GB of Memory | /dev/vdd 300 GB | +| server2 | 192.168.0.3 | Ubuntu 16.04, 4 Cores, 4 GB of Memory | /dev/vdd 300 GB | +| server3 | 192.168.0.4 | Ubuntu 16.04, 4 Cores, 4 GB of Memory | /dev/vdd 300 GB | + +{{< notice note >}} + +- Heketi will be installed on `server1`, which provides a RESTful management interface to manage the lifecycle of GlusterFS volumes. You can install it on a separate machine as well. + +- Attach more block storage disks to your server machine if you need more storage space. +- Data will be saved to `/dev/vdd` (block device), which must be original without partitioning or formatting. + +{{}} + +## Set up Passwordless SSH Login + +### Configure root login + +1. Log in to `server1` and switch to the root user. + + ```bash + sudo -i + ``` + +2. Change the root user password: + + ```bash + passwd + ``` + + {{< notice note >}} + +Make sure password authentication is enabled in the file `/etc/ssh/sshd_config` (the value of `PasswordAuthentication` should be `yes`). + +{{}} + +3. Change the root user password of `server2` and `server3` as well. + +### Add hosts file entries + +1. Configure your DNS or edit the `/etc/hosts` file on all server machines to add their hostnames and IP addresses: + + ```bash + vi /etc/hosts + ``` + + ```txt + # hostname loopback address + 192.168.0.2 server1 + 192.168.0.3 server2 + 192.168.0.4 server3 + ``` + +2. 
Make sure you add the above entries to the `hosts` file of all server machines.
+
+### Configure passwordless SSH login
+
+1. On `server1`, create a key by running the following command. Press **Enter** directly for all the prompts.
+
+   ```bash
+   ssh-keygen
+   ```
+
+2. Copy the key to all GlusterFS nodes.
+
+   ```bash
+   ssh-copy-id root@server1
+   ```
+
+   ```bash
+   ssh-copy-id root@server2
+   ```
+
+   ```bash
+   ssh-copy-id root@server3
+   ```
+
+3. Verify that you can access all server machines from `server1` through passwordless login.
+
+   ```bash
+   ssh root@server1
+   ```
+
+   ```bash
+   ssh root@server2
+   ```
+
+   ```bash
+   ssh root@server3
+   ```
+
+## Install GlusterFS on All Server Machines
+
+1. On `server1`, run the following command to install `software-properties-common`.
+
+   ```bash
+   apt-get install software-properties-common
+   ```
+
+2. Add the community GlusterFS PPA.
+
+   ```bash
+   add-apt-repository ppa:gluster/glusterfs-7
+   ```
+
+3. Make sure you are using the latest package.
+
+   ```bash
+   apt-get update
+   ```
+
+4. Install the GlusterFS server.
+
+   ```bash
+   apt-get install glusterfs-server -y
+   ```
+
+5. Make sure you run the above commands on `server2` and `server3` as well and verify the version on all machines.
+
+   ```text
+   glusterfs -V
+   ```
+
+{{< notice note >}}
+
+The above commands may be slightly different if you do not install GlusterFS on Ubuntu. For more information, see [the Gluster documentation](https://docs.gluster.org/en/latest/Install-Guide/Install/#installing-gluster).
+
+{{</ notice >}}
+
+## Load Kernel Modules
+
+1. Run the following commands to load three necessary kernel modules on `server1`.
+
+   ```bash
+   echo dm_thin_pool | sudo tee -a /etc/modules
+   ```
+
+   ```bash
+   echo dm_snapshot | sudo tee -a /etc/modules
+   ```
+
+   ```bash
+   echo dm_mirror | sudo tee -a /etc/modules
+   ```
+
+2. Install `thin-provisioning-tools`.
+
+   ```bash
+   apt-get -y install thin-provisioning-tools
+   ```
+
+3. Make sure you run the above commands on `server2` and `server3` as well.
+
+## Create a GlusterFS Cluster
+
+1. Run the following command on `server1` to add other nodes and create a cluster.
+
+   ```bash
+   gluster peer probe server2
+   ```
+
+   ```bash
+   gluster peer probe server3
+   ```
+
+2. Verify that all nodes in the cluster are connected successfully.
+
+   ```bash
+   gluster peer status
+   ```
+
+3. Expected output:
+
+   ```bash
+   Number of Peers: 2
+
+   Hostname: server2
+   Uuid: e1192d6a-b65e-4ce8-804c-72d9425211a6
+   State: Peer in Cluster (Connected)
+
+   Hostname: server3
+   Uuid: 9bd733e4-96d4-49d5-8958-6c947a2b4fa6
+   State: Peer in Cluster (Connected)
+   ```
+
+## Install Heketi
+
+As GlusterFS itself does not provide a way for API calls, you can install [Heketi](https://github.com/heketi/heketi) to manage the lifecycle of GlusterFS volumes with a RESTful API for Kubernetes calls. In this way, your Kubernetes cluster can dynamically provision GlusterFS volumes. Heketi v7.0.0 will be installed in this example. For more information about available Heketi versions, see its [Release Page](https://github.com/heketi/heketi/releases/).
+
+1. Download Heketi on `server1`.
+
+   ```bash
+   wget https://github.com/heketi/heketi/releases/download/v7.0.0/heketi-v7.0.0.linux.amd64.tar.gz
+   ```
+
+   {{< notice note >}}
+
+   You can also install Heketi on a separate machine.
+
+   {{</ notice >}}
+
+2. Unzip the file.
+
+   ```bash
+   tar -xf heketi-v7.0.0.linux.amd64.tar.gz
+   ```
+
+   ```bash
+   cd heketi
+   ```
+
+   ```bash
+   cp heketi /usr/bin
+   ```
+
+   ```bash
+   cp heketi-cli /usr/bin
+   ```
+
+3. Create a Heketi service file.
+ + ``` + vi /lib/systemd/system/heketi.service + ``` + + ``` + [Unit] + Description=Heketi Server + [Service] + Type=simple + WorkingDirectory=/var/lib/heketi + ExecStart=/usr/bin/heketi --config=/etc/heketi/heketi.json + Restart=on-failure + StandardOutput=syslog + StandardError=syslog + [Install] + WantedBy=multi-user.target + ``` + +4. Create Heketi folders. + + ```bash + mkdir -p /var/lib/heketi + ``` + + ``` + mkdir -p /etc/heketi + ``` + +5. Create a JSON file for Heketi configurations. + + ``` + vi /etc/heketi/heketi.json + ``` + + An example file: + + ```json + { + "_port_comment": "Heketi Server Port Number", + "port": "8080", + + "_use_auth": "Enable JWT authorization. Please enable for deployment", + "use_auth": false, + + "_jwt": "Private keys for access", + "jwt": { + "_admin": "Admin has access to all APIs", + "admin": { + "key": "123456" + }, + "_user": "User only has access to /volumes endpoint", + "user": { + "key": "123456" + } + }, + + "_glusterfs_comment": "GlusterFS Configuration", + "glusterfs": { + "_executor_comment": [ + "Execute plugin. Possible choices: mock, ssh", + "mock: This setting is used for testing and development.", + " It will not send commands to any node.", + "ssh: This setting will notify Heketi to ssh to the nodes.", + " It will need the values in sshexec to be configured.", + "kubernetes: Communicate with GlusterFS containers over", + " Kubernetes exec api." + ], + "executor": "ssh", + + "_sshexec_comment": "SSH username and private key file information", + "sshexec": { + "keyfile": "/root/.ssh/id_rsa", + "user": "root" + }, + + "_kubeexec_comment": "Kubernetes configuration", + "kubeexec": { + "host" :"https://kubernetes.host:8443", + "cert" : "/path/to/crt.file", + "insecure": false, + "user": "kubernetes username", + "password": "password for kubernetes user", + "namespace": "Kubernetes namespace", + "fstab": "Optional: Specify fstab file on node. Default is /etc/fstab" + }, + + "_db_comment": "Database file name", + "db": "/var/lib/heketi/heketi.db", + "brick_max_size_gb" : 1024, + "brick_min_size_gb" : 1, + "max_bricks_per_volume" : 33, + + + "_loglevel_comment": [ + "Set log level. Choices are:", + " none, critical, error, warning, info, debug", + "Default is warning" + ], + "loglevel" : "debug" + } + } + ``` + + {{< notice note >}} + + The account `admin` and its `key` value must be provided when you install GlusterFS as a storage class of your KubeSphere cluster. + + {{}} + +6. Start Heketi. + + ```bash + systemctl start heketi + ``` + +7. Check the status of Heketi. + + ```bash + systemctl status heketi + ``` + + If you can see `active (running)`, it means the installation is successful. Expected output: + + ```bash + ● heketi.service - Heketi Server + Loaded: loaded (/lib/systemd/system/heketi.service; disabled; vendor preset: enabled) + Active: active (running) since Tue 2021-03-09 13:04:30 CST; 4s ago + Main PID: 9282 (heketi) + Tasks: 8 + Memory: 6.5M + CPU: 62ms + CGroup: /system.slice/heketi.service + └─9282 /usr/bin/heketi --config=/etc/heketi/heketi.json + + Mar 09 13:04:30 server1 systemd[1]: Started Heketi Server. 
+ Mar 09 13:04:30 server1 heketi[9282]: Heketi v7.0.0 + Mar 09 13:04:30 server1 heketi[9282]: [heketi] INFO 2021/03/09 13:04:30 Loaded ssh executor + Mar 09 13:04:30 server1 heketi[9282]: [heketi] INFO 2021/03/09 13:04:30 Adv: Max bricks per volume set to 33 + Mar 09 13:04:30 server1 heketi[9282]: [heketi] INFO 2021/03/09 13:04:30 Adv: Max brick size 1024 GB + Mar 09 13:04:30 server1 heketi[9282]: [heketi] INFO 2021/03/09 13:04:30 Adv: Min brick size 1 GB + Mar 09 13:04:30 server1 heketi[9282]: [heketi] INFO 2021/03/09 13:04:30 GlusterFS Application Loaded + Mar 09 13:04:30 server1 heketi[9282]: [heketi] INFO 2021/03/09 13:04:30 Started Node Health Cache Monitor + Mar 09 13:04:30 server1 heketi[9282]: Listening on port 8080 + ``` + +8. Enable Heketi. + + ```bash + systemctl enable heketi + ``` + + Expected output: + + ```bash + Created symlink from /etc/systemd/system/multi-user.target.wants/heketi.service to /lib/systemd/system/heketi.service. + ``` + +9. Create a topology configuration file for Heketi. It contains the information of clusters, nodes, and disks added to Heketi. + + ```bash + vi /etc/heketi/topology.json + ``` + + An example file: + + ```json + { + "clusters": [ + { + "nodes": [ + { + "node": { + "hostnames": { + "manage": [ + "192.168.0.2" + ], + "storage": [ + "192.168.0.2" + ] + }, + "zone": 1 + }, + "devices": [ + "/dev/vdd" + ] + }, + { + "node": { + "hostnames": { + "manage": [ + "192.168.0.3" + ], + "storage": [ + "192.168.0.3" + ] + }, + "zone": 1 + }, + "devices": [ + "/dev/vdd" + ] + }, + { + "node": { + "hostnames": { + "manage": [ + "192.168.0.4" + ], + "storage": [ + "192.168.0.4" + ] + }, + "zone": 1 + }, + "devices": [ + "/dev/vdd" + ] + } + ] + } + ] + } + ``` + + {{< notice note >}} + + - Replace the IP addresses above with your own. + - Add your own disk name for `devices`. + + {{}} + +10. Load the Heketi JSON file. + + ```bash + export HEKETI_CLI_SERVER=http://localhost:8080 + ``` + + ```bash + heketi-cli topology load --json=/etc/heketi/topology.json + ``` + + Expected output: + + ```bash + Creating cluster ... ID: 2d9e11adede04fe6d07cb81c5a1a7ea4 + Allowing file volumes on cluster. + Allowing block volumes on cluster. + Creating node 192.168.0.2 ... ID: 0a9f240ab6fd96ea014948c5605be675 + Adding device /dev/vdd ... OK + Creating node 192.168.0.3 ... ID: 2468086cadfee8ef9f48bc15db81c88a + Adding device /dev/vdd ... OK + Creating node 192.168.0.4 ... ID: 4c21b33d5c32029f5b7dc6406977ec34 + Adding device /dev/vdd ... OK + ``` + +11. The above output displays both your cluster ID and node ID. Run the following command to view your cluster information. + + ```bash + heketi-cli cluster info 2d9e11adede04fe6d07cb81c5a1a7ea4 # Use your own cluster ID. 
+ ``` + + Expected output: + + ```bash + Cluster id: 2d9e11adede04fe6d07cb81c5a1a7ea4 + Nodes: + 0a9f240ab6fd96ea014948c5605be675 + 2468086cadfee8ef9f48bc15db81c88a + 4c21b33d5c32029f5b7dc6406977ec34 + Volumes: + + Block: true + + File: true + ``` + diff --git a/content/en/docs/v3.4/reference/storage-system-installation/nfs-server.md b/content/en/docs/v3.4/reference/storage-system-installation/nfs-server.md new file mode 100644 index 000000000..b69cec05e --- /dev/null +++ b/content/en/docs/v3.4/reference/storage-system-installation/nfs-server.md @@ -0,0 +1,102 @@ +--- +title: "Set up an NFS Server" +keywords: 'Kubernetes, KubeSphere, NFS Server' +description: 'How to set up an NFS Server' +linkTitle: "Set up an NFS Server" +weight: 17410 +--- + +KubeSphere supports [NFS-client Provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client) as a storage plugin. In order to use it, you must configure the NFS server in advance. With the NFS server in place, an NFS client mounts a directory on the server machine so that files residing on the NFS server are accessible to the NFS client. Namely, you need to create and export a directory that your client machine can access. + +Once your NFS server machine is ready, you can use [KubeKey](../../../installing-on-linux/introduction/kubekey/) to install NFS-client Provisioner by Helm charts together with Kubernetes and KubeSphere. The exported directory of your NFS server must be provided in your Chart configurations used by KubeKey during installation. + +{{< notice note >}} + +- You can also create the storage class of NFS-client after you install a KubeSphere cluster. +- NFS is incompatible with some applications, for example, Prometheus, which may result in pod creation failures. If you need to use NFS in the production environment, ensure that you have understood the risks. For more information, contact support@kubesphere.cloud. + +{{}} + +This tutorial demonstrates how to install the NFS server on Ubuntu 16.04 as an example. + +## Install and Configure an NFS Server + +### Step 1: Install the NFS kernel server + +To set up your server machine, you must install the NFS kernel server on it. + +1. Run the following command so that you will be using the latest package on Ubuntu for installation. + + ```bash + sudo apt-get update + ``` + +2. Install the NFS kernel server. + + ```bash + sudo apt install nfs-kernel-server + ``` + +### Step 2: Create an export directory + +Your NFS client will mount a directory on the server machine which has been exported by the NFS server. + +1. Run the following command to specify a mount folder name (for example, `/mnt/demo`). + + ```bash + sudo mkdir -p /mnt/demo + ``` + +2. For demonstration purposes, remove restrictive permissions of the folder so that all your clients can access the directory. + + ```bash + sudo chown nobody:nogroup /mnt/demo + ``` + + ```bash + sudo chmod 777 /mnt/demo + ``` + +### Step 3: Grant your client machine access to the NFS server + +1. Run the following command: + + ```bash + sudo nano /etc/exports + ``` + +2. Add your client information to the file. + + ```bash + /mnt/demo clientIP(rw,sync,no_subtree_check) + ``` + + If you have multiple client machines, you can add them all in the file. Alternatively, specify a subnet in the file so that all the clients within it can access the NFS server. For example: + + ```bash + /mnt/demo 192.168.0.0/24(rw,sync,no_subtree_check) + ``` + + {{< notice note >}} + + - `rw`: Read and write operations. 
The client machine will have both read and write access to the volume. + - `sync`: Changes will be written to disk and memory. + - `no_subtree_check`: Prevent subtree checking. It disables the security verification required for a client to mount permitted subdirectories. + + {{}} + +3. Save the file when you finish editing it. + +### Step 4: Apply the configuration + +1. Run the following command to export your shared directory. + + ```bash + sudo exportfs -a + ``` + +2. Restart the NFS kernel server. + + ```bash + sudo systemctl restart nfs-kernel-server + ``` diff --git a/content/en/docs/v3.4/release/_index.md b/content/en/docs/v3.4/release/_index.md new file mode 100644 index 000000000..fe9928658 --- /dev/null +++ b/content/en/docs/v3.4/release/_index.md @@ -0,0 +1,14 @@ +--- +title: "Release Notes" +description: "Release Notes of Different KubeSphere Versions" +layout: "second" + +linkTitle: "Release Notes" + +weight: 18000 + +icon: "/images/docs/v3.3/docs.svg" + +--- + +This chapter lists the release notes of all versions of KubeSphere, helping you gain a comprehensive understanding of upgrades and feature enhancements in every version release. diff --git a/content/en/docs/v3.4/release/release-v200.md b/content/en/docs/v3.4/release/release-v200.md new file mode 100644 index 000000000..93d303f89 --- /dev/null +++ b/content/en/docs/v3.4/release/release-v200.md @@ -0,0 +1,92 @@ +--- +title: "Release Notes for 2.0.0" +keywords: "kubernetes, docker, kubesphere, jenkins, istio, prometheus" +description: "KubeSphere release notes for 2.0.0." + +linkTitle: "Release Notes - 2.0.0" +weight: 18900 +--- + +KubeSphere 2.0.0 was released on **May 18th, 2019**. + +## What's New in 2.0.0 + +### Component Upgrades + +- Support Kubernetes [Kubernetes 1.13.5](https://github.com/kubernetes/kubernetes/releases/tag/v1.13.5) +- Integrate [QingCloud Cloud Controller](https://github.com/yunify/qingcloud-cloud-controller-manager). After installing load balancer, QingCloud load balancer can be created through KubeSphere console and the backend workload is bound automatically.  +- Integrate [QingStor CSI v0.3.0](https://github.com/yunify/qingstor-csi/tree/v0.3.0) storage plugin and support physical NeonSAN storage system. Support SAN storage service with high availability and high performance. +- Integrate [QingCloud CSI v0.2.1](https://github.com/yunify/qingcloud-csi/tree/v0.2.1) storage plugin and support many types of volume to create QingCloud block services. +- Harbor is upgraded to 1.7.5. +- GitLab is upgraded to 11.8.1. +- Prometheus is upgraded to 2.5.0. + +### Microservice Governance + +- Integrate Istio 1.1.1 and support visualization of service mesh management. +- Enable the access to the project's external websites and the application traffic governance. +- Provide built-in sample microservice [Bookinfo Application](https://istio.io/docs/examples/bookinfo/). +- Support traffic governance. +- Support traffic images. +- Provide load balancing of microservice based on Istio. +- Support canary release. +- Enable blue-green deployment. +- Enable circuit breaking. +- Enable microservice tracing. + +### DevOps (CI/CD Pipeline) + +- CI/CD pipeline provides email notification and supports the email notification during construction. +- Enhance CI/CD graphical editing pipelines, and more pipelines for common plugins and execution conditions. +- Provide source code vulnerability scanning based on SonarQube 7.4. +- Support [Source to Image](https://github.com/kubesphere/s2ioperator) feature. 
+
+### Monitoring
+
+- Provide an independent monitoring page for Kubernetes components, including etcd, kube-apiserver and kube-scheduler.
+- Optimize several monitoring algorithms.
+- Optimize monitoring resources. Reduce Prometheus storage and disk usage by up to 80%.
+
+### Logging
+
+- Provide a unified log console for tenants.
+- Enable exact and fuzzy retrieval.
+- Support real-time and historical logs.
+- Support combined log queries based on namespace, workload, Pod, container, keywords and time limit.
+- Support detail page of single and direct logs. Pods and containers can be switched.
+- [FluentBit Operator](https://github.com/kubesphere/fluentbit-operator) supports log collection settings: ElasticSearch, Kafka and Fluentd can be added, activated or turned off as log collectors. Before sending to log collectors, you can configure filtering conditions for needed logs.
+
+### Alerting and Notifications
+
+- Email notifications are available for cluster nodes and workload resources.
+- Notification rules: combinations of multiple monitoring resources are available. Different warning levels, detection cycles, push times and thresholds can be configured.
+- Time and notifiers can be set.
+- Enable notification repeating rules for different levels.
+
+### Security Enhancement
+
+- Fix RunC Container Escape Vulnerability [Runc container breakout](https://log.qingcloud.com/archives/5127)
+- Fix Alpine Docker's image Vulnerability [Alpine container shadow breakout](https://www.alpinelinux.org/posts/Docker-image-vulnerability-CVE-2019-5021.html)
+- Support single and multi-login configuration items.
+- A verification code is required after multiple invalid logins.
+- Enhance the password policy and prevent weak passwords.
+- Other security enhancements.
+
+### Interface Optimization
+
+- Optimize the console user experience in multiple places, such as switching between DevOps projects and other projects.
+- Optimize many Chinese-English webpages.
+
+### Others
+
+- Support Etcd backup and recovery.
+- Support regular cleanup of Docker images.
+
+## Bug Fixes
+
+- Fix delayed updates of the resource and deleted pages.
+- Fix dirty data left over after deleting the HPA workload.
+- Fix incorrect Job status display.
+- Correct resource quota, Pod usage and storage metrics algorithms.
+- Adjust CPU usage percentages.
+- Many more bug fixes.
diff --git a/content/en/docs/v3.4/release/release-v201.md b/content/en/docs/v3.4/release/release-v201.md
new file mode 100644
index 000000000..d4f043ec7
--- /dev/null
+++ b/content/en/docs/v3.4/release/release-v201.md
@@ -0,0 +1,19 @@
+---
+title: "Release Notes for 2.0.1"
+keywords: "kubernetes, docker, kubesphere, jenkins, istio, prometheus"
+description: "KubeSphere release notes for 2.0.1."
+
+linkTitle: "Release Notes - 2.0.1"
+weight: 18800
+---
+
+KubeSphere 2.0.1 was released on **June 9th, 2019**.
+
+## Bug Fixes
+
+- Fix the issue that CI/CD pipeline cannot recognize correct special characters in the code branch.
+- Fix CI/CD pipeline's issue of being unable to check logs.
+- Fix no-log data output problem caused by index document fragmentation abnormity during the log query.
+- Fix prompt exceptions when searching for logs that do not exist.
+- Fix the line-overlap problem on traffic governance topology and fix invalid image strategy application.
+- Many more bug fixes.
diff --git a/content/en/docs/v3.4/release/release-v202.md b/content/en/docs/v3.4/release/release-v202.md
new file mode 100644
index 000000000..d655a0678
--- /dev/null
+++ b/content/en/docs/v3.4/release/release-v202.md
@@ -0,0 +1,40 @@
+---
+title: "Release Notes for 2.0.2"
+keywords: "kubernetes, docker, kubesphere, jenkins, istio, prometheus"
+description: "KubeSphere release notes for 2.0.2."
+
+linkTitle: "Release Notes - 2.0.2"
+weight: 18700
+---
+
+KubeSphere 2.0.2 was released on July 9, 2019, which fixes known bugs and enhances existing features. If you have installed versions of 1.0.x, 2.0.0 or 2.0.1, please download KubeSphere installer v2.0.2 to upgrade.
+
+## What's New in 2.0.2
+
+### Enhanced Features
+
+- [API docs](../../reference/api-docs/) are available on the official website.
+- Block brute-force attacks.
+- Standardize the maximum length of resource names.
+- Upgrade the project gateway (Ingress Controller) to version 0.24.1. Support Ingress grayscale release.
+
+## List of Fixed Bugs
+
+- Fix the issue that the traffic topology displays resources outside of this project.
+- Fix the extra service component issue from the traffic topology under specific circumstances.
+- Fix the execution issue when "Source to Image" reconstructs images under specific circumstances.
+- Fix the page display problem when a "Source to Image" job fails.
+- Fix the log checking problem when the Pod status is abnormal.
+- Fix the issue that the disk monitor cannot detect some types of volume mounting, such as LVM volumes.
+- Fix the problem of detecting deployed applications.
+- Fix incorrect status of application components.
+- Fix host node number calculation errors.
+- Fix input data loss caused by switching reference configuration buttons when adding environment variables.
+- Fix the rerun job issue that the Operator role cannot execute.
+- Fix the initialization issue on IPv4 environment uuid.
+- Fix the issue that the log detail page cannot be scrolled down to check past logs.
+- Fix wrong APIServer addresses in KubeConfig files.
+- Fix the issue that a DevOps project's name cannot be changed.
+- Fix the issue that container logs cannot specify a query time.
+- Fix the saving problem on relevant repository's secrets under certain circumstances.
+- Fix the issue that the application's service component creation page does not have image registry secrets.
diff --git a/content/en/docs/v3.4/release/release-v210.md b/content/en/docs/v3.4/release/release-v210.md
new file mode 100644
index 000000000..87ddd7758
--- /dev/null
+++ b/content/en/docs/v3.4/release/release-v210.md
@@ -0,0 +1,155 @@
+---
+title: "Release Notes for 2.1.0"
+keywords: "kubernetes, docker, kubesphere, jenkins, istio, prometheus"
+description: "KubeSphere release notes for 2.1.0."
+
+linkTitle: "Release Notes - 2.1.0"
+weight: 18600
+---
+
+KubeSphere 2.1.0 was released on Nov 11th, 2019, which fixes known bugs, adds some new features and brings some enhancements. If you have installed a version of 2.0.x, please upgrade to enjoy the better user experience of v2.1.0.
+
+## Installer Enhancement
+
+- Decouple some components and make components including DevOps, service mesh, app store, logging, alerting and notification optional and pluggable
+- Add Grafana (v5.2.4) as an optional component
+- Upgrade Kubernetes to 1.15.5.
It is also compatible with 1.14.x and 1.13.x
+- Upgrade [OpenPitrix](https://openpitrix.io/) to v0.4.5
+- Upgrade the log forwarder Fluent Bit to v1.3.2
+- Upgrade Jenkins to v2.176.2
+- Upgrade Istio to 1.3.3
+- Optimize the high availability of core components
+
+## App Store
+
+### Features
+
+Support uploading, testing, reviewing, publishing, classifying, upgrading, deploying and deleting apps, and provide nine built-in applications
+
+### Upgrade & Enhancement
+
+- The application repository configuration is moved from global to each workspace
+- Support adding application repositories to share applications within a workspace
+
+## Storage
+
+### Features
+
+- Support Local Volume with dynamic provisioning
+- Provide the real-time monitoring feature for QingCloud block storage
+
+### Upgrade & Enhancement
+
+QingCloud CSI is adapted to CSI 1.1.0 and supports upgrade, topology, and snapshot creation and deletion. It also supports creating a PVC based on a snapshot
+
+### BUG Fixes
+
+Fix the StorageClass list display problem
+
+## Observability
+
+### Features
+
+- Support collecting file logs from disk, for Pods that write their logs to files on disk
+- Support integrating with external ElasticSearch 7.x
+- Ability to search logs containing Chinese words
+- Add initContainer log display
+- Ability to export logs
+- Support canceling notifications triggered by alerting
+
+### UPGRADE & ENHANCEMENT
+
+- Improve the performance of log search
+- Refine the hints when the logging service is abnormal
+- Optimize the information shown when a monitoring metrics request is abnormal
+- Support Pod anti-affinity rules for Prometheus
+
+### BUG FIXES
+
+- Fix mistaken highlights in log search results
+- Fix log search not matching phrases correctly
+- Fix the issue that logs could not be retrieved for a deleted workload when searched by workload name
+- Fix the issue where the results were truncated when the log is highlighted
+- Fix some metrics exceptions: node `inode`, maximum pod tolerance
+- Fix the issue with an incorrect number of alerting targets
+- Fix the filter failure problem of multi-metric monitoring
+- Fix the problem of missing logging and monitoring information on tainted nodes (adjust the tolerations of node-exporter and fluent-bit so that they are deployed on all nodes by default, ignoring taints)
+
+## DevOps
+
+### Features
+
+- Add support for branch switching and git log export in S2I
+- Add B2I: the ability to build binary/WAR/JAR packages and release them to Kubernetes
+- Support dependency caching for pipelines, S2I, and B2I
+- Support the delete Kubernetes resource action in the `kubernetesDeploy` step
+- Multi-branch pipelines can trigger other pipelines when a branch is created or deleted
+
+### Upgrades & Enhancement
+
+- Support Bitbucket in the pipeline
+- Support cron script validation in the pipeline
+- Support Jenkinsfile syntax validation
+- Support customizing the SonarQube link
+- Support event-triggered builds in the pipeline
+- Optimize agent node selection in the pipeline
+- Accelerate pipeline startup
+- Use a dynamically provisioned volume as the agent's working directory in the pipeline; also contributed to Jenkins [#589](https://github.com/jenkinsci/kubernetes-plugin/pull/598)
+- Optimize the Jenkins kubernetesDeploy plugin and add more resource types and API versions (v1, apps/v1, extensions/v1beta1, apps/v1beta2, apps/v1beta1, autoscaling/v1, autoscaling/v2beta1, autoscaling/v2beta2, networking.k8s.io/v1, batch/v1beta1, batch/v2alpha1); also contributed to Jenkins [#614](https://github.com/jenkinsci/kubernetes-plugin/pull/614)
+- Add support for PV, PVC and Network Policy in the deploy step of the pipeline; also contributed to Jenkins [#87](https://github.com/jenkinsci/kubernetes-cd-plugin/pull/87), [#88](https://github.com/jenkinsci/kubernetes-cd-plugin/pull/88)
+
+### Bug Fixes
+
+- Fix the 400 Bad Request issue in GitHub webhooks
+- Incompatible change: the DevOps webhook URL prefix is changed from `/webhook/xxx` to `/devops_webhook/xxx`
+
+## Authentication and Authorization
+
+### Features
+
+Support synchronization and authentication with AD accounts
+
+### Upgrades & Enhancement
+
+- Reduce the LDAP component's RAM consumption
+- Add protection against brute force attacks
+
+### Bug Fixes
+
+- Fix LDAP connection pool leak
+- Fix the issue where users could not be added to the workspace
+- Fix sensitive data transmission leaks
+
+## User Experience
+
+### Features
+
+Provide a wizard to manage projects (namespaces) that are not assigned to any workspace
+
+### Upgrades & Enhancement
+
+- Support bash-completion in web kubectl
+- Optimize the host information display
+- Add a connection test for the email server
+- Add prompts on resource list pages
+- Optimize the project overview page and project basic information
+- Simplify the service creation process
+- Simplify the workload creation process
+- Support real-time status updates in the resource list
+- Optimize YAML editing
+- Support image search and image information display
+- Add the pod list to the workload page
+- Update the web terminal theme
+- Support container switching in the container terminal
+- Optimize Pod information display, and add Pod scheduling information
+- More detailed workload status display
+
+### Bug Fixes
+
+- Fix the issue where the project's default resource requests are displayed incorrectly
+- Optimize the web terminal design, making it much easier to find
+- Fix the Pod status update delay
+- Fix the issue where a host could not be searched based on roles
+- Fix the DevOps project count error on the workspace details page
+- Fix the issue where workspace list pages could not be paged properly
+- Fix inconsistent result ordering after queries on the workspace list page
diff --git a/content/en/docs/v3.4/release/release-v211.md b/content/en/docs/v3.4/release/release-v211.md
new file mode 100644
index 000000000..270575452
--- /dev/null
+++ b/content/en/docs/v3.4/release/release-v211.md
@@ -0,0 +1,122 @@
+---
+title: "Release Notes for 2.1.1"
+keywords: "kubernetes, docker, kubesphere, jenkins, istio, prometheus"
+description: "KubeSphere release notes for 2.1.1."
+
+linkTitle: "Release Notes - 2.1.1"
+weight: 18500
+---
+
+KubeSphere 2.1.1 was released on Feb 23rd, 2020. It fixes known bugs and brings some enhancements. If you have installed 2.0.x or 2.1.0, make sure to read the upgrade instructions in the user manual carefully before upgrading, and feel free to raise any questions on [GitHub](https://github.com/kubesphere/kubesphere/issues).
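+
+The installer enhancements listed in the next section make etcd monitoring optional: the etcd certificate only needs to be supplied when etcd monitoring metrics are wanted. The snippet below is only an illustrative sketch of the kind of installer settings involved; the key names follow the ClusterConfiguration layout of later ks-installer releases and are assumptions here, so consult the installer configuration reference for your exact version before upgrading.
+
+```yaml
+# Illustrative sketch only: key names follow later ks-installer releases
+# and may differ in the 2.1.x installer configuration.
+etcd:
+  monitoring: false      # set to true only if etcd monitoring metrics are needed
+  endpointIps: localhost # etcd member IP addresses, used when monitoring is enabled
+  port: 2379             # etcd client port
+  tlsEnable: true        # when enabled, etcd certificates must be provided
+```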
+
+## What's New in 2.1.1
+
+## Installer
+
+### UPGRADE & ENHANCEMENT
+
+- Support Kubernetes v1.14.x, v1.15.x, v1.16.x and v1.17.x, and solve the Kubernetes API compatibility issue #[1829](https://github.com/kubesphere/kubesphere/issues/1829)
+- Simplify the steps of installation on existing Kubernetes and remove the step of specifying the cluster's CA certificate; specifying the etcd certificate is no longer mandatory if users don't need etcd monitoring metrics
+- Back up the configuration of CoreDNS before upgrading
+
+### BUG FIXES
+
+- Fix the issue of importing apps to the App Store
+
+## App Store
+
+### UPGRADE & ENHANCEMENT
+
+- Upgrade OpenPitrix to v0.4.8
+
+### BUG FIXES
+
+- Fix the latest-version display issue for published apps #[1130](https://github.com/kubesphere/kubesphere/issues/1130)
+- Fix the column name display issue on the app approval list page #[1498](https://github.com/kubesphere/kubesphere/issues/1498)
+- Fix the issue of searching by app name/workspace #[1497](https://github.com/kubesphere/kubesphere/issues/1497)
+- Fix the issue of failing to create an app with the same name as a previously deleted app #[1821](https://github.com/kubesphere/kubesphere/pull/1821) #[1564](https://github.com/kubesphere/kubesphere/issues/1564)
+- Fix the issue of failing to deploy apps in some cases #[1619](https://github.com/kubesphere/kubesphere/issues/1619) #[1730](https://github.com/kubesphere/kubesphere/issues/1730)
+
+## Storage
+
+### UPGRADE & ENHANCEMENT
+
+- Support CSI plugins of Alibaba Cloud and Tencent Cloud
+
+### BUG FIXES
+
+- Fix the paging issue on the storage class list page #[1583](https://github.com/kubesphere/kubesphere/issues/1583) #[1591](https://github.com/kubesphere/kubesphere/issues/1591)
+- Fix the issue that the value of the imageFeatures parameter displays '2' when creating a Ceph storage class #[1593](https://github.com/kubesphere/kubesphere/issues/1593)
+- Fix the issue that the search filter fails to work on the persistent volume list page #[1582](https://github.com/kubesphere/kubesphere/issues/1582)
+- Fix the display issue for abnormal persistent volumes #[1581](https://github.com/kubesphere/kubesphere/issues/1581)
+- Fix the display issue for persistent volumes whose associated storage class has been deleted #[1580](https://github.com/kubesphere/kubesphere/issues/1580) #[1579](https://github.com/kubesphere/kubesphere/issues/1579)
+
+## Observability
+
+### UPGRADE & ENHANCEMENT
+
+- Upgrade Fluent Bit to v1.3.5 #[1505](https://github.com/kubesphere/kubesphere/issues/1505)
+- Upgrade Kube-state-metrics to v1.7.2
+- Upgrade Elastic Curator to v5.7.6 #[517](https://github.com/kubesphere/ks-installer/issues/517)
+- Fluent Bit Operator supports dynamically detecting the location of the symlinked Docker log folder on host machines
+- Fluent Bit Operator supports managing the Fluent Bit instance declaratively by updating the Operator's ConfigMap
+- Fix the sort order issue on the alert list page #[1397](https://github.com/kubesphere/kubesphere/issues/1397)
+- Adjust the container memory usage metric to use `container_memory_working_set_bytes`
+
+### BUG FIXES
+
+- Fix the lag issue of container logs #[1650](https://github.com/kubesphere/kubesphere/issues/1650)
+- Fix the display issue that some workload replicas have no logs on the container log details page #[1505](https://github.com/kubesphere/kubesphere/issues/1505)
+- Fix the Curator compatibility issue to support ElasticSearch 7.x #[517](https://github.com/kubesphere/ks-installer/issues/517)
+- Fix the 
display issue of container log page during container initialization #[1518](https://github.com/kubesphere/kubesphere/issues/1518) +- Fix the blank node issue when these nodes are resized #[1464](https://github.com/kubesphere/kubesphere/issues/1464) +- Fix the display issue of components status in monitor center, to keep them up-to date #[1858](https://github.com/kubesphere/kubesphere/issues/1858) +- Fix the wrong monitoring targets number in alert detail page #[61](https://github.com/kubesphere/console/issues/61) + +## DevOps + +### BUG FIXES + +- Fix the issue of UNSTABLE state not visible in the pipeline #[1428](https://github.com/kubesphere/kubesphere/issues/1428) +- Fix the format issue of KubeConfig in DevOps pipeline #[1529](https://github.com/kubesphere/kubesphere/issues/1529) +- Fix the image repo compatibility issue in B2I, to support image repo of Alibaba Cloud #[1500](https://github.com/kubesphere/kubesphere/issues/1500) +- Fix the paging issue in DevOps pipelines' branches list page #[1517](https://github.com/kubesphere/kubesphere/issues/1517) +- Fix the issue of failing to display pipeline configuration after modifying it #[1522](https://github.com/kubesphere/kubesphere/issues/1522) +- Fix the issue of failing to download generated artifact in S2I job #[1547](https://github.com/kubesphere/kubesphere/issues/1547) +- Fix the issue of [data loss occasionally after restarting Jenkins]( https://ask.kubesphere.io/forum/d/283-jenkins) +- Fix the issue that only 'PR-HEAD' is fetched when binding pipeline with GitHub #[1780](https://github.com/kubesphere/kubesphere/issues/1780) +- Fix 414 issue when updating DevOps credential #[1824](https://github.com/kubesphere/kubesphere/issues/1824) +- Fix wrong s2ib/s2ir naming issue from B2I/S2I #[1840](https://github.com/kubesphere/kubesphere/issues/1840) +- Fix the issue of failing to drag and drop tasks on pipeline editing page #[62](https://github.com/kubesphere/console/issues/62) + +## Authentication and Authorization + +### UPGRADE & ENHANCEMENT + +- Generate client certification through CSR #[1449](https://github.com/kubesphere/kubesphere/issues/1449) + +### BUG FIXES + +- Fix content loss issue in KubeConfig token file #[1529](https://github.com/kubesphere/kubesphere/issues/1529) +- Fix the issue that users with different permission fail to log in on the same browser #[1600](https://github.com/kubesphere/kubesphere/issues/1600) + +## User Experience + +### UPGRADE & ENHANCEMENT + +- Support to edit SecurityContext in workload editing page #[1530](https://github.com/kubesphere/kubesphere/issues/1530) +- Support to configure init container in workload editing page #[1488](https://github.com/kubesphere/kubesphere/issues/1488) +- Add support of startupProbe, also add periodSeconds, successThreshold, failureThreshold parameters in probe editing page #[1487](https://github.com/kubesphere/kubesphere/issues/1487) +- Optimize the status update display of Pods #[1187](https://github.com/kubesphere/kubesphere/issues/1187) +- Optimize the error message report on console #[43](https://github.com/kubesphere/console/issues/43) + +### BUG FIXES + +- Fix the status display issue for the Pods that are not under running status #[1187](https://github.com/kubesphere/kubesphere/issues/1187) +- Fix the issue that the added annotation can't be deleted when creating service of QingCloud LoadBalancer #[1395](https://github.com/kubesphere/kubesphere/issues/1395) +- Fix the display issue when selecting workload on service editing page 
#[1596](https://github.com/kubesphere/kubesphere/issues/1596) +- Fix the issue of failing to edit configuration file when editing 'Job' #[1521](https://github.com/kubesphere/kubesphere/issues/1521) +- Fix the issue of failing to update the service of 'StatefulSet' #[1513](https://github.com/kubesphere/kubesphere/issues/1513) +- Fix the issue of image searching for QingCloud and Alibaba Cloud image repos #[1627](https://github.com/kubesphere/kubesphere/issues/1627) +- Fix resource ordering issue with the same creation timestamp #[1750](https://github.com/kubesphere/kubesphere/pull/1750) +- Fix the issue of failing to edit configuration file when editing service #[41](https://github.com/kubesphere/console/issues/41) diff --git a/content/en/docs/v3.4/release/release-v300.md b/content/en/docs/v3.4/release/release-v300.md new file mode 100644 index 000000000..f8ee049c2 --- /dev/null +++ b/content/en/docs/v3.4/release/release-v300.md @@ -0,0 +1,207 @@ +--- +title: "Release Notes for 3.0.0" +keywords: "Kubernetes, KubeSphere, release-notes" +description: "KubeSphere release notes for 3.0.0." + +linkTitle: "Release Notes - 3.0.0" +weight: 18400 +--- + +## How to get v3.0.0 + +- [Install KubeSphere v3.0.0 on Linux](../../installing-on-linux/) +- [Install KubeSphere v3.0.0 on existing Kubernetes](../../installing-on-kubernetes/) + +## Release Notes + +## **Installer** + +### FEATURES + +- A brand-new installer: [KubeKey](https://github.com/kubesphere/kubekey), v1.0.0, which is a turnkey solution to installing Kubernetes with KubeSphere on different platforms. It is more easy to use and reduces the dependency on OS environment + +### UPGRADES & ENHANCEMENTS + +- Be compatible with Kubernetes 1.15.x, 1.16.x, 1.17.x and 1.18.x for [ks-installer](https://github.com/kubesphere/ks-installer), v3.0.0 +- [KubeKey](https://github.com/kubesphere/kubekey) officially supports Kubernetes 1.15.12, 1.16.13, 1.17.9 and 1.18.6 (Please avoid using KubeKey to install Kubernetes 1.15 to 1.15.5 and 1.16 to 1.16.2, because Kubernetes has an [API validation issue](https://github.com/kubernetes/kubernetes/issues/83778)) +- Add support for EulerOS, UOS and KylinOS +- Add support for Kunpeng and Phytium CPU +- Use ClusterConfiguration CRD to store ks-installer's configuration instead of ConfigMap + +## **Cluster Management** + +### FEATURES + +- Support management of multiple Kubernetes clusters +- Support Federated Deployment and Federated StatefulSet across multiple clusters + +## **Observability** + +### FEATURES + +- Support custom monitoring for 3rd-party application metrics in KubeSphere console +- Add Kubernetes and KubeSphere auditing support, including audit event archiving, searching and alerting +- Add Kubernetes event management support, including Kubernetes event archiving, searching and alerting based by [kube-events](https://github.com/kubesphere/kube-events) +- Add tenant control to auditing and support Kubernetes event searching. 
A tenant user can only search his or her own auditing logs and Kubernetes events +- Support archiving auditing logs and Kubernetes events to Elasticsearch, Kafka or Fluentd +- Add multi-tenant notification support by [Notification Manager](https://github.com/kubesphere/notification-manager) +- Support Alertmanager v0.21.0 + +### UPGRADES & ENHANCEMENTS + +- Upgrade Prometheus Operator to v0.38.3 (KubeSphere customized version ) +- Upgrade Prometheus to v2.20.1 +- Upgrade Node Exporter to v0.18.1 +- Upgrade kube-state-metrics to v1.9.6 +- Upgrade metrics server to v0.3.7 +- metrics-server is enabled by default (Disabled if KubeSphere is installed on existing Kubernetes) +- Upgrade Fluent Bit Operator to v0.2.0 +- Upgrade Fluent Bit to v1.4.6 +- Significantly improve log searching performance +- Allow platform admins to view pod logs from deleted namespaces +- Adjust the display style of log searching results in Toolbox +- Optimize log collection configuration for log files on pod's volume + +### BUG FIXES + +- Fix time skew in metric graphs for newly created namespaces (#[2868](https://github.com/kubesphere/kubesphere/issues/2868)) +- Fix workload-level alerting not working as expected (#[2834](https://github.com/kubesphere/kubesphere/issues/2834)) +- Fix no metric data for NotReady nodes + +## **DevOps** + +### FEATURES + +- Refactor DevOps framework, and use CRDs to manage DevOps resources + +### UPGRADES & ENHANCEMENTS + +- Remove Sonarqube from installer default packages, and support for external Sonarqube + +### BUG FIXES + +- Fix the issue that DevOps permission data is missing in a very limited number of cases + +- Fix the issue that the Button in the Stage page doesn't work (#[449](https://github.com/kubesphere/console/issues/449)) +- Fix the issue that the parameterized pipeline failed to send the parameter's value (#[2699](https://github.com/kubesphere/kubesphere/issues/2699)) + +## **App Store** + +### FEATURES + +- Support Helm V3 +- Support deploying application templates onto multiple clusters +- Support application template upgrade +- Users can view events that occur during repository synchronization + +### UPGRADES & ENHANCEMENTS + +- Users can use the same application repository name + +- Support the application template which contains CRDs + +- Merge all OpenPitrix services into one service + +- Support HTTP basic authentication when adding an application repository + +- Add and upgrade below apps in App Store: + + | App Name | App Version | Chart Version | + | ---------------------- | ----------- | :------------ | + | AWS EBS CSI Driver | 0.5.0 | 0.3.0 | + | AWS EFS CSI Driver | 0.3.0 | 0.1.0 | + | AWS FSX CSI Driver | 0.1.0 | 0.1.0 | + | Elasticsearch Exporter | 1.1.0 | 3.3.0 | + | etcd | 3.3.12 | 0.1.1 | + | Harbor | 2.0.0 | 1.4.0 | + | Memcached | 1.5.20 | 3.2.3 | + | Minio master | | 5.0.26 | + | MongoDB | 4.2.1 | 0.3.0 | + | MySQL | 5.7.30 | 1.6.6 | + | MySQL Exporter | 0.11.0 | 0.5.3 | + | Nginx | 1.18.0 | 1.3.2 | + | PorterLB | 0.3-alpha | 0.1.3 | + | PostgreSQL | 12.0 | 0.3.2 | + | RabbitMQ | 3.8.1 | 0.3.0 | + | Redis | 5.0.5 | 0.3.2 | + | Redis Exporter | 1.3.4 | 3.4.1 | + | Tomcat | 8.5.41 | 0.4.1+1 | + +### BUG FIXES + +- Fix the issue of insufficient length of attachment IDs + +## **Network** + +### FEATURES + +- Support project network isolation by adding controllers to manage custom project network policies +- Support workspace network isolation +- Support adding, viewing, modifying and deleting native Kubernetes network policies + +## **Service Mesh** + 
+### FEATURES + +- Support cleaning Jaeger ES Indexer + +### UPGRADES & ENHANCEMENTS + +- Upgrade Istio to v1.4.8 + +## **Storage** + +### FEATURES + +- Support volume snapshot management +- Support storage capacity management +- Support volume monitoring + +## **Security** + +### FEATURES + +- Support LDAP and OAuth login +- Support custom workspace roles +- Support custom DevOps project roles +- Support access control across multiple clusters +- Support pod security context (#[1453](https://github.com/kubesphere/kubesphere/issues/1453)) + +### UPGRADES & ENHANCEMENTS + +- Simplify the role definition +- Optimize built-in roles + +### BUG FIXES + +- Fix the issue of login failure due to node clock skew + +## **Globalization** + +### FEATURES + +- Add support for new languages in the web console, including Spanish and Traditional Chinese + +## **User Experience** + +### FEATURES + +- Add support for history record viewing in Toolbox. Users can re-visit the Clusters/Workspaces/Projects/DevOps Projects that they recently visited, which can also be launched through shortcut keys + +### UPGRADES & ENHANCEMENTS + +- Refactor global navigation +- Refactor breadcrumbs in detail pages +- Refactor data watching in the resources list +- Simplify project creation +- Refactor composing application creation, and support creating a composing application through YAML +- Support workload revision through YAML +- Optimize the display of log query results +- Refactor app store deployment form +- Support helm chart schema (#[schema-files](https://helm.sh/docs/topics/charts/#schema-files)) + +### BUG FIXES + +- Fix the error when editing ingress annotations (#[1931](https://github.com/kubesphere/kubesphere/issues/1931)) +- Fix container probes when editing in workload edit template modal +- Fix XSS security problems of the server-side templates \ No newline at end of file diff --git a/content/en/docs/v3.4/release/release-v310.md b/content/en/docs/v3.4/release/release-v310.md new file mode 100644 index 000000000..4028daeb7 --- /dev/null +++ b/content/en/docs/v3.4/release/release-v310.md @@ -0,0 +1,175 @@ +--- +title: "Release Notes for 3.1.0" +keywords: "Kubernetes, KubeSphere, release notes" +description: "KubeSphere Release Notes for 3.1.0" +linkTitle: "Release Notes - 3.1.0" +weight: 18300 +--- + +## How to Install v3.1.0 + +- [Install KubeSphere v3.1.0 on Linux](https://github.com/kubesphere/kubekey) +- [Install KubeSphere v3.1.0 on an existing Kubernetes cluster](https://github.com/kubesphere/ks-installer) + +## New Features and Enhancements + +### Multi-cluster management + +- Simplified the steps to import Member Clusters with configuration validation (for example, `jwtSecret`) added. ([#3232](https://github.com/kubesphere/kubesphere/issues/3232)) +- Refactored the cluster controller and optimized the logic. ([#3234](https://github.com/kubesphere/kubesphere/issues/3234)) +- Upgraded the built-in web Kubectl, the version of which is now consistent with your Kubernetes cluster version. ([#3103](https://github.com/kubesphere/kubesphere/issues/3103)) +- Support customized resynchronization period of cluster controller. ([#3213](https://github.com/kubesphere/kubesphere/issues/3213)) +- Support lightweight installation of Member Clusters without components such as Redis and OpenLDAP. ([#3056](https://github.com/kubesphere/kubesphere/issues/3056)) +- Support high availability of Tower agent and server. 
([#31](https://github.com/kubesphere/tower/issues/31)) + +### KubeEdge integration + +You can now enable KubeEdge in your cluster and manage edge nodes on the KubeSphere console. ([#3070](https://github.com/kubesphere/kubesphere/issues/3070)) + +- Support the installation of both cloud and edge modules of KubeEdge. +- Support adding KubeEdge through the KubeSphere console. +- Support the deployment of workloads on edge nodes. +- The logs and monitoring data of edge nodes can be collected. +- The network of edge nodes can be configured automatically as they join or leave a cluster. +- Taints can be added automatically as an edge node joins your cluster. +- You can use `nodeAffinity` to prevent cloud workloads (for example, DaemonSets) from being deployed to edge nodes. ([#1295](https://github.com/kubesphere/ks-installer/pull/1295), [#1297](https://github.com/kubesphere/ks-installer/pull/1297) and [#1300](https://github.com/kubesphere/ks-installer/pull/1300)) + +### Authorization and authentication management +- Added ServiceAccount management. ([#3211](https://github.com/kubesphere/kubesphere/issues/3211)) +- Improved the LDAP authentication plugin and added support for LDAPS and search filtering. ([#2970](https://github.com/kubesphere/kubesphere/issues/2970) and [#3766](https://github.com/kubesphere/kubesphere/issues/3766)) +- Improved the identify provider plugin and simplified the configuration of identify providers. ([#2970](https://github.com/kubesphere/kubesphere/issues/2970)) +- New users now see a prompt to change the old password when first logging in to KubeSphere. +- New users now need to confirm account information when logging in to KubeSphere through a third party. +- Support [CAS](https://apereo.github.io/cas/5.0.x/protocol/CAS-Protocol-Specification.html) as an available identity provider. ([#3047](https://github.com/kubesphere/kubesphere/issues/3047)) +- Support [OIDC](https://openid.net/specs/openid-connect-core-1_0.html) as an available identity provider. ([#2941](https://github.com/kubesphere/kubesphere/issues/2941)) +- Support IDaaS (Alibaba Cloud Identity as a Service) as an available identity provider. ([#2997](https://github.com/kubesphere/kubesphere/pull/2997)) + + +### Multi-tenant management +- Users now can configure departments in a workspace and assign users to the department. All users in the department can have the same role in a project or DevOps project. ([#2940](https://github.com/kubesphere/kubesphere/issues/2940)) +- Support workspace quotas which are used to manage resource usage of a workspace. ([#2939](https://github.com/kubesphere/kubesphere/issues/2939)) + +### Network + +- Added Kube-OVN. +- Support Calico IP pool management. ([#3057](https://github.com/kubesphere/kubesphere/issues/3057)) +- Support visual network topology. ([#3061](https://github.com/kubesphere/kubesphere/issues/3061) and [#583](https://github.com/kubesphere/kubesphere/issues/583)) +- A static IP address can be assigned to a Deployment now. ([#3058](https://github.com/kubesphere/kubesphere/issues/3058)) + +### Observability + +- Improved the current method of Prometheus integration. ([#3068](https://github.com/kubesphere/kubesphere/issues/3068) and [#1164](https://github.com/kubesphere/ks-installer/pull/1164); [guide](/docs/v3.3/faq/observability/byop/)) +- Added Thanos Ruler (Thanos v0.18.0) for the new alerting function. +- Upgraded Prometheus to v2.26.0. +- Upgraded Prometheus Operator to v0.42.1. +- Upgraded kube-state-metrics to v1.9.7. +- Upgraded metrics-server to v0.4.2. 
+- Upgraded Notification Manager to v1.0.0. ([Releases](https://github.com/kubesphere/notification-manager/releases)) +- Upgraded FluentBit Operator to v0.5.0. ([Releases](https://github.com/kubesphere/fluentbit-operator/releases)) +- Upgraded FluentBit to v1.6.9. +- Upgraded KubeEvents to v0.2.0. +- Upgraded Kube-Auditing to v0.1.2. + +#### Monitoring + +- Support configurations of ServiceMonitors on the KubeSphere console. ([#1031](https://github.com/kubesphere/console/pull/1301)) +- Support PromQL auto-completion and syntax highlighting. ([#1307](https://github.com/kubesphere/console/pull/1307)) +- Support customized monitoring at the cluster level. ([#3193](https://github.com/kubesphere/kubesphere/pull/3193)) +- Changed the HTTP ports of kube-scheduler and kube-controller-manager from `10251` and `10252` to the HTTPS ports of `10259` and `10257` respectively for data scraping. ([#1367](https://github.com/kubesphere/ks-installer/pull/1367)) + +#### Alerting + +- Prometheus-style alerting rules can be managed and configured now. ([#3181](https://github.com/kubesphere/kubesphere/pull/3181)) +- Support alerting rules at the platform level and project level. ([#3181](https://github.com/kubesphere/kubesphere/pull/3181)) +- Support real-time display of alerting rule status. ([#3181](https://github.com/kubesphere/kubesphere/pull/3181)) + +#### Notification management + +- Added new notification channels on the console including DingTalk, WeCom, Slack and Webhook to receive notifications. ([#3066](https://github.com/kubesphere/kubesphere/issues/3066)) + +#### Logging + +- Logs can be exported to [Loki](https://github.com/kubesphere/fluentbit-operator/blob/master/docs/plugins/output/loki.md) now. ([#39](https://github.com/kubesphere/fluentbit-operator/pull/39)) +- Support kubelet, Docker, and containerd log collection. ([#38](https://github.com/kubesphere/fluentbit-operator/pull/38)) +- Support [auditd](https://github.com/kubesphere/fluentbit-operator#auditd) log collection. ([#45](https://github.com/kubesphere/fluentbit-operator/pull/45)) + +### DevOps + +- Improved the error message of pipeline cron text. ([#2919](https://github.com/kubesphere/kubesphere/issues/2919)) +- Improved the interactive experience of creating pipelines. ([#1283](https://github.com/kubesphere/console/issues/1283)) +- Improved S2I error messages. ([#140](https://github.com/kubesphere/s2ioperator/issues/140)) +- Upgraded Jenkins to 2.249.1. ([#2618](https://github.com/kubesphere/kubesphere/issues/2618)) +- Added an approval mechanism for pipelines. Accounts with necessary permissions can review pipelines and approve them. ([#2483](https://github.com/kubesphere/kubesphere/issues/2483) and [#3006](https://github.com/kubesphere/kubesphere/issues/3006)) +- Multiple pipelines can be started and run at the same time. ([#1811](https://github.com/kubesphere/kubesphere/issues/1811)) +- The pipeline status can be viewed on the DevOps project **Pipelines** page. ([#3007](https://github.com/kubesphere/kubesphere/issues/3007)) +- Pipelines can be triggered by tags now. ([#3051](https://github.com/kubesphere/kubesphere/issues/3051)) +- Support pipeline cloning. ([#3053](https://github.com/kubesphere/kubesphere/issues/3053)) +- Support GitLab multi-branch pipelines. ([#3100](https://github.com/kubesphere/kubesphere/issues/3100)) +- Support S2I Webhook. ([#6](https://github.com/kubesphere/s2ioperator/issues/6)) +- Jenkins in KubeSphere is now deployed as a distribution ([#2182](https://github.com/kubesphere/kubesphere/issues/2182)). 
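+
+The alerting items above refer to Prometheus-style alerting rules that are now managed through the console and evaluated by Thanos Ruler. For readers unfamiliar with the format, the following is only an illustrative sketch of such a rule, expressed as a Prometheus Operator `PrometheusRule` manifest; the resource KubeSphere creates under the hood, as well as every name and threshold below, is an assumption for illustration. In practice you would typically create and edit rules from the console's alerting pages rather than applying YAML by hand.
+
+```yaml
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: sample-workload-alerts   # hypothetical rule name
+  namespace: demo-project        # hypothetical project namespace
+spec:
+  groups:
+    - name: workload.rules
+      rules:
+        - alert: WorkloadHighCPU
+          # Fire when containers in demo-project use more than 0.8 CPU cores for 5 minutes.
+          expr: 'sum(rate(container_cpu_usage_seconds_total{namespace="demo-project"}[5m])) > 0.8'
+          for: 5m
+          labels:
+            severity: warning
+          annotations:
+            summary: High CPU usage in demo-project
+```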
+ +### App Store and apps + +- The reason for app template deployment failure is now available for check. ([#3036](https://github.com/kubesphere/kubesphere/issues/3036), [#3001](https://github.com/kubesphere/kubesphere/issues/3001) and [#2951](https://github.com/kubesphere/kubesphere/issues/2951)) +- Support batch deleting of app templates. +- Support editing of deployed app templates. +- A new built-in app [XenonDB](https://github.com/radondb/xenondb) is now available in the App Store. Based on MySQL, it is an open-source tool that provides high availability cluster solutions. + +### Microservices governance + +- The KubeSphere console now displays the traffic direction of microservices in composing apps. ([#3153](https://github.com/kubesphere/kubesphere/issues/3153)) +- Support Kiali. Users can now manage Istio directly through Kiali. ([#3106](https://github.com/kubesphere/kubesphere/issues/3106)) +- Support NGINX Ingress Gateway monitoring with NGINX Ingress Controller metrics added. ([#1205](https://github.com/kubesphere/ks-installer/pull/1205)) +- A route can be added now when an app is created. ([#1426](https://github.com/kubesphere/console/issues/1426)) +- Upgraded Istio to 1.6.10. ([#3326](https://github.com/kubesphere/kubesphere/issues/3236)) + +### Metering and billing + +- Users now can see resource consumption of different resources at different levels, such as clusters, workspaces, and apps. ([#3062](https://github.com/kubesphere/kubesphere/issues/3062)) +- The resource price can be set in a ConfigMap to provide billing information of resources on the console. + +### KubeSphere console UI + +- Improved homepage loading. +- Improved pipeline configurations on graphical editing panels. +- Improved error messages of pipeline status. +- Improved the way code repositories are filtered. +- Improved the configuration of node scheduling policies. +- Improved deployment configurations. +- Relocated the built-in web kubectl to a separate page. + +## Major Technical Adjustments + +- Upgraded Kubernetes version dependencies from v1.17 to v1.18. ([#3274](https://github.com/kubesphere/kubesphere/issues/3274)) +- Upgraded the Prometheus client_golang version dependency to v1.5.1 and Prometheus version dependency to v1.8.2. ([#3097](https://github.com/kubesphere/kubesphere/pull/3097)) +- Refactored OpenPitrix based on CRDs and fixed some issues caused by the original architecture. ([#3036](https://github.com/kubesphere/kubesphere/issues/3036), [#3001](https://github.com/kubesphere/kubesphere/issues/3001), [#2995](https://github.com/kubesphere/kubesphere/issues/2995), [#2981](https://github.com/kubesphere/kubesphere/issues/2981), [#2954](https://github.com/kubesphere/kubesphere/issues/2954), [#2951](https://github.com/kubesphere/kubesphere/issues/2951), [#2783](https://github.com/kubesphere/kubesphere/issues/2783), [#2713](https://github.com/kubesphere/kubesphere/issues/2713), [#2700](https://github.com/kubesphere/kubesphere/issues/2700) and [#1903](https://github.com/kubesphere/kubesphere/issues/1903)) +- Refactored the previous alerting system with old alerting rules deprecated and removed its dependency on MySQL, Redis, etcd and other components. The new alerting system works based on Thanos Ruler and the build-in rules of Prometheus. Old alerting rules in KubeSphere v3.0.0 will be automatically changed to new alerting rules in KubeSphere v3.1.0 after upgrading. +- Refactored the notification system and removed its dependency on MySQL, Redis, etcd and other components. 
Notification channels are now configured for the entire cluster based on [Notification Manager](https://github.com/kubesphere/notification-manager/) in CRDs. In a multi-cluster architecture, if a notification channel is set for the Host cluster, it works for all Member Clusters. + +## Deprecated or Removed Features + +- The legacy alerting and notification system dependent on MySQL, Redis, etcd and other components is replaced by the new alerting and notification function. +- Changed the container terminal WebSocket API. ([#3041](https://github.com/kubesphere/kubesphere/issues/3041)) + +## Bug Fixes +- Fixed account login failures. ([#3132](https://github.com/kubesphere/kubesphere/issues/3132) and [#3357](https://github.com/kubesphere/kubesphere/issues/3357)) +- Fixed an issue where ANSI colors were not supported in container logs. ([#1322](https://github.com/kubesphere/kubesphere/issues/3044)) +- Fixed an issue where the Istio-related monitoring data of a microservices-based app could not be scraped if its project name started with `kube`. ([#3126](https://github.com/kubesphere/kubesphere/issues/3162)) +- Fixed an issue where viewers at different levels could use the container terminal. ([#3041](https://github.com/kubesphere/kubesphere/issues/3041)) +- Fixed a deletion failure issue of cascade resources in a multi-cluster architecture. ([#2912](https://github.com/kubesphere/kubesphere/issues/2912)) +- Fixed the incompatibility issue with Kubernetes 1.19 and above. ([#2928](https://github.com/kubesphere/kubesphere/issues/2928) and [#2928](https://github.com/kubesphere/kubesphere/issues/2928)) +- Fixed the invalid button to view Service monitoring data. ([#1394](https://github.com/kubesphere/console/issues/1394)) +- Fixed an issue where the grayscale release Service name could not be the same as the app label. ([#3128](https://github.com/kubesphere/kubesphere/issues/3128)) +- Fixed an issue where the status of microservices-based app could not be updated. ([#3241](https://github.com/kubesphere/kubesphere/issues/3241)) +- Fixed an issue where a workspace in a Member Cluster would be deleted if its name was the same as a workspace in the Host Cluster. ([#3169](https://github.com/kubesphere/kubesphere/issues/3169)) +- Fixed the connection failure between clusters if agent connection is used. ([#3202](https://github.com/kubesphere/kubesphere/pull/3203)) +- Fixed a multi-cluster status display issue. ([#3135](https://github.com/kubesphere/kubesphere/issues/3135)) +- Fixed the workload deployment failure in DevOps pipelines. ([#3112](https://github.com/kubesphere/kubesphere/issues/3112)) +- Fixed an issue where the account with the `admin` role in a DevOps project could not download artifacts. ([#3088](https://github.com/kubesphere/kubesphere/issues/3083)) +- Fixed an issue of DevOps pipeline creation failure. ([#3105](https://github.com/kubesphere/kubesphere/issues/3105)) +- Fixed an issue of triggering pipelines in a multi-cluster architecture. ([#2626](https://ask.kubesphere.io/forum/d/2626-webhook-jenkins)) +- Fixed an issue of data loss when a pipeline was edited. ([#1270](https://github.com/kubesphere/console/issues/1270)) +- Fixed a display issue of **Docker Container Register Credentials**. ([#1269](https://github.com/kubesphere/console/issues/1269)) +- Fixed a localization issue of Chinese unit in the code analysis result. ([#1278](https://github.com/kubesphere/console/issues/1278)) +- Fixed a display issue caused by Boolean values in Jenkinsfiles. 
([#3043](https://github.com/kubesphere/kubesphere/issues/3043)) +- Fixed a display issue on the **Storage Management** page caused by the lack of `StorageClassName` in a PVC. ([#1109](https://github.com/kubesphere/ks-installer/issues/1109)) diff --git a/content/en/docs/v3.4/release/release-v311.md b/content/en/docs/v3.4/release/release-v311.md new file mode 100644 index 000000000..7a7908ccf --- /dev/null +++ b/content/en/docs/v3.4/release/release-v311.md @@ -0,0 +1,168 @@ +--- +title: "Release Notes for 3.1.1" +keywords: "Kubernetes, KubeSphere, release notes" +description: "KubeSphere Release Notes for 3.1.1" +linkTitle: "Release Notes - 3.1.1" +weight: 18200 +--- + +## User Experience + +### Enhancements + +- Add the function of deleting related resources in batches during workload deletion. [kubesphere/console#1933](https://github.com/kubesphere/console/pull/1933) +- Optimize dialog boxes. [kubesphere/console#2016](https://github.com/kubesphere/console/pull/2016) +- Add the container terminal function to projects in the `system-workspace` workspace. [kubesphere/console#1921](https://github.com/kubesphere/console/pull/1921) + +### Bug Fixes + +- Remove the function of editing external network access of headless Services on the Service management page. [kubesphere/console#2055](https://github.com/kubesphere/console/issues/2055) +- Fix the incorrect environment variable placeholders displayed in workload creation. [kubesphere/console#2008](https://github.com/kubesphere/console/pull/2008) +- Fix an issue where the login page is not displayed when users log out from certain pages. [kubesphere/console#2009](https://github.com/kubesphere/console/pull/2009) +- Fix an issue on the Pod template editing page, where the protocol drop-down list is not completely displayed. [kubesphere/console#1944](https://github.com/kubesphere/console/pull/1944) +- Fix a probe format verification issue in workload creation. [kubesphere/console#1941](https://github.com/kubesphere/console/pull/1941) +- Fix the incorrect DevOps project list displayed on the workspace member details page. [#1936](https://github.com/kubesphere/console/pull/1936) +- Fix incorrect and missing UI text. [kubesphere/console#1879](https://github.com/kubesphere/console/pull/1879) [kubesphere/console#1880](https://github.com/kubesphere/console/pull/1880) [kubesphere/console#1895](https://github.com/kubesphere/console/pull/1895) + +## Observability + +### Enhancements + +- Optimize port format restrictions in notification settings. [#1885](https://github.com/kubesphere/console/pull/1885) +- Add the function of specifying an existing Prometheus stack during installation. [#1528](https://github.com/kubesphere/ks-installer/pull/1528) + +### Bug Fixes + +- Fix the mail server synchronization error. [#1969](https://github.com/kubesphere/console/pull/1969) +- Fix an issue where the notification manager is reset after installer restart. [#1564](https://github.com/kubesphere/ks-installer/pull/1564) +- Fix an issue where the alerting policy cannot be deleted after the monitored object is deleted. [#2045](https://github.com/kubesphere/console/pull/2045) +- Add a default template for monitoring resource creation. [#2029](https://github.com/kubesphere/console/pull/2029) +- Fix an issue where containers display only outdated logs. [#1972](https://github.com/kubesphere/console/issues/1972) +- Fix the incorrect timestamp in alerting information. [#1978](https://github.com/kubesphere/console/pull/1978) +- Optimize parameter rules in alerting policy creation. 
[#1958](https://github.com/kubesphere/console/pull/1958) +- Fix an issue in custom monitoring, where metrics are not completely displayed due to the incorrect height of the view area. [#1989](https://github.com/kubesphere/console/pull/1989) +- Adjust the limits of the node exporter and kube-state-metrics. [#1537](https://github.com/kubesphere/ks-installer/pull/1537) +- Adjust the selector of the etcdHighNumberOfFailedGRPCRequests rule to prevent incorrect etcd alerts. [#1540](https://github.com/kubesphere/ks-installer/pull/1540) +- Fix an issue during system upgrade, where the events ruler component is not upgraded to the latest version. [#1594](https://github.com/kubesphere/ks-installer/pull/1594) +- Fix bugs of the kube_node_status_allocatable_memory_bytes and kube_resourcequota selectors. [#1560](https://github.com/kubesphere/ks-installer/pull/1560) + +## Service Mesh + +### Enhancements + +- Add a time range selector to the Tracing tab. [#2022](https://github.com/kubesphere/console/pull/2022) + +### Bug Fixes + +- Fix an issue where the Tracing tab is incorrectly displayed. [kubesphere/console#1890](https://github.com/kubesphere/console/pull/1890) + +## DevOps + +### Enhancements + +- Add the function of filtering branches by branch name in GitLab multi-branch pipelines. [kubesphere/console#2077](https://github.com/kubesphere/console/pull/2077) +- Rename the **Rerun** button on the b2i page to **Run**. [kubesphere/console#1981](https://github.com/kubesphere/console/pull/1981) + +### Bug Fixes + +- Fix an issue where credential status cannot be synchronized. [kubesphere/console#1956](https://github.com/kubesphere/console/pull/1956) +- Fix incorrect image tags in CI automatic image pushing. [kubesphere/console#2037](https://github.com/kubesphere/console/pull/2037) +- Fix an issue on the pipeline details page, where users cannot return to the previous page. [kubesphere/console#1996](https://github.com/kubesphere/console/pull/1996) +- Fix the inconsistent dialog box names of the image builder. [kubesphere/console#1922](https://github.com/kubesphere/console/pull/1922) +- Fix an issue in DevOps projects, where the updates are reset when kubeconfig credentials are created. [kubesphere/console#1990](https://github.com/kubesphere/console/pull/1990) +- Fix incorrect trusted users in multi-branch pipelines. [kubesphere/console#1987](https://github.com/kubesphere/console/pull/1987) +- Fix an issue in DevOps project pipelines, where stage labels are reset when other settings are changed but not saved. [kubesphere/console#1979](https://github.com/kubesphere/console/pull/1979) +- Fix the incorrect shell and labels displayed in pipelines. [kubesphere/console#1970](https://github.com/kubesphere/console/pull/1970) +- Fix incorrect information displayed in the pipeline basic information dialog box. [kubesphere/console#1955](https://github.com/kubesphere/console/pull/1955) +- Fix the API error generated when multi-branch pipelines are run. [kubesphere/console#1954](https://github.com/kubesphere/console/pull/1954) +- Fix an issue in pipelines, where webhook pushing settings do not take effect. [kubesphere/console#1953](https://github.com/kubesphere/console/pull/1953) +- Optimize the UI text of the drag-and-drop function in the pipeline editor. [kubesphere/console#1949](https://github.com/kubesphere/console/pull/1949) +- Add default build environment settings for service build from source code. 
[kubesphere/console#1993](https://github.com/kubesphere/console/pull/1993) + +## Authentication and Authorization + +### Bug Fixes + +- Fix the incorrect last login time of users. [kubesphere/console#1881](https://github.com/kubesphere/console/pull/1881) +- Fix an issue in workspaces, where the `admin` user cannot view resource quotas. [kubesphere/ks-installer#1551](https://github.com/kubesphere/ks-installer/pull/1551) [kubesphere/console#2062](https://github.com/kubesphere/console/pull/2062) +- Fix an issue where project members cannot connect to container terminals. [kubesphere/console#2002](https://github.com/kubesphere/console/pull/2002) +- Fix an issue where the administrator cannot be specified when a project is assigned to a workspace. [kubesphere/console#1961](https://github.com/kubesphere/console/pull/1961) +- Fix the duplicate permission names in workspace role creation. [kubesphere/console#1945](https://github.com/kubesphere/console/pull/1945) + +## Multi-tenant Management + +### Bug Fixes + +- Fix an issue where deleted roles can be associated with user groups. [#1899](https://github.com/kubesphere/console/pull/1899) [#3897](https://github.com/kubesphere/kubesphere/pull/3897) +- Fix an issue where deletion of long usernames can cause system collapse. [kubesphere/ks-installer#1450](https://github.com/kubesphere/ks-installer/pull/1450) [kubesphere/kubesphere#3796](https://github.com/kubesphere/kubesphere/pull/3796) +- Fix an error generated when project roles are bound to user groups. [kubesphere/console#1967](https://github.com/kubesphere/console/pull/1967) +- Fix incorrect workspace quotas displayed in multi-cluster environments. [kubesphere/console#2013](https://github.com/kubesphere/console/pull/2013) + +## Multi-cluster Management + +### Enhancements + +- Optimize the error message generated when the configuration of a member cluster is incorrect. [kubesphere/console#2084](https://github.com/kubesphere/console/pull/2084) [kubesphere/console#1965](https://github.com/kubesphere/console/pull/1965) + +### Bug Fixes + +- Fix an issue where node labels in member clusters cannot be obtained. [kubesphere/console#1927](https://github.com/kubesphere/console/pull/1927) +- Fix an issue on the project list page, where multi-cluster projects are not correctly identified. [kubesphere/console#2059](https://github.com/kubesphere/console/pull/2059) +- Fix the incorrect gateway status displayed in multi-cluster projects. [kubesphere/console#1939](https://github.com/kubesphere/console/pull/1939) + +## Metering and Billing + +### Enhancements + +- Optimize the metering and billing UI. [#1896](https://github.com/kubesphere/console/pull/1896) +- Change the color of a button on the metering and billing page. [#1934](https://github.com/kubesphere/console/pull/1934) + +### Bug Fixes + +- Fix an issue where OpenPitrix resources are not included in metering and billing. [#3871](https://github.com/kubesphere/kubesphere/pull/3871) +- Fix an error generated in metering and billing for the `system-workspace` workspace. [#2083](https://github.com/kubesphere/console/pull/2083) +- Fix an issue where projects are not completely displayed in the multi-cluster metering and billing list. [#2066](https://github.com/kubesphere/console/pull/2066) +- Fix an error on the billing page generated when a dependent cluster is not loaded. [#2054](https://github.com/kubesphere/console/pull/2054) + +## App Store + +### Enhancements + +- Optimize the UI layout and text on the app template creation page. 
[kubesphere/console#2012](https://github.com/kubesphere/console/pull/2012) [kubesphere/console#2063](https://github.com/kubesphere/console/pull/2063) +- Optimize the app template import function. [kubesphere/openpitrix-jobs#18](https://github.com/kubesphere/openpitrix-jobs/pull/18) +- Add the RadonDB PostgreSQL app to the App Store. [kubesphere/openpitrix-jobs#17](https://github.com/kubesphere/openpitrix-jobs/pull/17) + +## Security + +### Enhancements + +- Switch the branch of jwt-go to fix CVE-2020-26160. [#3991](https://github.com/kubesphere/kubesphere/pull/3991) +- Upgrade the Protobuf version to v1.3.2 to fix CVE-2021-3121. [#3944](https://github.com/kubesphere/kubesphere/pull/3944) +- Upgrade the Crypto version to the latest version to fix CVE-2020-29652. [#3997](https://github.com/kubesphere/kubesphere/pull/3997) +- Remove the `yarn.lock` file to prevent incorrect CVE bug reports. [#2024](https://github.com/kubesphere/console/pull/2024) + +### Bug Fixes + +- Fix an issue where container terminal can be accessed without authorization. [kubesphere/kubesphere#3956](https://github.com/kubesphere/kubesphere/pull/3956) + +## Storage + +### Enhancements + +- Improve the concurrency performance of the S3 uploader. [#4011](https://github.com/kubesphere/kubesphere/pull/4011) +- Add preset CSI Provisioner CR settings. [#1536](https://github.com/kubesphere/ks-installer/pull/1536) + +### Bug Fixes + +- Remove the invalid function of automatic storage class detection. [#3947](https://github.com/kubesphere/kubesphere/pull/3947) +- Fix incorrect storage resource units of project quotas.[#3973](https://github.com/kubesphere/kubesphere/issues/3973) + +## KubeEdge Integration + +### Enhancements + +- Add support for KubeEdge v1.6.2. [#1527](https://github.com/kubesphere/ks-installer/pull/1527) [#1542](https://github.com/kubesphere/ks-installer/pull/1542) + +### Bug Fixes + +- Fix the incorrect `advertiseAddress` setting of the KubeEdge CloudCore component. [#1561](https://github.com/kubesphere/ks-installer/pull/1561) \ No newline at end of file diff --git a/content/en/docs/v3.4/release/release-v320.md b/content/en/docs/v3.4/release/release-v320.md new file mode 100644 index 000000000..6bdfd0402 --- /dev/null +++ b/content/en/docs/v3.4/release/release-v320.md @@ -0,0 +1,177 @@ +--- +title: "Release Notes for 3.2.0" +keywords: "Kubernetes, KubeSphere, release notes" +description: "KubeSphere Release Notes for 3.2.0" +linkTitle: "Release Notes - 3.2.0" +weight: 18100 +--- + +## Multi-tenancy & Multi-cluster + +### New Features + +- Add support for setting the host cluster name in multi-cluster scenarios, which defaults to `host`. ([#4211](https://github.com/kubesphere/kubesphere/pull/4211), [@yuswift](https://github.com/yuswift)) +- Add support for setting the cluster name in single-cluster scenarios. ([#4220](https://github.com/kubesphere/kubesphere/pull/4220), [@yuswift](https://github.com/yuswift)) +- Add support for initializing the default cluster name by using globals.config. ([#2283](https://github.com/kubesphere/console/pull/2283), [@harrisonliu5](https://github.com/harrisonliu5)) +- Add support for scheduling Pod replicas across multiple clusters when creating a Deployment. ([#2191](https://github.com/kubesphere/console/pull/2191), [@weili520](https://github.com/weili520)) +- Add support for changing cluster weights on the project details page. 
([#2192](https://github.com/kubesphere/console/pull/2192), [@weili520](https://github.com/weili520)) + +### Bug Fixes + +- Fix an issue in the **Create Deployment** dialog box in **Cluster Management**, where a multiple-cluster project can be selected by directly entering the project name. ([#2125](https://github.com/kubesphere/console/pull/2125), [@fuchunlan](https://github.com/fuchunlan)) +- Fix an error that occurs when workspace or cluster basic information is edited. ([#2188](https://github.com/kubesphere/console/pull/2188), [@xuliwenwenwen](https://github.com/xuliwenwenwen)) +- Remove information about deleted clusters on the **Basic Information** page of the host cluster. ([#2211](https://github.com/kubesphere/console/pull/2211), [@fuchunlan](https://github.com/fuchunlan)) +- Add support for sorting Services and editing Service settings in multi-cluster projects. ([#2167](https://github.com/kubesphere/console/pull/2167), [@harrisonliu5](https://github.com/harrisonliu5)) +- Refactor the gateway feature of multi-cluster projects. ([#2275](https://github.com/kubesphere/console/pull/2275), [@harrisonliu5](https://github.com/harrisonliu5)) +- Fix an issue where multi-cluster projects cannot be deleted after the workspace is deleted. ([#4365](https://github.com/kubesphere/kubesphere/pull/4365), [@wansir](https://github.com/wansir)) + +## Observability + +### New Features + +- Add support for HTTPS communication with Elasticsearch. ([#4176](https://github.com/kubesphere/kubesphere/pull/4176), [@wanjunlei](https://github.com/wanjunlei)) +- Add support for setting GPU types when scheduling GPU workloads. ([#4225](https://github.com/kubesphere/kubesphere/pull/4225), [@zhu733756](https://github.com/zhu733756)) +- Add support for validating notification settings. ([#4216](https://github.com/kubesphere/kubesphere/pull/4216), [@wenchajun](https://github.com/wenchajun)) +- Add support for importing Grafana dashboards by specifying a dashboard URL or by uploading a Grafana dashboard JSON file. KubeSphere automatically converts Grafana dashboards into KubeSphere cluster dashboards. ([#4194](https://github.com/kubesphere/kubesphere/pull/4194), [@zhu733756](https://github.com/zhu733756)) +- Add support for creating Grafana dashboards in **Custom Monitoring**. ([#2214](https://github.com/kubesphere/console/pull/2214), [@harrisonliu5](https://github.com/harrisonliu5)) +- Optimize the **Notification Configuration** feature. ([#2261](https://github.com/kubesphere/console/pull/2261), [@xuliwenwenwen](https://github.com/xuliwenwenwen)) +- Add support for setting a GPU limit in the **Edit Default Container Quotas** dialog box. ([#2253](https://github.com/kubesphere/console/pull/2253), [@weili520](https://github.com/weili520)) +- Add a default GPU monitoring dashboard.([#2580](https://github.com/kubesphere/console/pull/2580), [@harrisonliu5](https://github.com/harrisonliu5)) +- Add the **Leader** tag to the etcd leader on the etcd monitoring page. ([#2445](https://github.com/kubesphere/console/pull/2445), [@xuliwenwenwen](https://github.com/xuliwenwenwen)) + +### Bug Fixes + +- Fix the incorrect Pod information displayed on the **Alerting Messages** page and alerting policy details page. ([#2215](https://github.com/kubesphere/console/pull/2215), [@harrisonliu5](https://github.com/harrisonliu5)) + +## Authentication & Authorization + +### New Features + +- Add a built-in OAuth 2.0 server that supports OpenID Connect. 
([#3525](https://github.com/kubesphere/kubesphere/pull/3525), [@wansir](https://github.com/wansir)) +- Remove information confirmation required when an external identity provider is used. ([#4238](https://github.com/kubesphere/kubesphere/pull/4238), [@wansir](https://github.com/wansir)) + +### Bug Fixes + +- Fix incorrect source IP addresses in the login history. ([#4331](https://github.com/kubesphere/kubesphere/pull/4331), [@wansir](https://github.com/wansir)) + +## Storage + +### New Features + +- Change the parameters that determine whether volume clone, volume snapshot, and volume expansion are allowed. ([#2199](https://github.com/kubesphere/console/pull/2199), [@weili520](https://github.com/weili520)) +- Add support for setting the volume binding mode during storage class creation. ([#2220](https://github.com/kubesphere/console/pull/2220), [@weili520](https://github.com/weili520)) +- Add the volume instance management feature. ([#2226](https://github.com/kubesphere/console/pull/2226), [@weili520](https://github.com/weili520)) +- Add support for multiple snapshot classes. Users are allowed to select a snapshot type when creating a snapshot. ([#2218](https://github.com/kubesphere/console/pull/2218), [@weili520](https://github.com/weili520)) + +### Bug Fixes + +- Change the volume access mode options on the **Storage Settings** tab page. ([#2348](https://github.com/kubesphere/console/pull/2348), [@live77](https://github.com/live77)) + +## Network + +### New Features + +- Add the Route sorting, routing rule editing, and annotation editing features on the Route list page. ([#2165](https://github.com/kubesphere/console/pull/2165), [@harrisonliu5](https://github.com/harrisonliu5)) +- Refactor the cluster gateway and project gateway features. ([#2262](https://github.com/kubesphere/console/pull/2262), [@harrisonliu5](https://github.com/harrisonliu5)) +- Add the service name auto-completion feature in routing rule creation. ([#2196](https://github.com/kubesphere/console/pull/2196), [@wengzhisong-hz](https://github.com/wengzhisong-hz)) +- DNS optimizations for ks-console: + - Use the name of the ks-apiserver Service directly instead of `ks-apiserver.kubesphere-system.svc` as the API URL. + - Add a DNS cache plugin (dnscache) for caching DNS results. ([#2435](https://github.com/kubesphere/console/pull/2435), [@live77](https://github.com/live77)) + +### Bug Fixes + +- Add a **Cancel** button in the **Enable Gateway** dialog box. ([#2245](https://github.com/kubesphere/console/pull/2245), [@weili520](https://github.com/weili520)) + +## Apps & App Store + +### New Features + +- Add support for setting a synchronization interval during app repository creation and editing. ([#2311](https://github.com/kubesphere/console/pull/2311), [@xuliwenwenwen](https://github.com/xuliwenwenwen)) +- Add a disclaimer in the App Store. ([#2173](https://github.com/kubesphere/console/pull/2173), [@xuliwenwenwen](https://github.com/xuliwenwenwen)) +- Add support for dynamically loading community-developed Helm charts into the App Store. ([#4250](https://github.com/kubesphere/kubesphere/pull/4250), [@xyz-li](https://github.com/xyz-li)) + +### Bug Fixes + +- Fix an issue where the value of `kubesphere_app_template_count` is always `0` when `GetKubeSphereStats` is called. 
([#4130](https://github.com/kubesphere/kubesphere/pull/4130), [@Hanamichi](https://github.com/x893675)) + +## DevOps + +### New Features + +- Set the system to hide the **Branch** column on the **Run Records** tab page when the current pipeline is not a multi-branch pipeline. ([#2379](https://github.com/kubesphere/console/pull/2379), [@live77](https://github.com/live77)) +- Add the feature of automatically loading Jenkins configurations from ConfigMaps. ([#75](https://github.com/kubesphere/ks-devops/pull/75), [@JohnNiang](https://github.com/JohnNiang)) +- Add support for triggering pipelines by manipulating CRDs instead of calling Jenkins APIs. ([#41](https://github.com/kubesphere/ks-devops/issues/41), [@rick](https://github.com/LinuxSuRen)) +- Add support for containerd-based pipelines. ([#171](https://github.com/kubesphere/ks-devops/pull/171), [@rick](https://github.com/LinuxSuRen)) +- Add Jenkins job metadata into pipeline annotations. ([#254](https://github.com/kubesphere/ks-devops/issues/254), [@JohnNiang](https://github.com/JohnNiang)) + +### Bug Fixes + +- Fix an issue where credential creation and update fails when the value length of a parameter is too long. ([#123](https://github.com/kubesphere/ks-devops/pull/123), [@shihh](https://github.com/shihaoH)) +- Fix an issue where ks-apiserver crashes when the **Run Records** tab page of a parallel pipeline is opened. ([#93](https://github.com/kubesphere/ks-devops/pull/93), [@JohnNiang](https://github.com/JohnNiang)) + +### Dependency Upgrades + +- Upgrade the version of Configuration as Code to 1.53. ([#42](https://github.com/kubesphere/ks-jenkins/pull/42), [@rick](https://github.com/LinuxSuRen)) + +## Installation + +### New Features + +- Add support for Kubernetes v1.21.5 and v1.22.1. ([#634](https://github.com/kubesphere/kubekey/pull/634), [@pixiake](https://github.com/pixiake)) +- Add support for automatically setting the container runtime. ([#738](https://github.com/kubesphere/kubekey/pull/738), [@pixiake](https://github.com/pixiake)) +- Add support for automatically updating Kubernetes certificates. ([#705](https://github.com/kubesphere/kubekey/pull/705), [@pixiake](https://github.com/pixiake)) +- Add support for installing Docker and containerd using a binary file. ([#657](https://github.com/kubesphere/kubekey/pull/657), [@pixiake](https://github.com/pixiake)) +- Add support for Flannel VxLAN and direct routing. ([#606](https://github.com/kubesphere/kubekey/pull/606), [@kinglong08](https://github.com/kinglong08)) +- Add support for deploying etcd using a binary file. ([#634](https://github.com/kubesphere/kubekey/pull/634), [@pixiake](https://github.com/pixiake)) +- Add an internal load balancer for deploying a high availability system. ([#567](https://github.com/kubesphere/kubekey/pull/567), [@24sama](https://github.com/24sama)) + +### Bug Fixes + +- Fix a runtime.RawExtension serialization error. ([#731](https://github.com/kubesphere/kubekey/pull/731), [@pixiake](https://github.com/pixiake)) +- Fix a nil pointer error during cluster upgrade. ([#684](https://github.com/kubesphere/kubekey/pull/684), [@24sama](https://github.com/24sama)) +- Add support for updating certificates of Kubernetes v1.20.0 and later. ([#690](https://github.com/kubesphere/kubekey/pull/690), [@24sama](https://github.com/24sama)) +- Fix a DNS address configuration error.
([#637](https://github.com/kubesphere/kubekey/pull/637), [@pixiake](https://github.com/pixiake)) +- Fix a cluster creation error that occurs when no default gateway address exists. ([#661](https://github.com/kubesphere/kubekey/pull/661), [@liulangwa](https://github.com/liulangwa)) + +## User Experience +- Fix language mistakes and optimize wording. ([@Patrick-LuoYu](https://github.com/Patrick-LuoYu), [@Felixnoo](https://github.com/Felixnoo), [@serenashe](https://github.com/serenashe)) +- Fix incorrect function descriptions. ([@Patrick-LuoYu](https://github.com/Patrick-LuoYu), [@Felixnoo](https://github.com/Felixnoo), [@serenashe](https://github.com/serenashe)) +- Remove hard-coded and concatenated UI strings to better support UI localization and internationalization. ([@Patrick-LuoYu](https://github.com/Patrick-LuoYu), [@Felixnoo](https://github.com/Felixnoo), [@serenashe](https://github.com/serenashe)) +- Add conditional statements to display correct English singular and plural forms. ([@Patrick-LuoYu](https://github.com/Patrick-LuoYu), [@Felixnoo](https://github.com/Felixnoo), [@serenashe](https://github.com/serenashe)) +- Optimize the **Pod Scheduling Rules** area in the **Create Deployment** dialog box. ([#2170](https://github.com/kubesphere/console/pull/2170), [@qinyueshang](https://github.com/qinyueshang)) +- Fix an issue in the **Edit Project Quotas** dialog box, where the quota value changes to 0 when it is set to infinity. ([#2118](https://github.com/kubesphere/console/pull/2118), [@fuchunlan](https://github.com/fuchunlan)) +- Fix an issue in the **Create ConfigMap** dialog box, where the position of the hammer icon is incorrect when the data entry is empty. ([#2206](https://github.com/kubesphere/console/pull/2206), [@fuchunlan](https://github.com/fuchunlan)) +- Fix the incorrect default value of the time range drop-down list on the **Overview** page of projects. ([#2340](https://github.com/kubesphere/console/pull/2340), [@fuchunlan](https://github.com/fuchunlan)) +- Fix an error that occurs during login redirection, where redirection fails if the referer URL contains an ampersand (&). ([#2194](https://github.com/kubesphere/console/pull/2194), [@harrisonliu5](https://github.com/harrisonliu5)) +- Change **1 hours** to **1 hour** on the custom monitoring dashboard creation page. ([#2276](https://github.com/kubesphere/console/pull/2276), [@live77](https://github.com/live77)) +- Fix the incorrect Service types on the Service list page. ([#2178](https://github.com/kubesphere/console/pull/2178), [@xuliwenwenwen](https://github.com/xuliwenwenwen)) +- Fix the incorrect traffic data displayed in grayscale release job details. ([#2422](https://github.com/kubesphere/console/pull/2422), [@harrisonliu5](https://github.com/harrisonliu5)) +- Fix an issue in the **Edit Project Quotas** dialog box, where values with two decimal places and values greater than 8 cannot be set. ([#2127](https://github.com/kubesphere/console/pull/2127), [@weili520](https://github.com/weili520)) +- Allow the **About** dialog box to be closed by clicking other areas of the window. ([#2114](https://github.com/kubesphere/console/pull/2114), [@fuchunlan](https://github.com/fuchunlan)) +- Optimize the project title so that the cursor is changed into a hand when hovering over the project title. ([#2128](https://github.com/kubesphere/console/pull/2128), [@fuchunlan](https://github.com/fuchunlan)) +- Add support for creating ConfigMaps and Secrets in the **Environment Variables** area of the **Create Deployment** dialog box. 
([#2227](https://github.com/kubesphere/console/pull/2227), [@harrisonliu5](https://github.com/harrisonliu5)) +- Add support for setting Pod annotations in the **Create Deployment** dialog box. ([#2129](https://github.com/kubesphere/console/pull/2129), [@harrisonliu5](https://github.com/harrisonliu5)) +- Allow domain names to start with an asterisk (*). ([#2432](https://github.com/kubesphere/console/pull/2432), [@wengzhisong-hz](https://github.com/wengzhisong-hz)) +- Add support for searching for Harbor images in the **Create Deployment** dialog box. ([#2132](https://github.com/kubesphere/console/pull/2132), [@wengzhisong-hz](https://github.com/wengzhisong-hz)) +- Add support for mounting volumes to init containers. ([#2166](https://github.com/kubesphere/console/pull/2166), [@Sigboom](https://github.com/Sigboom)) +- Remove the workload auto-restart feature in volume expansion. ([#4121](https://github.com/kubesphere/kubesphere/pull/4121), [@wenhuwang](https://github.com/wenhuwang)) + + +## APIs + +- Deprecate router API version v1alpha2. ([#4193](https://github.com/kubesphere/kubesphere/pull/4193), [@RolandMa1986](https://github.com/RolandMa1986)) +- Upgrade the pipeline API version from v2 to v3. ([#2323](https://github.com/kubesphere/console/pull/2323), [@harrisonliu5](https://github.com/harrisonliu5)) +- Change the Secret verification API. ([#2368](https://github.com/kubesphere/console/pull/2368), [@harrisonliu5](https://github.com/harrisonliu5)) +- Client credential is required for OAuth2 Token endpoint.([#3525](https://github.com/kubesphere/kubesphere/pull/3525),[@wansir](https://github.com/wansir)) + +## Component Changes + +- kubefed: v0.7.0 -> v0.8.1 +- prometheus-operator: v0.42.1 -> v0.43.2 +- notification-manager: v1.0.0 -> v1.4.0 +- fluent-bit: v1.6.9 -> v1.8.3 +- kube-events: v0.1.0 -> v0.3.0 +- kube-auditing: v0.1.2 -> v0.2.0 +- istio: 1.6.10 -> 1.11.1 +- jaeger: 1.17 -> 1.27 +- kiali: v1.26.1 -> v1.38 +- KubeEdge: v1.6.2 -> 1.7.2 \ No newline at end of file diff --git a/content/en/docs/v3.4/release/release-v321.md b/content/en/docs/v3.4/release/release-v321.md new file mode 100644 index 000000000..d11d74b2e --- /dev/null +++ b/content/en/docs/v3.4/release/release-v321.md @@ -0,0 +1,46 @@ +--- +title: "Release Notes for 3.2.1" +keywords: "Kubernetes, KubeSphere, release notes" +description: "KubeSphere Release Notes for 3.2.1" +linkTitle: "Release Notes - 3.2.1" +weight: 18099 +--- + +## Enhancements and Bug Fixes + +### Enhancements + +- Add support for filtering Pods by status. ([#4434](https://github.com/kubesphere/kubesphere/pull/4434), [@iawia002](https://github.com/iawia002), [#2620](https://github.com/kubesphere/console/pull/2620), [@weili520](https://github.com/weili520)) +- Add a tip in the image builder creation dialog box, which indicates that containerd is not supported. ([#2734](https://github.com/kubesphere/console/pull/2734), [@weili520](https://github.com/weili520)) +- Add information about available quotas in the **Edit Project Quotas** dialog box. ([#2619](https://github.com/kubesphere/console/pull/2619), [@weili520](https://github.com/weili520)) + +### Bug Fixes + +- Change the password verification rules to prevent passwords without uppercase letters. ([#4481](https://github.com/kubesphere/kubesphere/pull/4481), [@live77](https://github.com/live77)) +- Fix a login issue, where a user from an LDAP identity provider cannot log in if information about the user does not exist on KubeSphere. 
([#4436](https://github.com/kubesphere/kubesphere/pull/4436), [@RolandMa1986](https://github.com/RolandMa1986)) +- Fix an issue where cluster gateway metrics cannot be obtained. ([#4457](https://github.com/kubesphere/kubesphere/pull/4457), [@RolandMa1986](https://github.com/RolandMa1986)) +- Fix incorrect access modes displayed in the volume list. ([#2686](https://github.com/kubesphere/console/pull/2686), [@weili520](https://github.com/weili520)) +- Remove the **Update** button on the **Gateway Settings** page. ([#2608](https://github.com/kubesphere/console/pull/2608), [@weili520](https://github.com/weili520)) +- Fix a display error of the time range selection drop-down list. ([#2715](https://github.com/kubesphere/console/pull/2715), [@weili520](https://github.com/weili520)) +- Fix an issue where Secret data text is not displayed correctly when the text is too long. ([#2600](https://github.com/kubesphere/console/pull/2600), [@weili520](https://github.com/weili520)) +- Fix an issue where StatefulSet creation fails when a volume template is mounted. ([#2730](https://github.com/kubesphere/console/pull/2730), [@weili520](https://github.com/weili520)) +- Fix an issue where cluster gateway information fails to be obtained when the user does not have permission to view cluster information. ([#2695](https://github.com/kubesphere/console/pull/2695), [@harrisonliu5](https://github.com/harrisonliu5)) +- Fix an issue where status and run records of pipelines are not automatically updated. ([#2594](https://github.com/kubesphere/console/pull/2594), [@harrisonliu5](https://github.com/harrisonliu5)) +- Add a tip for the kubernetesDeploy pipeline step, which indicates that the step is about to be deprecated. ([#2660](https://github.com/kubesphere/console/pull/2660), [@harrisonliu5](https://github.com/harrisonliu5)) +- Fix an issue where HTTP registry addresses of image registry Secrets cannot be validated. ([#2795](https://github.com/kubesphere/console/pull/2795), [@harrisonliu5](https://github.com/harrisonliu5)) +- Fix the incorrect URL of the Harbor image. ([#2784](https://github.com/kubesphere/console/pull/2784), [@harrisonliu5](https://github.com/harrisonliu5)) +- Fix a display error of log search results. ([#2598](https://github.com/kubesphere/console/pull/2598), [@weili520](https://github.com/weili520)) +- Fix an error in the volume instance YAML configuration. ([#2629](https://github.com/kubesphere/console/pull/2629), [@weili520](https://github.com/weili520)) +- Fix incorrect available workspace quotas displayed in the **Edit Project Quotas** dialog box. ([#2613](https://github.com/kubesphere/console/pull/2613), [@weili520](https://github.com/weili520)) +- Fix an issue in the **Monitoring** dialog box, where the time range selection drop-down list does not function properly. ([#2722](https://github.com/kubesphere/console/pull/2722), [@weili520](https://github.com/weili520)) +- Fix incorrect available quotas displayed in the Deployment creation page. ([#2668](https://github.com/kubesphere/console/pull/2668), [@weili520](https://github.com/weili520)) +- Change the documentation address to [kubesphere.io](https://kubesphere.io/) and [kubesphere.com.cn](https://kubesphere.com.cn/). ([#2628](https://github.com/kubesphere/console/pull/2628), [@weili520](https://github.com/weili520)) +- Fix an issue where Deployment volume settings cannot be modified. 
([#2656](https://github.com/kubesphere/console/pull/2656), [@weili520](https://github.com/weili520)) +- Fix an issue where the container terminal cannot be accessed when the browser language is not English, Simplified Chinese, or Traditional Chinese. ([#2702](https://github.com/kubesphere/console/pull/2702), [@weili520](https://github.com/weili520)) +- Fix incorrect volume status displayed in the Deployment editing dialog box. ([#2622](https://github.com/kubesphere/console/pull/2622), [@weili520](https://github.com/weili520)) +- Remove labels displayed on the credential details page. ([#2621](https://github.com/kubesphere/console/pull/2621), [@123liubao](https://github.com/123liubao)) +- Fix an issue caused by non-ASCII branch names. ([#399](https://github.com/kubesphere/ks-devops/pull/399)) +- Fix incorrect handling of the choice parameter in pipelines. ([#378](https://github.com/kubesphere/ks-devops/pull/378)) +- Fix an issue where users cannot proceed or abort pipelines created by other users. ([#408](https://github.com/kubesphere/ks-devops/pull/408)) +- Fix the incorrect order of pipeline run records. ([#394](https://github.com/kubesphere/ks-devops/pull/394)) +- Fix an issue where a pipeline triggered by a non-admin user displays "Started by user admin". ([#384](https://github.com/kubesphere/ks-devops/pull/384)) diff --git a/content/en/docs/v3.4/release/release-v330.md b/content/en/docs/v3.4/release/release-v330.md new file mode 100644 index 000000000..e191f555c --- /dev/null +++ b/content/en/docs/v3.4/release/release-v330.md @@ -0,0 +1,90 @@ +--- +title: "Release Notes for 3.3.0" +keywords: "Kubernetes, KubeSphere, Release Notes" +description: "KubeSphere 3.3.0 Release Notes" +linkTitle: "Release Notes - 3.3.0" +weight: 18098 +--- + +## DevOps +### Features +- Add the Continuous Deployment feature, which supports GitOps, uses Argo CD as the backend, and allows users to view the status of continuous deployments in real time. +- Add the allowlist feature on the **Basic Information** page of a DevOps project to restrict the target repository and deployment location for continuous deployments. +- Add support for importing and managing code repositories. +- Add support for built-in CRD-based pipeline templates and parameter customization. +- Add support for viewing pipeline events. + +## Storage +### Features + +- Add support for tenant-level storage class permission management. +- Add the volume snapshot content management and volume snapshot class management features. +- Add support for automatic restart of deployments and statefulsets after a PVC has been changed. +- Add the PVC auto expansion feature, which automatically expands PVCs when remaining capacity is insufficient. + +## Multi-tenancy and Multi-cluster +### Features +- Add a notification to remind users when the kubeconfig certificate of a cluster is about to expire. +- Add the kubesphere-config configmap, which provides the name of the current cluster. +- Add support for cluster-level member and role management. + +## Observability + +### Features +- Add process and thread monitoring metrics for containers. +- Add disk monitoring metrics that provide usage of each disk. +- Add support for importing Grafana templates to create custom monitoring dashboards of a namespace scope. +- Add support for defining separate data retention periods for container logs, resource events, and audit logs. + +### Enhancements & Updates +- Upgrade Alertmanager from v0.21.0 to v0.23.0. +- Upgrade Grafana from 7.4.3 to 8.3.3.
+- Upgrade kube-state-metrics from v1.9.7 to v2.3.0. +- Upgrade node-exporter from v0.18.1 to v1.3.1. +- Upgrade Prometheus from v2.26.0 to v2.34.0. +- Upgrade Prometheus Operator from v0.43.2 to v0.55.1. +- Upgrade kube-rbac-proxy from v0.8.0 to v0.11.0. +- Upgrade configmap-reload from v0.3.0 to v0.5.0. +- Upgrade Thanos from v0.18.0 to v0.25.2. +- Upgrade kube-events from v0.3.0 to v0.4.0. +- Upgrade Fluent Bit Operator from v0.11.0 to v0.13.0. +- Upgrade Fluent Bit from v1.8.3 to v1.8.11. + +## KubeEdge Integration +### Features +- Add support for logging in to common cluster nodes and edge nodes from the KubeSphere web console. +### Enhancements & Updates +- Upgrade KubeEdge from v1.7.2 to v1.9.2. +- Remove EdgeWatcher. + +## Network +### Enhancements & Updates +- Integrate OpenELB with KubeSphere for exposing LoadBalancer services. +### Bug Fixes +- Fix an issue where the gateway of a project is not deleted after the project is deleted. +## App Store +### Bug Fixes +- Fix a ks-controller-manager crash caused by Helm controller NPE errors. + +## Authentication & Authorization +### Features +- Add support for manually disabling and enabling users. +## User Experience +- Add a prompt when the audit log of Kubernetes has been enabled. +- Add the lifecycle management feature for containers. +- Add support for creating container environment variables in batches from secrets and configmaps. +- Add a time range selector on the **Traffic Monitoring** tab page. +- Add a message in the **Audit Log Search** dialog box, which prompts users to enable the audit logs feature. +- Add more Istio parameters in `ClusterConfiguration`. +- Add support for more languages, for example, Turkish. +- Set the **Token** parameter on the webhook settings page as mandatory. +- Prevent passwords without uppercase letters set through the backend CLI. +- Fix an issue where no data is displayed on the **Traffic Management** and **Tracing** tab pages in a multi-cluster project. +- Fix an app installation failure, which occurs when users click buttons too fast. +- Fix an issue where container probes are still displayed after they are deleted. +- Fix an issue where statefulset creation fails when a volume is mounted to an init container. +- Prevent ks-apiserver and ks-controller-manager from restarting when the cluster configuration is changed. +- Optimize some UI texts. +- Optimize display of the service topology on the **Service** page. + +For more information about issues and contributors of KubeSphere 3.3.0, see [GitHub](https://github.com/kubesphere/kubesphere/blob/master/CHANGELOG/CHANGELOG-3.3.md). \ No newline at end of file diff --git a/content/en/docs/v3.4/release/release-v331.md b/content/en/docs/v3.4/release/release-v331.md new file mode 100644 index 000000000..375e08f94 --- /dev/null +++ b/content/en/docs/v3.4/release/release-v331.md @@ -0,0 +1,42 @@ +--- +title: "Release Notes for 3.3.1" +keywords: "Kubernetes, KubeSphere, Release Notes" +description: "KubeSphere 3.3.1 Release Notes" +linkTitle: "Release Notes - 3.3.1" +weight: 18096 +--- + +## DevOps +### Enhancements & Updates + +- Add support for editing the kubeconfig binding mode on the pipeline UI. + +### Bug Fixes + +- Fix an issue where users fail to check the CI/CD template. +- Remove the `Deprecated` tag from the CI/CD template and replace `kubernetesDeploy` with `kubeconfig binding` at the deployment phase. + +## Network +### Bug Fixes + +- Fix an issue where users fail to create routing rules in IPv6 and IPv4 dual-stack environments. 
+ +## Storage +### Bug Fixes + +- Set `hostpath` as a required option when users are mounting volumes. + + +## Authentication & Authorization +### Bug Fixes + +- Delete roles `users-manager` and `workspace-manager`. +- Add role `platform-self-provisioner`. +- Block some permissions of custom roles. + +## User Experience + +- Add support for changing the number of items displayed on each page of a table. +- Add support for batch stopping workloads. + +For more information about issues and contributors of KubeSphere 3.3.1, see [GitHub](https://github.com/kubesphere/kubesphere/blob/master/CHANGELOG/CHANGELOG-3.3.1.md). \ No newline at end of file diff --git a/content/en/docs/v3.4/release/release-v332.md b/content/en/docs/v3.4/release/release-v332.md new file mode 100644 index 000000000..1c80c0d92 --- /dev/null +++ b/content/en/docs/v3.4/release/release-v332.md @@ -0,0 +1,92 @@ +--- +title: "Release Notes for 3.3.2" +keywords: "Kubernetes, KubeSphere, Release Notes" +description: "KubeSphere 3.3.2 Release Notes" +linkTitle: "Release Notes - 3.3.2" +weight: 18095 +--- + +## DevOps + +### Enhancements & Upgrades + +- Add the latest GitHub Actions. +- Save the PipelineRun results to the configmap. +- Modify the Chinese description of the status of ArgoCD applications. +- Add more information to continuous deployment parameters. +- Add a link for PipelineRun in the aborted state. +- Add an ID column for PipelineRun, and the ID will be displayed when users run kubectl commands. +- Remove the queued state from PipelineRun. + +### Bug Fixes + +- Fix an issue where webhook configurations are missing after users change and save pipeline configurations. +- Fix an issue where downloading DevOps pipeline artifacts fails. +- Fix an issue where the image address does not match when a service is created by using a JAR/WAR file. +- Fix an issue where the status of PipelineRun changes from `Cancelled` to `Not-Running`. +- Fix the automatic cleaning behavior of pipelines to keep it consistent with the cleaning configurations of Jenkins. + +## App Store + +### Bug Fixes + +- Fix an issue where the application icon is not displayed on the uploaded application template. +- Fix an issue where the homepage of an application is not displayed on the application information page. +- Fix an issue where importing built-in applications fails. +- Fix a UUID generation error in an IPv6-only environment. + +## Observability + +### Bug Fixes + +- Fix a parsing error in the configuration file of logsidecar-injector. + +## Service Mesh + +### Bug Fixes + +- Fix an issue where application governance is not disabled by default for Bookinfo deployed in projects without service mesh enabled. +- Fix an issue where the delete button is missing on the blue-green deployment details page. + +## Network + +### Bug Fixes + +- Restrict network isolation of projects within the current workspace. + +## Storage + +### Enhancements & Upgrades + +- Display the cluster to which system-workspace belongs in multi-cluster environments. +- Rename route to ingress. + +## Authentication & Authorization + +### Enhancements & Upgrades + +- Add dynamic options for cache. +- Remove the "Alerting Message Management" permission. + +### Bug Fixes + +- Fix an issue where platform roles with platform management permissions cannot manage clusters. + +## Development & Testing + +### Bug Fixes + +- Fix an issue where some data is in the `Out of sync` state after the live-reload feature is introduced.
+- Fix an issue where the ks-apiserver fails when it is reloaded multiple times. +- Fix an issue where caching resources fails if some required CRDs are missing. +- Fix an issue where the ks-apiserver crashes in Kubernetes 1.24+ versions. +- Fix an issue where Goroutine leaks occur when the audit event sender times out. + +## User Experience + +- Limit the length of cluster names. +- Fix an issue where pod replicas of a federated service are not automatically refreshed. +- Fix an issue where related pods are not deleted after users delete a service. +- Fix an issue where the number of nodes and roles are incorrectly displayed when there is only one node. + +For more information about issues and contributors of KubeSphere 3.3.2, see [GitHub](https://github.com/kubesphere/kubesphere/blob/master/CHANGELOG/CHANGELOG-3.3.2.md). \ No newline at end of file diff --git a/content/en/docs/v3.4/toolbox/_index.md b/content/en/docs/v3.4/toolbox/_index.md new file mode 100644 index 000000000..6b060c70e --- /dev/null +++ b/content/en/docs/v3.4/toolbox/_index.md @@ -0,0 +1,13 @@ +--- +title: "Toolbox" +description: "Help you to better understand KubeSphere toolbox" +layout: "second" + +linkTitle: "Toolbox" + +weight: 15000 + +icon: "/images/docs/v3.3/docs.svg" +--- + +KubeSphere provides several important functionalities from the toolbox. This chapter demonstrates how to use the toolbox of KubeSphere to query events, logs, and auditing logs, view resource consumption information, and run commands with web kubectl. diff --git a/content/en/docs/v3.4/toolbox/auditing/_index.md b/content/en/docs/v3.4/toolbox/auditing/_index.md new file mode 100644 index 000000000..bbdbbccd6 --- /dev/null +++ b/content/en/docs/v3.4/toolbox/auditing/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Auditing" +weight: 15300 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/toolbox/auditing/auditing-query.md b/content/en/docs/v3.4/toolbox/auditing/auditing-query.md new file mode 100644 index 000000000..d3c2c9d90 --- /dev/null +++ b/content/en/docs/v3.4/toolbox/auditing/auditing-query.md @@ -0,0 +1,87 @@ +--- +title: "Auditing Log Query" +keywords: "Kubernetes, KubeSphere, auditing, log, query" +description: "Understand how you can perform quick auditing log queries to keep track of the latest auditing information of your cluster." +linkTitle: "Auditing Log Query" +weight: 15330 +--- + +KubeSphere supports the query of auditing logs among isolated tenants. This tutorial demonstrates how to use the query function, including the interface, search parameters and detail pages. + +## Prerequisites + +You need to enable [KubeSphere Auditing Logs](../../../pluggable-components/auditing-logs/). + +## Enter the Query Interface + +1. The query function is available for all users. Log in to the console with any user, hover over the icon in the lower-right corner and select **Audit Log Search**. + + {{< notice note >}} + +Any user has the permission to query auditing logs, while the logs that each user is able to see are different. + +- If a user has the permission to view resources in a project, it can see the auditing log that happens in this project, such as workload creation in the project. +- If a user has the permission to list projects in a workspace, it can see the auditing log that happens in this workspace but not in projects, such as project creation in the workspace. 
- If a user has the permission to list projects in a cluster, it can see the auditing log that happens in this cluster but not in workspaces and projects, such as workspace creation in the cluster. + +{{}} + +2. In the pop-up window, you can view log trends in the last 12 hours. + +3. The **Audit Log Search** console supports the following query parameters:

   Parameter | Description
   --- | ---
   Cluster | Cluster where the operation happens. It is enabled if the multi-cluster feature is turned on.
   Project | Project where the operation happens. It supports exact query and fuzzy query.
   Workspace | Workspace where the operation happens. It supports exact query and fuzzy query.
   Resource Type | Type of resource associated with the request. It supports fuzzy query.
   Resource Name | Name of the resource associated with the request. It supports fuzzy query.
   Verb | Kubernetes verb associated with the request. For non-resource requests, this is the lower-case HTTP method. It supports exact query.
   Status Code | HTTP response code. It supports exact query.
   Operation Account | User who calls this request. It supports exact and fuzzy query.
   Source IP | IP address from where the request originated and intermediate proxies. It supports fuzzy query.
   Time Range | Time when the request reaches the apiserver.
+ + {{< notice note >}} + +- Fuzzy query supports case-insensitive fuzzy matching and retrieval of full terms by the first half of a word or phrase based on Elasticsearch segmentation rules. +- KubeSphere stores logs for the last seven days by default. You can modify the retention period in the ConfigMap `elasticsearch-logging-curator`. + +{{}} + +## Enter Query Parameters + +1. Select a filter and enter the keyword you want to search. For example, query auditing logs containing the information of `services` created. + +2. You can click the results to see the auditing log details. \ No newline at end of file diff --git a/content/en/docs/v3.4/toolbox/auditing/auditing-receive-customize.md b/content/en/docs/v3.4/toolbox/auditing/auditing-receive-customize.md new file mode 100644 index 000000000..45b8a361d --- /dev/null +++ b/content/en/docs/v3.4/toolbox/auditing/auditing-receive-customize.md @@ -0,0 +1,180 @@ +--- +title: "Receive and Customize Auditing Logs" +keywords: "Kubernetes, KubeSphere, auditing, log, customize, receive" +description: "Learn how to receive and customize auditing logs." +linkTitle: "Receive and Customize Auditing Logs" +weight: 15310 +--- + +KubeSphere Auditing Logs provide a security-relevant chronological set of records documenting the sequence of activities that have affected the system by individual users, administrators, or other components of the system. Each request to KubeSphere generates an event that is then written to a webhook and processed according to a certain rule. The event will be ignored, stored, or generate an alert based on different rules. + +## Enable KubeSphere Auditing Logs + +To enable auditing logs, see [KubeSphere Auditing Logs](../../../pluggable-components/auditing-logs/). + +## Receive Auditing Logs from KubeSphere + +The KubeSphere Auditing Log system receives auditing logs only from KubeSphere by default, while it can also receive auditing logs from Kubernetes. + +Users can stop receiving auditing logs from KubeSphere by changing the value of `auditing.enable` in ConfigMap `kubesphere-config` in the namespace `kubesphere-system` using the following command: + +```bash +kubectl edit cm -n kubesphere-system kubesphere-config +``` + +Change the value of `auditing.enabled` as `false` to stop receiving auditing logs from KubeSphere. + +```yaml + spec: + auditing: + enabled: false +``` + +You need to restart the KubeSphere apiserver to make the changes effective. + +## Receive Auditing Logs from Kubernetes + +To make the KubeSphere Auditing Log system receive auditing logs from Kubernetes, you need to add a Kubernetes audit policy file and Kubernetes audit webhook config file to `/etc/kubernetes/manifests/kube-apiserver.yaml` as follows. + +### Audit policy + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: kube-apiserver + namespace: kube-system +spec: + containers: + - command: + - kube-apiserver + - --audit-policy-file=/etc/kubernetes/audit/audit-policy.yaml + - --audit-webhook-config-file=/etc/kubernetes/audit/audit-webhook.yaml + volumeMounts: + - mountPath: /etc/kubernetes/audit + name: k8s-audit + readOnly: true + volumes: + - hostPath: + path: /etc/kubernetes/audit + type: DirectoryOrCreate + name: k8s-audit +``` + +{{< notice note >}} + +This operation will restart the Kubernetes apiserver. + +{{}} + +The file `audit-policy.yaml` defines rules about what events should be recorded and what data they should include. 
You can use a minimal audit policy file to log all requests at the Metadata level: + +```yaml +# Log all requests at the Metadata level. +apiVersion: audit.k8s.io/v1 +kind: Policy +rules: +- level: Metadata +``` + +For more information about the audit policy, see [Audit Policy](https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/). + +### Audit webhook + +The file `audit-webhook.yaml` defines the webhook which the Kubernetes auditing logs will be sent to. Here is an example configuration of the Kube-Auditing webhook. + +```yaml +apiVersion: v1 +kind: Config +clusters: +- name: kube-auditing + cluster: + server: https://{ip}:6443/audit/webhook/event + insecure-skip-tls-verify: true +contexts: +- context: + cluster: kube-auditing + user: "" + name: default-context +current-context: default-context +preferences: {} +users: [] +``` + +The `ip` is the `CLUSTER-IP` of Service `kube-auditing-webhook-svc` in the namespace `kubesphere-logging-system`. You can get it using this command. + +```bash +kubectl get svc -n kubesphere-logging-system +``` + +{{< notice note >}} + +You need to restart the Kubernetes apiserver to make the changes effective after you modified these two files. + +{{}} + +Edit the CRD Webhook `kube-auditing-webhook`, and change the value of `k8sAuditingEnabled` to `true` through the following commands. + +```bash +kubectl edit webhooks.auditing.kubesphere.io kube-auditing-webhook +``` + +```yaml +spec: + auditing: + k8sAuditingEnabled: true +``` +{{< notice tip >}} + +You can also use a user of `platform-admin` role to log in to the console, search `Webhook` in **CRDs** on the **Cluster Management** page, and edit `kube-auditing-webhook` directly. + +{{}} + +To stop receiving auditing logs from Kubernetes, remove the configuration of auditing webhook backend, then change the value of `k8sAuditingEnabled` to `false`. + +## Customize Auditing Logs + +KubeSphere Auditing Log system provides a CRD Webhook `kube-auditing-webhook` to customize auditing logs. Here is an example yaml file: + +```yaml +apiVersion: auditing.kubesphere.io/v1alpha1 +kind: Webhook +metadata: + name: kube-auditing-webhook +spec: + auditLevel: RequestResponse + auditSinkPolicy: + alertingRuleSelector: + matchLabels: + type: alerting + archivingRuleSelector: + matchLabels: + type: persistence + image: kubesphere/kube-auditing-webhook:v0.1.0 + archivingPriority: DEBUG + alertingPriority: WARNING + replicas: 2 + receivers: + - name: alert + type: alertmanager + config: + service: + namespace: kubesphere-monitoring-system + name: alertmanager-main + port: 9093 +``` + + Parameter | Description | Default + --- | --- | --- + `replicas` | The replica number of the Kube-Auditing webhook. | 2 + `archivingPriority` | The priority of the archiving rule. The known audit types are `DEBUG`, `INFO`, and `WARNING`. | `DEBUG` + `alertingPriority` | The priority of the alerting rule. The known audit types are `DEBUG`, `INFO`, and `WARNING`. | `WARNING` + `auditLevel` | The level of auditing logs. The known levels are:
- `None`: don't log events.
- `Metadata`: log request metadata (requesting user, timestamp, resource, verb, etc.) but not requests or response bodies.
- `Request`: log event metadata and request bodies but no response body. This does not apply to non-resource requests.
- `RequestResponse`: log event metadata, requests, and response bodies. This does not apply to non-resource requests. | `Metadata` + `k8sAuditingEnabled` | Whether to receive Kubernetes auditing logs. | `false` + `receivers` | The receivers to receive alerts. | + +{{< notice note >}} + +You can change the level of Kubernetes auditing logs by modifying the file `audit-policy.yaml`, then restart the Kubernetes apiserver. + +{{}} \ No newline at end of file diff --git a/content/en/docs/v3.4/toolbox/auditing/auditing-rule.md b/content/en/docs/v3.4/toolbox/auditing/auditing-rule.md new file mode 100644 index 000000000..17ba026e9 --- /dev/null +++ b/content/en/docs/v3.4/toolbox/auditing/auditing-rule.md @@ -0,0 +1,207 @@ +--- +title: "Auditing Rules" +keywords: "Kubernetes, docker, kubesphere, auditing" +description: "Understand the auditing rule and how to customize a rule for processing auditing logs." +linkTitle: "Auditing Rules" +weight: 15320 +--- + +An auditing rule defines the policy for processing auditing logs. KubeSphere Auditing Logs provide users with two CRD rules (`archiving-rule` and `alerting-rule`) for customization. + +After you enable [KubeSphere Auditing Logs](../../../pluggable-components/auditing-logs/), log in to the console with a user of `platform-admin` role. In **CRDs** on the **Cluster Management** page, enter `rules.auditing.kubesphere.io` in the search bar. Click the result **Rule** and you can see the two CRD rules. + +Below are examples of part of the rules. + +## archiving-rule + +```yaml +apiVersion: auditing.kubesphere.io/v1alpha1 +kind: Rule +metadata: + labels: + type: archiving + workspace: system-workspace + name: archiving-rule +spec: + rules: + - desc: all action not need to be audit + list: + - get + - list + - watch + name: ignore-action + type: list + - condition: Verb not in ${ignore-action} + desc: All audit event except get, list, watch event + enable: true + name: archiving + priority: DEBUG + type: rule +``` + +## alerting-rule + +```yaml +apiVersion: auditing.kubesphere.io/v1alpha1 +kind: Rule +metadata: + labels: + type: alerting + workspace: system-workspace + name: alerting-rule +spec: + rules: + - desc: all operator need to be audit + list: + - create + - delete + - update + - patch + name: action + type: list + - condition: Verb in ${action} + desc: audit the change of resource + enable: true + name: ResourceChange + priority: INFO + type: rule +``` + + Attributes | Description + --- | --- + `name` | The name of the rule. + `type` | The type of the rule; known values are `rule`, `macro`, `list`, and `alias`. + `desc` | The description of the rule. + `condition` | A filtering expression that is applied against auditing logs to check whether they match the rule. + `macro` | The conditions of the macro. + `list` | The value of list. + `alias` | The value of alias. + `enable` | If it is set to `false`, the rule will not be effective. + `output` | Specifies the message of alert. + `priority` | The priority of the rule. + +When an auditing log matches a rule in `archiving-rule` and the rule priority is no less than `archivingPriority`, it will be stored for further use. When an auditing log matches a rule in `alerting-rule`, if the priority of the rule is less than `alertingPriority`, it will be stored for further use; otherwise it will generate an alert which will be sent to the user. 
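You can also inspect and adjust these two Rule objects from the command line instead of the console. The following is a minimal sketch, assuming you have kubectl access with sufficient permissions and that the rules are exposed through the `rules.auditing.kubesphere.io` CRD mentioned above:

```bash
# List the auditing rules; archiving-rule and alerting-rule are created by default.
kubectl get rules.auditing.kubesphere.io

# Open alerting-rule in an editor, for example to adjust a rule's priority or enable field.
kubectl edit rules.auditing.kubesphere.io alerting-rule
```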
+ + +## Rule Conditions + +A `Condition` is a filtering expression that can use comparison operators (=, !=, <, <=, >, >=, contains, in, like, and regex) and can be combined using Boolean operators (and, or and not) and parentheses. Here are the supported filters. + + Filter | Description + --- | --- + `Workspace` | The workspace where the audit event happens. + `Devops` | The DevOps project where the audit event happens. + `Level` | The level of auditing logs. + `RequestURI` | RequestURI is the request URI as sent by the client to a server. + `Verb` | The verb associated with the request. + `User.Username` | The name that uniquely identifies this user among all active users. + `User.Groups` | The names of groups this user is a part of. + `SourceIPs` | The source IP from where the request originated and intermediate proxies. + `ObjectRef.Resource` | The resource of the object associated with the request. + `ObjectRef.Namespace` | The namespace of the object associated with the request. + `ObjectRef.Name` | The name of the object associated with the request. + `ObjectRef.Subresource` | The subresource of the object associated with the request. + `ResponseStatus.code` | The suggested HTTP return code for the request. + `ResponseStatus.Status` | The status of the operation. + `RequestReceivedTimestamp` | The time the request reaches the apiserver. + `StageTimestamp` | The time the request reaches the current audit stage. + +For example, to match all logs in the namespace `test`: + +``` +ObjectRef.Namespace = "test" +``` + +To match all logs in the namespaces that start with `test`: + +``` +ObjectRef.Namespace like "test*" +``` + +To match all logs happening in the latest one hour: + +``` +RequestReceivedTimestamp >= "2020-06-12T09:23:28.359896Z" and RequestReceivedTimestamp <= "2020-06-12T10:23:28.359896Z" +``` + +## Macro + +A `macro` is a rule condition snippet that can be re-used inside rules and even other macros. Macros provide a way to name common patterns and factor out redundancies in rules. Here is an example of a macro. + +```yaml +apiVersion: auditing.kubesphere.io/v1alpha1 +kind: Rule +metadata: + name: alerting-rule + labels: + workspace: system-workspace + type: alerting +spec: + rules: + - name: pod + type: macro + desc: pod + macro: ObjectRef.Resource="pods" +``` + +{{< notice note >}} + +A `macro` can be used in rules or other macros like ${pod} or ${alerting-rule.pod}. The difference between these two methods is that ${pod} can only be used in the CRD Rule `alerting-rule`, while ${alerting-rule.pod} can be used in all CRD Rules. This principle also applies to lists and alias. + +{{}} + +## List + +A `list` is a collection of items that can be included in rules, macros, or other lists. Unlike rules and macros, lists cannot be parsed as filtering expressions. Here is an example of a list. + +```yaml +apiVersion: auditing.kubesphere.io/v1alpha1 +kind: Rule +metadata: + name: alerting-rule + labels: + workspace: system-workspace + type: alerting +spec: + rules: + - name: action + type: list + desc: all operator needs to be audit + list: + - create + - delete + - update + - patch +``` + +## Alias + +An `alias` is a short name of a filter field. It can be included in rules, macros, lists, and output strings. Here is an example of an alias. 
+ +```yaml +apiVersion: auditing.kubesphere.io/v1alpha1 +kind: Rule +metadata: + name: alerting-rule + labels: + workspace: system-workspace + type: alerting +spec: + rules: + - name: namespace + type: alias + desc: the alias of the resource namespace + alias: ObjectRef.Namespace +``` + +## Output +The `Output` string is used to format the alerting message when an auditing log triggers an alert. The `Output` string can include lists and alias. Here is an example. + +```yaml +Output: ${user} ${verb} a HostNetwork Pod ${name} in ${namespace}. +``` +{{< notice note >}} + +The fields of `user`, `verb`, `namespace`, and `name` are all aliases. + +{{}} \ No newline at end of file diff --git a/content/en/docs/v3.4/toolbox/events-query.md b/content/en/docs/v3.4/toolbox/events-query.md new file mode 100644 index 000000000..de6a88c08 --- /dev/null +++ b/content/en/docs/v3.4/toolbox/events-query.md @@ -0,0 +1,39 @@ +--- +title: "Event Query" +keywords: 'KubeSphere, Kubernetes, Event, Query' +description: 'Understand how you can perform quick event queries to keep track of the latest events of your cluster.' +linkTitle: "Event Query" +weight: 15200 +--- + +Kubernetes events provide insight into what is happening inside a cluster, based on which KubeSphere adds longer historical query and aggregation capabilities, and also supports event query for tenant isolation. + +This guide demonstrates how you can do multi-level, fine-grained event queries to track the status of your components. + +## Prerequisites + +[KubeSphere Events](../../pluggable-components/events/) needs to be enabled. + +## Query Events + +1. The event query function is available for all users. Log in to the console with any account, hover over icon in the lower-right corner and select **Resource Event Search**. + +2. In the displayed dialog box, you can view the number of events that the user has permission to view. + + {{< notice note >}} + +- KubeSphere supports event queries on each cluster separately if you have enabled the [multi-cluster feature](../../multicluster-management/). You can click icon on the left of the search box and select a target cluster. + +- KubeSphere stores events for the last seven days by default. + + {{}} + +3. You can click the search box and enter a condition to search for events by message, workspace, project, resource type, resource name, reason, category, or time range (for example, use `Time Range:Last 10 minutes` to search for events within the last 10 minutes). + +4. Click any one of the results from the list, and you can see raw information of it. It is convenient for developers in terms of debugging and analysis. + +{{< notice note >}} + +The event query interface supports dynamic refreshing every 5s, 10s or 15s. + + {{}} diff --git a/content/en/docs/v3.4/toolbox/log-query.md b/content/en/docs/v3.4/toolbox/log-query.md new file mode 100644 index 000000000..f95bdfbd0 --- /dev/null +++ b/content/en/docs/v3.4/toolbox/log-query.md @@ -0,0 +1,59 @@ +--- +title: "Log Query" +keywords: 'KubeSphere, Kubernetes, log, query' +description: 'Query Kubernetes logs from toolbox' +linkTitle: "Log Query" +weight: 15100 +--- + +The logs of applications and systems can help you better understand what is happening inside your cluster and workloads. The logs are particularly useful for debugging problems and monitoring cluster activities. KubeSphere provides a powerful and easy-to-use logging system which offers users the capabilities of log collection, query and management from the perspective of tenants. 
The tenant-based logging system is much more useful than Kibana since different tenants can only view their own logs, leading to better security. Moreover, the KubeSphere logging system filters out some redundant information so that tenants can only focus on logs that are useful to them. + +This tutorial demonstrates how to use the log query function, including the interface, search parameters and details pages. + +## Prerequisites + +You need to enable the [KubeSphere Logging System](../../pluggable-components/logging/). + +## Enter the Log Query Interface + +1. The log query function is available for all users. Log in to the console with any account, hover over icon in the lower-right corner and select **Log Search**. + +2. In the pop-up window, you can see a time histogram of log numbers, a cluster selection drop-down list, and a log search bar. + + {{< notice note >}} + +- KubeSphere supports log queries on each cluster separately if you have enabled the [multi-cluster feature](../../multicluster-management/). You can click icon on the left of the search box and select a target cluster. + +- KubeSphere stores logs for last seven days by default. + + {{}} + +3. You can customize the query time range by selecting **Time Range** in the log search bar. Alternatively, click on the bars in the time histogram, and KubeSphere will use the time range of that bar for log queries. + +{{< notice note >}} + +- The keyword field supports the query of keyword combinations. For example, you can use `Error`, `Fail`, `Fatal`, `Exception`, and `Warning` together to query all the exception logs. +- The keyword field supports exact query and fuzzy query. The fuzzy query provides case-insensitive fuzzy matching and retrieval of full terms by the first half of a word or phrase based on the ElasticSearch segmentation rules. For example, you can retrieve the logs containing `node_cpu_total` by searching the keyword `node_cpu` instead of the keyword `cpu`. +- Each cluster has its own log retention period which can be set separately. You can modify it in `ClusterConfiguration`. For more information, see [KubeSphere Logging System](../../pluggable-components/logging/). + +{{}} + +## Use Search Parameters + +1. You can provide as many fields as possible to narrow down your search results. + +2. Click any one of the results from the list. Drill into its detail page and inspect the log from this Pod, including the complete context on the right. It is convenient for developers in terms of debugging and analyzing. + + {{< notice note >}} + +The log query interface supports dynamic refreshing with 5s, 10s or 15s, and allows users to export logs to a local file for further analysis (in the upper-right corner). + + {{}} + +4. In the left panel, you can click icon to view the Pod details page or container details page. + +## Drill into the Details Page + +1. If the log looks abnormal, you can drill into the Pod detail page or container detail page to further inspect container logs, resource monitoring graphs, and events. + +2. Inspect the container detail page. At the same time, it allows you to open the terminal to debug the container directly. 
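If you prefer the command line, you can also pull the same container logs with kubectl. The following is a minimal sketch; the namespace, Pod, and container names are placeholders that you would copy from the details page:

```bash
# Print the last hour of logs for one container of a Pod.
kubectl -n <namespace> logs <pod-name> -c <container-name> --since=1h

# Narrow the output to exception-related lines, similar to a keyword query on the console.
kubectl -n <namespace> logs <pod-name> -c <container-name> --since=1h | grep -iE 'error|fail|fatal|exception|warning'
```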
diff --git a/content/en/docs/v3.4/toolbox/metering-and-billing/_index.md b/content/en/docs/v3.4/toolbox/metering-and-billing/_index.md new file mode 100644 index 000000000..6fd17490c --- /dev/null +++ b/content/en/docs/v3.4/toolbox/metering-and-billing/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "Metering and Billing" +weight: 15400 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/toolbox/metering-and-billing/enable-billing.md b/content/en/docs/v3.4/toolbox/metering-and-billing/enable-billing.md new file mode 100644 index 000000000..468987a00 --- /dev/null +++ b/content/en/docs/v3.4/toolbox/metering-and-billing/enable-billing.md @@ -0,0 +1,84 @@ +--- +title: "Enable Billing" +keywords: "Kubernetes, KubeSphere, ConfigMap, Billing" +description: "Enable the billing function in KubeSphere to view the billing data of your resources during a period." +linkTitle: "Enable Billing" +weight: 15420 +--- + +This tutorial demonstrates how to enable KubeSphere Billing to view the cost of different resources in your cluster. By default, the Billing function is disabled so you need to manually add the price information in a ConfigMap. + +Perform the following steps to enable KubeSphere Billing. + +1. Run the following command to edit the ConfigMap `kubesphere-config`: + + ```bash + kubectl edit cm kubesphere-config -n kubesphere-system + ``` + +2. Add the retention day and price information under `metering` in the ConfigMap. The following is an example for your reference: + + ```yaml + $ kubectl get cm kubesphere-config -n kubesphere-system -oyaml + ... + alerting: + prometheusEndpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090 + thanosRulerEndpoint: http://thanos-ruler-operated.kubesphere-monitoring-system.svc:10902 + thanosRuleResourceLabels: thanosruler=thanos-ruler,role=thanos-alerting-rules + ... + metering: + retentionDay: 7d + billing: + priceInfo: + currencyUnit: "USD" + cpuPerCorePerHour: 1.5 + memPerGigabytesPerHour: 5 + ingressNetworkTrafficPerMegabytesPerHour: 1 + egressNetworkTrafficPerMegabytesPerHour: 1 + pvcPerGigabytesPerHour: 2.1 + kind: ConfigMap + ... + ``` + + The following table describes the parameters. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
   Parameter | Description
   --- | ---
   `retentionDay` | `retentionDay` determines the date range displayed on the **Metering and Billing** page for users. The value of this parameter must be the same as the value of `retention` in Prometheus.
   `currencyUnit` | The currency that is displayed on the **Metering and Billing** page. Currently allowed values are CNY (Renminbi) and USD (US dollars). If you specify other currencies, the console will display cost in USD by default.
   `cpuPerCorePerHour` | The unit price of CPU per core/hour.
   `memPerGigabytesPerHour` | The unit price of memory per GB/hour.
   `ingressNetworkTrafficPerMegabytesPerHour` | The unit price of ingress traffic per MB/hour.
   `egressNetworkTrafficPerMegabytesPerHour` | The unit price of egress traffic per MB/hour.
   `pvcPerGigabytesPerHour` | The unit price of PVC per GB/hour. Note that KubeSphere calculates the total cost of volumes based on the storage capacity PVCs request regardless of the actual storage in use.
+ +3. Run the following command to restart `ks-apiserver`: + + ```bash + kubectl rollout restart deploy ks-apiserver -n kubesphere-system + ``` + +4. On the **Metering and Billing** page, you can see the cost information of resources. \ No newline at end of file diff --git a/content/en/docs/v3.4/toolbox/metering-and-billing/view-resource-consumption.md b/content/en/docs/v3.4/toolbox/metering-and-billing/view-resource-consumption.md new file mode 100644 index 000000000..f9ea53f81 --- /dev/null +++ b/content/en/docs/v3.4/toolbox/metering-and-billing/view-resource-consumption.md @@ -0,0 +1,71 @@ +--- +title: "View Resource Consumption" +keywords: "Kubernetes, KubeSphere, metering, billing, consumption" +description: "Track information about resource usage of your cluster's workloads at different levels." +linkTitle: "View Resource Consumption" +weight: 15410 +--- + +KubeSphere metering helps you track resource consumption within a given cluster or workspace at a granular level. Different tenants with different roles can only see the data to which they have access. Besides, you can also set prices for varied resources to see billing information. + +## Prerequisites + +- The **Metering and Billing** section is accessible to all tenants while the information visible to each of them may be different depending on what roles they have at what level. Note that metering is not a pluggable component of KubeSphere, which means you can use it as long as you have a KubeSphere cluster. For a newly created cluster, you need to wait for about 1 hour to see metering information. +- To see billing information, you need to [enable it first](../enable-billing/). + +## View Cluster Resource Consumption + +**Cluster Resource Consumption** contains resource usage information of clusters (and nodes included), such as CPU, memory and storage. + +1. Log in to the KubeSphere console as `admin`, click icon in the lower-right corner and select **Metering and Billing**. + +2. Click **View Consumption** in the **Cluster Resource Consumption** section. + +3. On the left side of the dashboard, you can see a cluster list containing your host cluster and all member clusters if you have enabled [multi-cluster management](../../../multicluster-management/). There is only one cluster called `default` in the list if it is not enabled. + + On the right side, there are three parts showing resource consumption in different ways. + + + + + + + + + + + + + + + + + + +
   Module | Description
   --- | ---
   Overview | Displays a consumption overview of different resources in a cluster since its creation. You can also see the billing information if you have set prices for these resources in the ConfigMap `kubesphere-config`.
   Consumption by Yesterday | Displays the total resource consumption by yesterday. You can also customize the time range and interval to see data within a specific period.
   Current Resources Included | Displays the consumption of resources included in the selected target object (in this case, all nodes in the selected cluster) over the last hour.
+ +4. You can click a cluster on the left and dive deeper into a node or Pod to see detailed consumption information. + + {{< notice note >}} + + To export the metering and billing data of an object as a CSV file, select the checkbox on the left and click **✓**. + + {{}} + +## View Workspace (Project) Resource Consumption + +**Workspace (Project) Resource Consumption** contains resource usage information of workspaces (and projects included), such as CPU, memory and storage. + +1. Log in to the KubeSphere console as `admin`, click icon in the lower-right corner and select **Metering and Billing**. + +2. Click **View Consumption** in the **Workspace (Project) Resource Consumption** section. + +3. On the left side of the dashboard, you can see a list containing all the workspaces in the current cluster. The right part displays detailed consumption information in the selected workspace, the layout of which is basically the same as that of a cluster. + + {{< notice note >}} + + In a multi-cluster architecture, you cannot see the metering and billing information of a workspace if it does not have any available cluster assigned to it. For more information, see [Cluster Visibility and Authorization](../../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/). + + {{}} + +4. Click a workspace on the left and dive deeper into a project or workload (for example, Deployment and StatefulSet) to see detailed consumption information. \ No newline at end of file diff --git a/content/en/docs/v3.4/toolbox/web-kubectl.md b/content/en/docs/v3.4/toolbox/web-kubectl.md new file mode 100644 index 000000000..54a51b1f6 --- /dev/null +++ b/content/en/docs/v3.4/toolbox/web-kubectl.md @@ -0,0 +1,44 @@ +--- +title: "Web Kubectl" +keywords: 'KubeSphere, Kubernetes, kubectl, cli' +description: 'The web kubectl tool is integrated into KubeSphere to provide consistent user experiences for Kubernetes users.' +linkTitle: "Web Kubectl" +weight: 15500 +--- + +The Kubernetes command-line tool, kubectl, allows you to run commands on Kubernetes clusters. You can use kubectl to deploy applications, inspect and manage cluster resources, view logs, and more. + +KubeSphere provides web kubectl on the console for user convenience. By default, in the current version, only the account granted the `platform-admin` role (such as the default account `admin`) has the permission to use web kubectl for cluster resource operation and management. + +This tutorial demonstrates how to use web kubectl to operate on and manage cluster resources. + +## Use Web Kubectl + +1. Log in to KubeSphere with a user granted the `platform-admin` role, hover over the **Toolbox** in the lower-right corner and select **Kubectl**. + +2. You can see the kubectl interface in the pop-up window. If you have enabled the multi-cluster feature, you need to select the target cluster first from the drop-down list in the upper-right corner. This drop-down list is not visible if the multi-cluster feature is not enabled. + +3. Enter kubectl commands in the command-line tool to query and manage Kubernetes cluster resources. For example, execute the following command to query the status of all PVCs in the cluster. + + ```bash + kubectl get pvc --all-namespaces + ``` + + ![web-kubectl-example](/images/docs/v3.3/web-kubectl/web-kubectl-example.png) + +4. 
Use the following syntax to run kubectl commands from your terminal window: + + ```bash + kubectl [command] [TYPE] [NAME] [flags] + ``` + + {{< notice note >}} + +- Where `command`, `TYPE`, `NAME`, and `flags` are: + - `command`: Specifies the operation that you want to perform on one or more resources, such as `create`, `get`, `describe` and `delete`. + - `TYPE`: Specifies the [resource type](https://kubernetes.io/docs/reference/kubectl/overview/#resource-types). Resource types are case-insensitive and you can specify the singular, plural, or abbreviated forms. + - `NAME`: Specifies the name of the resource. Names are case-sensitive. If the name is omitted, details for all resources are displayed, such as `kubectl get pods`. + - `flags`: Specifies optional flags. For example, you can use the `-s` or `--server` flags to specify the address and port of the Kubernetes API server. +- If you need help, run `kubectl help` from the terminal window or refer to the [Kubernetes kubectl CLI documentation](https://kubernetes.io/docs/reference/kubectl/overview/). + + {{}} diff --git a/content/en/docs/v3.4/upgrade/_index.md b/content/en/docs/v3.4/upgrade/_index.md new file mode 100644 index 000000000..a88033ba0 --- /dev/null +++ b/content/en/docs/v3.4/upgrade/_index.md @@ -0,0 +1,14 @@ +--- +title: "Upgrade" +description: "Upgrade KubeSphere and Kubernetes" +layout: "second" + +linkTitle: "Upgrade" + +weight: 7000 + +icon: "/images/docs/v3.3/docs.svg" + +--- + +This chapter demonstrates how cluster operators can upgrade KubeSphere to 3.3.2. \ No newline at end of file diff --git a/content/en/docs/v3.4/upgrade/air-gapped-upgrade-with-ks-installer.md b/content/en/docs/v3.4/upgrade/air-gapped-upgrade-with-ks-installer.md new file mode 100644 index 000000000..5a412910d --- /dev/null +++ b/content/en/docs/v3.4/upgrade/air-gapped-upgrade-with-ks-installer.md @@ -0,0 +1,182 @@ +--- +title: "Air-Gapped Upgrade with ks-installer" +keywords: "Air-Gapped, upgrade, kubesphere, 3.3" +description: "Use ks-installer and offline package to upgrade KubeSphere." +linkTitle: "Air-Gapped Upgrade with ks-installer" +weight: 7500 +--- + +ks-installer is recommended for users whose Kubernetes clusters were not set up by [KubeKey](../../installing-on-linux/introduction/kubekey/), but hosted by cloud vendors or created by themselves. This tutorial is for **upgrading KubeSphere only**. Cluster operators are responsible for upgrading Kubernetes beforehand. + + +## Prerequisites + +- You need to have a KubeSphere cluster running v3.2.x. If your KubeSphere version is v3.1.x or earlier, upgrade to v3.2.x first. +- Read [Release Notes for 3.3.2](../../../v3.3/release/release-v332/) carefully. +- Back up any important component beforehand. +- A Docker registry. You need to have a Harbor or other Docker registries. For more information, see [Prepare a Private Image Registry](../../installing-on-linux/introduction/air-gapped-installation/#step-2-prepare-a-private-image-registry). +- Supported Kubernetes versions of KubeSphere 3.3: v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and * v1.24.x. For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatability. Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x. + +## Major Updates + +In KubeSphere 3.3.1, some changes have made on built-in roles and permissions of custom roles. 
Therefore, before you upgrade KubeSphere to 3.3.1, please note the following: + + - Change of built-in roles: Platform-level built-in roles `users-manager` and `workspace-manager` are removed. If an existing user has been bound to `users-manager` or `workspace-manager`, its role will be changed to `platform-regular` after the upgrade is completed. Role `platform-self-provisioner` is added. For more information about built-in roles, refer to [Create a user](../../quick-start/create-workspace-and-project). + + - Some permission of custom roles are removed: + - Removed permissions of platform-level custom roles: user management, role management, and workspace management. + - Removed permissions of workspace-level custom roles: user management, role management, and user group management. + - Removed permissions of namespace-level custom roles: user management and role management. + - After you upgrade KubeSphere to 3.3.1, custom roles will be retained, but removed permissions of the custom roles will be revoked. +## Step 1: Prepare Installation Images + +As you install KubeSphere in an air-gapped environment, you need to prepare an image package containing all the necessary images in advance. + +1. Download the image list file `images-list.txt` from a machine that has access to Internet through the following command: + + ```bash + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/images-list.txt + ``` + + {{< notice note >}} + + This file lists images under `##+modulename` based on different modules. You can add your own images to this file following the same rule. To view the complete file, see [Appendix](../../installing-on-linux/introduction/air-gapped-installation/#image-list-of-kubesphere-v310). + + {{}} + +2. Download `offline-installation-tool.sh`. + + ```bash + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/offline-installation-tool.sh + ``` + +3. Make the `.sh` file executable. + + ```bash + chmod +x offline-installation-tool.sh + ``` + +4. You can execute the command `./offline-installation-tool.sh -h` to see how to use the script: + + ```bash + root@master:/home/ubuntu# ./offline-installation-tool.sh -h + Usage: + + ./offline-installation-tool.sh [-l IMAGES-LIST] [-d IMAGES-DIR] [-r PRIVATE-REGISTRY] [-v KUBERNETES-VERSION ] + + Description: + -b : save kubernetes' binaries. + -d IMAGES-DIR : the dir of files (tar.gz) which generated by `docker save`. default: ./kubesphere-images + -l IMAGES-LIST : text file with list of images. + -r PRIVATE-REGISTRY : target private registry:port. + -s : save model will be applied. Pull the images in the IMAGES-LIST and save images as a tar.gz file. + -v KUBERNETES-VERSION : download kubernetes' binaries. default: v1.17.9 + -h : usage message + ``` + +5. Pull images in `offline-installation-tool.sh`. + + ```bash + ./offline-installation-tool.sh -s -l images-list.txt -d ./kubesphere-images + ``` + + {{< notice note >}} + + You can choose to pull images as needed. For example, you can delete `##k8s-images` and related images under it in `images-list.text` if you already have a Kubernetes cluster. + + {{}} + +## Step 2: Push Images to Your Private Registry + +Transfer your packaged image file to your local machine and execute the following command to push it to the registry. + +```bash +./offline-installation-tool.sh -l images-list.txt -d ./kubesphere-images -r dockerhub.kubekey.local +``` + +{{< notice note >}} + +The domain name is `dockerhub.kubekey.local` in the command. 
Make sure you use your **own registry address**. + +{{}} + +## Step 3: Download ks-installer + +Similar to installing KubeSphere on an existing Kubernetes cluster in an online environment, you also need to download `kubesphere-installer.yaml`. + +1. Execute the following command to download ks-installer and transfer it to your machine that serves as the taskbox for installation. + + ```bash + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + ``` + +2. Verify that you have specified your private image registry in `spec.local_registry` in `cluster-configuration.yaml`. Note that if your existing cluster was installed in an air-gapped environment, you may already have this field specified. Otherwise, run the following command to edit `cluster-configuration.yaml` of your existing KubeSphere v3.1.x cluster and add the private image registry: + + ``` + kubectl edit cc -n kubesphere-system + ``` + + For example, `dockerhub.kubekey.local` is the registry address in this tutorial, then use it as the value of `.spec.local_registry` as below: + + ```yaml + spec: + persistence: + storageClass: "" + authentication: + jwtSecret: "" + local_registry: dockerhub.kubekey.local # Add this line manually; make sure you use your own registry address. + ``` + +3. Save `cluster-configuration.yaml` after you finish editing it. Replace `ks-installer` with your **own registry address** with the following command: + + ```bash + sed -i "s#^\s*image: kubesphere.*/ks-installer:.*# image: dockerhub.kubekey.local/kubesphere/ks-installer:v3.1.0#" kubesphere-installer.yaml + ``` + + {{< notice warning >}} + + `dockerhub.kubekey.local` is the registry address in the command. Make sure you use your own registry address. + + {{}} + +## Step 4: Upgrade KubeSphere + +Execute the following command after you make sure that all steps above are completed. + +```bash +kubectl apply -f kubesphere-installer.yaml +``` + +## Step 5: Verify Installation + +When the installation finishes, you can see the content as follows: + +```bash +##################################################### +### Welcome to KubeSphere! ### +##################################################### + +Console: http://192.168.0.2:30880 +Account: admin +Password: P@88w0rd + +NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. + +##################################################### +https://kubesphere.io 20xx-xx-xx xx:xx:xx +##################################################### +``` + +Now, you will be able to access the web console of KubeSphere through `http://{IP}:30880` with the default account and password `admin/P@88w0rd`. + +{{< notice note >}} + +To access the console, make sure port 30880 is opened in your security group. + +{{}} diff --git a/content/en/docs/v3.4/upgrade/air-gapped-upgrade-with-kubekey.md b/content/en/docs/v3.4/upgrade/air-gapped-upgrade-with-kubekey.md new file mode 100644 index 000000000..8b431df14 --- /dev/null +++ b/content/en/docs/v3.4/upgrade/air-gapped-upgrade-with-kubekey.md @@ -0,0 +1,349 @@ +--- +title: "Air-Gapped Upgrade with KubeKey" +keywords: "Air-Gapped, kubernetes, upgrade, kubesphere, 3.3.1" +description: "Use the offline package to upgrade Kubernetes and KubeSphere." 
+linkTitle: "Air-Gapped Upgrade with KubeKey" +weight: 7400 +--- +Air-gapped upgrade with KubeKey is recommended for users whose KubeSphere and Kubernetes were both deployed by [KubeKey](../../installing-on-linux/introduction/kubekey/). If your Kubernetes cluster was provisioned by yourself or cloud providers, refer to [Air-gapped Upgrade with ks-installer](../air-gapped-upgrade-with-ks-installer/). + +## Prerequisites + +- You need to have a KubeSphere cluster running v3.2.x. If your KubeSphere version is v3.1.x or earlier, upgrade to v3.2.x first. +- Your Kubernetes version must be v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and * v1.24.x. For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatability. Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x. +- Read [Release Notes for 3.3.2](../../../v3.3/release/release-v332/) carefully. +- Back up any important component beforehand. +- A Docker registry. You need to have a Harbor or other Docker registries. +- Make sure every node can push and pull images from the Docker Registry. + +## Major Updates + +In KubeSphere 3.3.1, some changes have made on built-in roles and permissions of custom roles. Therefore, before you upgrade KubeSphere to 3.3.1, please note the following: + + - Change of built-in roles: Platform-level built-in roles `users-manager` and `workspace-manager` are removed. If an existing user has been bound to `users-manager` or `workspace-manager`, its role will be changed to `platform-regular` after the upgrade is completed. Role `platform-self-provisioner` is added. For more information about built-in roles, refer to [Create a user](../../quick-start/create-workspace-and-project). + + - Some permission of custom roles are removed: + - Removed permissions of platform-level custom roles: user management, role management, and workspace management. + - Removed permissions of workspace-level custom roles: user management, role management, and user group management. + - Removed permissions of namespace-level custom roles: user management and role management. + - After you upgrade KubeSphere to 3.3.1, custom roles will be retained, but removed permissions of the custom roles will be revoked. + +## Upgrade KubeSphere and Kubernetes + +Upgrading steps are different for single-node clusters (all-in-one) and multi-node clusters. + +{{< notice info >}} + +KubeKey upgrades Kubernetes from one MINOR version to the next MINOR version until the target version. For example, you may see the upgrading process going from 1.16 to 1.17 and to 1.18, instead of directly jumping to 1.18 from 1.16. + +{{}} + + +### System Requirements + +| Systems | Minimum Requirements (Each node) | +| --------------------------------------------------------------- | ------------------------------------------- | +| **Ubuntu** *16.04, 18.04, 20.04* | CPU: 2 Cores, Memory: 4 G, Disk Space: 40 G | +| **Debian** *Buster, Stretch* | CPU: 2 Cores, Memory: 4 G, Disk Space: 40 G | +| **CentOS** *7.x* | CPU: 2 Cores, Memory: 4 G, Disk Space: 40 G | +| **Red Hat Enterprise Linux** *7* | CPU: 2 Cores, Memory: 4 G, Disk Space: 40 G | +| **SUSE Linux Enterprise Server** *15* **/openSUSE Leap** *15.2* | CPU: 2 Cores, Memory: 4 G, Disk Space: 40 G | + +{{< notice note >}} + +[KubeKey](https://github.com/kubesphere/kubekey) uses `/var/lib/docker` as the default directory where all Docker related files, including images, are stored. 
It is recommended you add additional storage volumes with at least **100G** mounted to `/var/lib/docker` and `/mnt/registry` respectively. See [fdisk](https://www.computerhope.com/unix/fdisk.htm) command for reference. + +{{}} + + +### Step 1: Download KubeKey +1. 1. Run the following commands to download KubeKey. + {{< tabs >}} + + {{< tab "Good network connections to GitHub/Googleapis" >}} + + Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly. + + ```bash + curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - + ``` + + {{}} + + {{< tab "Poor network connections to GitHub/Googleapis" >}} + + Run the following command first to make sure you download KubeKey from the correct zone. + + ```bash + export KKZONE=cn + ``` + + Run the following command to download KubeKey: + + ```bash + curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - + ``` + {{}} + + {{}} + +2. After you uncompress the file, execute the following command to make `kk` executable: + + ```bash + chmod +x kk + ``` + +### Step 2: Prepare installation images + +As you install KubeSphere and Kubernetes on Linux, you need to prepare an image package containing all the necessary images and download the Kubernetes binary file in advance. + +1. Download the image list file `images-list.txt` from a machine that has access to Internet through the following command: + + ```bash + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/images-list.txt + ``` + + {{< notice note >}} + + This file lists images under `##+modulename` based on different modules. You can add your own images to this file following the same rule. + + {{}} + +2. Download `offline-installation-tool.sh`. + + ```bash + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/offline-installation-tool.sh + ``` + +3. Make the `.sh` file executable. + + ```bash + chmod +x offline-installation-tool.sh + ``` + +4. You can execute the command `./offline-installation-tool.sh -h` to see how to use the script: + + ```bash + root@master:/home/ubuntu# ./offline-installation-tool.sh -h + Usage: + + ./offline-installation-tool.sh [-l IMAGES-LIST] [-d IMAGES-DIR] [-r PRIVATE-REGISTRY] [-v KUBERNETES-VERSION ] + + Description: + -b : save kubernetes' binaries. + -d IMAGES-DIR : the dir of files (tar.gz) which generated by `docker save`. default: /home/ubuntu/kubesphere-images + -l IMAGES-LIST : text file with list of images. + -r PRIVATE-REGISTRY : target private registry:port. + -s : save model will be applied. Pull the images in the IMAGES-LIST and save images as a tar.gz file. + -v KUBERNETES-VERSION : download kubernetes' binaries. default: v1.17.9 + -h : usage message + ``` + +5. Download the Kubernetes binary file. + + ```bash + ./offline-installation-tool.sh -b -v v1.22.12 + ``` + + If you cannot access the object storage service of Google, run the following command instead to add the environment variable to change the source. + + ```bash + export KKZONE=cn;./offline-installation-tool.sh -b -v v1.22.12 + ``` + + {{< notice note >}} + + - You can change the Kubernetes version downloaded based on your needs. Recommended Kubernetes versions for KubeSphere 3.3 are v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and * v1.24.x. For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatability. Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x. 
If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.23.10 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../installing-on-linux/introduction/kubekey/#support-matrix). + + - After you run the script, a folder `kubekey` is automatically created. Note that this file and `kk` must be placed in the same directory when you create the cluster later. + + {{}} + +6. Pull images in `offline-installation-tool.sh`. + + ```bash + ./offline-installation-tool.sh -s -l images-list.txt -d ./kubesphere-images + ``` + + {{< notice note >}} + + You can choose to pull images as needed. For example, you can delete `##k8s-images` and related images under it in `images-list.text` if you already have a Kubernetes cluster. + + {{}} + +### Step 3: Push images to your private registry + +Transfer your packaged image file to your local machine and execute the following command to push it to the registry. + +```bash +./offline-installation-tool.sh -l images-list.txt -d ./kubesphere-images -r dockerhub.kubekey.local +``` + + {{< notice note >}} + + The domain name is `dockerhub.kubekey.local` in the command. Make sure you use your **own registry address**. + + {{}} + +### Air-gapped upgrade for all-in-one clusters + +#### Example machines +| Host Name | IP | Role | Port | URL | +| --------- | ----------- | -------------------- | ---- | ----------------------- | +| master | 192.168.1.1 | Docker registry | 5000 | http://192.168.1.1:5000 | +| master | 192.168.1.1 | master, etcd, worker | | | + +#### Versions + +| | Kubernetes | KubeSphere | +| ------ | ---------- | ---------- | +| Before | v1.18.6 | v3.2.x | +| After | v1.22.12 | 3.3.x | + +#### Upgrade a cluster + +In this example, KubeSphere is installed on a single node, and you need to specify a configuration file to add host information. Besides, for air-gapped installation, pay special attention to `.spec.registry.privateRegistry`, which must be set to **your own registry address**. For more information, see the following sections. + +#### Create an example configuration file + +Execute the following command to generate an example configuration file for installation: + +```bash +./kk create config [--with-kubernetes version] [--with-kubesphere version] [(-f | --file) path] +``` + +For example: + +```bash +./kk create config --with-kubernetes v1.22.12 --with-kubesphere v3.3.2 -f config-sample.yaml +``` + +{{< notice note >}} + +Make sure the Kubernetes version is the one you downloaded. + +{{}} + +#### Edit the configuration file + +Edit the configuration file `config-sample.yaml`. Here is [an example for your reference](https://github.com/kubesphere/kubekey/blob/release-2.2/docs/config-example.md). + + {{< notice warning >}} + +For air-gapped installation, you must specify `privateRegistry`, which is `dockerhub.kubekey.local` in this example. 
+ + {{}} + + Set `hosts` of your `config-sample.yaml` file: + +```yaml + hosts: + - {name: ks.master, address: 192.168.1.1, internalAddress: 192.168.1.1, user: root, password: Qcloud@123} + roleGroups: + etcd: + - ks.master + control-plane: + - ks.master + worker: + - ks.master +``` + +Set `privateRegistry` of your `config-sample.yaml` file: +```yaml + registry: + registryMirrors: [] + insecureRegistries: [] + privateRegistry: dockerhub.kubekey.local +``` + +#### Upgrade your single-node cluster to KubeSphere 3.3 and Kubernetes v1.22.12 + +```bash +./kk upgrade -f config-sample.yaml +``` + +To upgrade Kubernetes to a specific version, explicitly provide the version after the flag `--with-kubernetes`. Available versions are v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and * v1.24.x. For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatability. Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x. + +### Air-gapped upgrade for multi-node clusters + +#### Example machines +| Host Name | IP | Role | Port | URL | +| --------- | ----------- | --------------- | ---- | ----------------------- | +| master | 192.168.1.1 | Docker registry | 5000 | http://192.168.1.1:5000 | +| master | 192.168.1.1 | master, etcd | | | +| slave1 | 192.168.1.2 | worker | | | +| slave1 | 192.168.1.3 | worker | | | + + +#### Versions + +| | Kubernetes | KubeSphere | +| ------ | ---------- | ---------- | +| Before | v1.18.6 | v3.2.x | +| After | v1.22.12 | 3.3.x | + +#### Upgrade a cluster + +In this example, KubeSphere is installed on multiple nodes, so you need to specify a configuration file to add host information. Besides, for air-gapped installation, pay special attention to `.spec.registry.privateRegistry`, which must be set to **your own registry address**. For more information, see the following sections. + +#### Create an example configuration file + + Execute the following command to generate an example configuration file for installation: + +```bash +./kk create config [--with-kubernetes version] [--with-kubesphere version] [(-f | --file) path] +``` + + For example: + +```bash +./kk create config --with-kubernetes v1.22.12 --with-kubesphere v3.3.2 -f config-sample.yaml +``` + +{{< notice note >}} + +Make sure the Kubernetes version is the one you downloaded. + +{{}} + +#### Edit the configuration file + +Edit the configuration file `config-sample.yaml`. Here is [an example for your reference](https://github.com/kubesphere/kubekey/blob/release-2.2/docs/config-example.md). + + {{< notice warning >}} + + For air-gapped installation, you must specify `privateRegistry`, which is `dockerhub.kubekey.local` in this example. 
+ + {{}} + +Set `hosts` of your `config-sample.yaml` file: + +```yaml + hosts: + - {name: ks.master, address: 192.168.1.1, internalAddress: 192.168.1.1, user: root, password: Qcloud@123} + - {name: ks.slave1, address: 192.168.1.2, internalAddress: 192.168.1.2, user: root, privateKeyPath: "/root/.ssh/kp-qingcloud"} + - {name: ks.slave2, address: 192.168.1.3, internalAddress: 192.168.1.3, user: root, privateKeyPath: "/root/.ssh/kp-qingcloud"} + roleGroups: + etcd: + - ks.master + control-plane: + - ks.master + worker: + - ks.slave1 + - ks.slave2 +``` +Set `privateRegistry` of your `config-sample.yaml` file: +```yaml + registry: + registryMirrors: [] + insecureRegistries: [] + privateRegistry: dockerhub.kubekey.local +``` + +#### Upgrade your multi-node cluster to KubeSphere 3.3 and Kubernetes v1.22.12 + +```bash +./kk upgrade -f config-sample.yaml +``` + +To upgrade Kubernetes to a specific version, explicitly provide the version after the flag `--with-kubernetes`. Available versions are v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and * v1.24.x. For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatability. Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x. diff --git a/content/en/docs/v3.4/upgrade/overview.md b/content/en/docs/v3.4/upgrade/overview.md new file mode 100644 index 000000000..df085801b --- /dev/null +++ b/content/en/docs/v3.4/upgrade/overview.md @@ -0,0 +1,28 @@ +--- +title: "Upgrade — Overview" +keywords: "Kubernetes, upgrade, KubeSphere, 3.3, upgrade" +description: "Understand what you need to pay attention to before the upgrade, such as versions, and upgrade tools." +linkTitle: "Overview" +weight: 7100 +--- + +## Make Your Upgrade Plan + +KubeSphere 3.3 is compatible with Kubernetes v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and * v1.24.x: + +- Before you upgrade your cluster to KubeSphere 3.3, you need to have a KubeSphere cluster running v3.2.x. +- You can choose to only upgrade KubeSphere to 3.3 or upgrade Kubernetes (to a higher version) and KubeSphere (to 3.3) at the same time. +- For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatability. Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x. +## Before the Upgrade + +{{< notice warning >}} + +- You are supposed to implement a simulation for the upgrade in a testing environment first. After the upgrade is successful in the testing environment and all applications are running normally, upgrade your cluster in your production environment. +- During the upgrade process, there may be a short interruption of applications (especially for those single-replica Pods). Please arrange a reasonable period of time for your upgrade. +- It is recommended to back up etcd and stateful applications before in production. You can use [Velero](https://velero.io/) to implement the backup and migrate Kubernetes resources and persistent volumes. + +{{}} + +## Upgrade Tool + +Depending on how your existing cluster was set up, you can use KubeKey or ks-installer to upgrade your cluster. It is recommended that you [use KubeKey to upgrade your cluster](../upgrade-with-kubekey/) if it was created by KubeKey. Otherwise, [use ks-installer to upgrade your cluster](../upgrade-with-ks-installer/). 
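
Before you pick an upgrade tool, it can also help to confirm what your cluster is currently running. The following is a minimal sketch, assuming the default `kubesphere-system` namespace and the standard `ks-console` Deployment name; adjust the names if your installation differs.

```bash
# Print the image tag of ks-console, which usually reflects the installed
# KubeSphere version (for example, kubesphere/ks-console:v3.2.1).
kubectl -n kubesphere-system get deployment ks-console \
  -o jsonpath='{.spec.template.spec.containers[0].image}{"\n"}'

# List node versions to confirm Kubernetes is within the range supported
# by KubeSphere 3.3 (v1.20.x to v1.24.x).
kubectl get nodes
```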
\ No newline at end of file diff --git a/content/en/docs/v3.4/upgrade/upgrade-with-ks-installer.md b/content/en/docs/v3.4/upgrade/upgrade-with-ks-installer.md new file mode 100644 index 000000000..6790aa41a --- /dev/null +++ b/content/en/docs/v3.4/upgrade/upgrade-with-ks-installer.md @@ -0,0 +1,41 @@ +--- +title: "Upgrade with ks-installer" +keywords: "Kubernetes, upgrade, KubeSphere, v3.3.2" +description: "Use ks-installer to upgrade KubeSphere." +linkTitle: "Upgrade with ks-installer" +weight: 7300 +--- + +ks-installer is recommended for users whose Kubernetes clusters were not set up by [KubeKey](../../installing-on-linux/introduction/kubekey/), but hosted by cloud vendors or created by themselves. This tutorial is for **upgrading KubeSphere only**. Cluster operators are responsible for upgrading Kubernetes beforehand. + +## Prerequisites + +- You need to have a KubeSphere cluster running v3.2.x. If your KubeSphere version is v3.1.x or earlier, upgrade to v3.2.x first. +- Read [Release Notes for 3.3.2](../../../v3.3/release/release-v332/) carefully. +- Back up any important component beforehand. +- Supported Kubernetes versions of KubeSphere 3.3: v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and v1.24.x. For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatability. Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x. + +## Major Updates + +In KubeSphere 3.3.1, some changes have made on built-in roles and permissions of custom roles. Therefore, before you upgrade KubeSphere to 3.3.1, please note the following: + + - Change of built-in roles: Platform-level built-in roles `users-manager` and `workspace-manager` are removed. If an existing user has been bound to `users-manager` or `workspace-manager`, its role will be changed to `platform-regular` after the upgrade is completed. Role `platform-self-provisioner` is added. For more information about built-in roles, refer to [Create a user](../../quick-start/create-workspace-and-project). + + - Some permission of custom roles are removed: + - Removed permissions of platform-level custom roles: user management, role management, and workspace management. + - Removed permissions of workspace-level custom roles: user management, role management, and user group management. + - Removed permissions of namespace-level custom roles: user management and role management. + - After you upgrade KubeSphere to 3.3.1, custom roles will be retained, but removed permissions of the custom roles will be revoked. + +## Apply ks-installer + +Run the following command to upgrade your cluster. + +```bash +kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml --force +``` + +## Enable Pluggable Components + +You can [enable new pluggable components](../../pluggable-components/overview/) of KubeSphere 3.3 after the upgrade to explore more features of the container platform. + diff --git a/content/en/docs/v3.4/upgrade/upgrade-with-kubekey.md b/content/en/docs/v3.4/upgrade/upgrade-with-kubekey.md new file mode 100644 index 000000000..35eef7615 --- /dev/null +++ b/content/en/docs/v3.4/upgrade/upgrade-with-kubekey.md @@ -0,0 +1,146 @@ +--- +title: "Upgrade with KubeKey" +keywords: "Kubernetes, upgrade, KubeSphere, 3.3, KubeKey" +description: "Use KubeKey to upgrade Kubernetes and KubeSphere." 
+linkTitle: "Upgrade with KubeKey" +weight: 7200 +--- +KubeKey is recommended for users whose KubeSphere and Kubernetes were both installed by [KubeKey](../../installing-on-linux/introduction/kubekey/). If your Kubernetes cluster was provisioned by yourself or cloud providers, refer to [Upgrade with ks-installer](../upgrade-with-ks-installer/). + +This tutorial demonstrates how to upgrade your cluster using KubeKey. + +## Prerequisites + +- You need to have a KubeSphere cluster running v3.2.x. If your KubeSphere version is v3.1.x or earlier, upgrade to v3.2.x first. +- Read [Release Notes for 3.3.2](../../../v3.3/release/release-v332/) carefully. +- Back up any important component beforehand. +- Make your upgrade plan. Two scenarios are provided in this document for [all-in-one clusters](#all-in-one-cluster) and [multi-node clusters](#multi-node-cluster) respectively. + +## Major Updates + +In KubeSphere 3.3.1, some changes have made on built-in roles and permissions of custom roles. Therefore, before you upgrade KubeSphere to 3.3.1, please note the following: + + - Change of built-in roles: Platform-level built-in roles `users-manager` and `workspace-manager` are removed. If an existing user has been bound to `users-manager` or `workspace-manager`, its role will be changed to `platform-regular` after the upgrade is completed. Role `platform-self-provisioner` is added. For more information about built-in roles, refer to [Create a user](../../quick-start/create-workspace-and-project). + + - Some permission of custom roles are removed: + - Removed permissions of platform-level custom roles: user management, role management, and workspace management. + - Removed permissions of workspace-level custom roles: user management, role management, and user group management. + - Removed permissions of namespace-level custom roles: user management and role management. + - After you upgrade KubeSphere to 3.3.1, custom roles will be retained, but removed permissions of the custom roles will be revoked. + +## Download KubeKey + +Follow the steps below to download KubeKey before you upgrade your cluster. + +{{< tabs >}} + +{{< tab "Good network connections to GitHub/Googleapis" >}} + +Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly. + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "Poor network connections to GitHub/Googleapis" >}} + +Run the following command first to make sure you download KubeKey from the correct zone. + +```bash +export KKZONE=cn +``` + +Run the following command to download KubeKey: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +After you download KubeKey, if you transfer it to a new machine also with poor network connections to Googleapis, you must run `export KKZONE=cn` again before you proceed with the steps below. + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +The commands above download the latest release of KubeKey. You can change the version number in the command to download a specific version. + +{{}} + +Make `kk` executable: + +```bash +chmod +x kk +``` + +## Upgrade KubeSphere and Kubernetes + +Upgrading steps are different for single-node clusters (all-in-one) and multi-node clusters. + +{{< notice info >}} + +When upgrading Kubernetes, KubeKey will upgrade from one MINOR version to the next MINOR version until the target version. 
For example, you may see the upgrading process going from 1.16 to 1.17 and to 1.18, instead of directly jumping to 1.18 from 1.16. + +{{}} + +### All-in-one cluster + +Run the following command to use KubeKey to upgrade your single-node cluster to KubeSphere 3.3 and Kubernetes v1.22.12: + +```bash +./kk upgrade --with-kubernetes v1.22.12 --with-kubesphere v3.3.2 +``` + +To upgrade Kubernetes to a specific version, explicitly provide the version after the flag `--with-kubernetes`. Available versions are v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and v1.24.x. For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatability. Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x. +### Multi-node cluster + +#### Step 1: Generate a configuration file using KubeKey + +This command creates a configuration file `sample.yaml` of your cluster. + +```bash +./kk create config --from-cluster +``` + +{{< notice note >}} + +It assumes your kubeconfig is allocated in `~/.kube/config`. You can change it with the flag `--kubeconfig`. + +{{}} + +#### Step 2: Edit the configuration file template + +Edit `sample.yaml` based on your cluster configuration. Make sure you replace the following fields correctly. + +- `hosts`: The basic information of your hosts (hostname and IP address) and how to connect to them using SSH. +- `roleGroups.etcd`: Your etcd nodes. +- `controlPlaneEndpoint`: Your load balancer address (optional). +- `registry`: Your image registry information (optional). + +{{< notice note >}} + +For more information, see [Edit the configuration file](../../installing-on-linux/introduction/multioverview/#2-edit-the-configuration-file) or refer to the `Cluster` section of [the complete configuration file](https://github.com/kubesphere/kubekey/blob/release-2.2/docs/config-example.md) for more information. + +{{}} + +#### Step 3: Upgrade your cluster +The following command upgrades your cluster to KubeSphere 3.3 and Kubernetes v1.22.12: + +```bash +./kk upgrade --with-kubernetes v1.22.12 --with-kubesphere v3.3.2 -f sample.yaml +``` + +To upgrade Kubernetes to a specific version, explicitly provide the version after the flag `--with-kubernetes`. Available versions are v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, and v1.24.x. For Kubernetes versions with an asterisk, some features of edge nodes may be unavailable due to incompatability. Therefore, if you want to use edge nodes, you are advised to install Kubernetes v1.21.x. + +{{< notice note >}} + +To use new features of KubeSphere 3.3, you may need to enable some pluggable components after the upgrade. + +{{}} \ No newline at end of file diff --git a/content/en/docs/v3.4/upgrade/what-changed.md b/content/en/docs/v3.4/upgrade/what-changed.md new file mode 100644 index 000000000..f799ef160 --- /dev/null +++ b/content/en/docs/v3.4/upgrade/what-changed.md @@ -0,0 +1,12 @@ +--- +title: "Changes after Upgrade" +keywords: "Kubernetes, upgrade, KubeSphere, 3.3" +description: "Understand what will be changed after the upgrade." + +linkTitle: "Changes after Upgrade" +weight: 7600 +--- + +This section covers the changes after upgrade for existing settings in previous versions. If you want to know all the new features and enhancements in KubeSphere 3.3, see [Release Notes for 3.3.0](../../../v3.3/release/release-v330/), [Release Notes for 3.3.1](../../../v3.3/release/release-v331/), and [Release Notes for 3.3.2](../../../v3.3/release/release-v332/). 
+ + diff --git a/content/en/docs/v3.4/workspace-administration/_index.md b/content/en/docs/v3.4/workspace-administration/_index.md new file mode 100644 index 000000000..2024f8313 --- /dev/null +++ b/content/en/docs/v3.4/workspace-administration/_index.md @@ -0,0 +1,16 @@ +--- +title: "Workspace Administration and User Guide" +description: "This chapter helps you to better manage KubeSphere workspaces." +layout: "second" + +linkTitle: "Workspace Administration and User Guide" + +weight: 9000 + +icon: "/images/docs/v3.3/docs.svg" + +--- + +KubeSphere tenants work in a workspace to manage projects and apps. Among others, workspace administrators are responsible for the management of app repositories. Tenants with necessary permissions can further deploy and use app templates from app repositories. They can also leverage individual app templates which are uploaded and released to the App Store. Besides, administrators also control whether the network of a workspace is isolated from others'. + +This chapter demonstrates how workspace administrators and tenants work at the workspace level. \ No newline at end of file diff --git a/content/en/docs/v3.4/workspace-administration/app-repository/_index.md b/content/en/docs/v3.4/workspace-administration/app-repository/_index.md new file mode 100644 index 000000000..656e5cfaf --- /dev/null +++ b/content/en/docs/v3.4/workspace-administration/app-repository/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "App Repositories" +weight: 9300 + +_build: + render: false +--- diff --git a/content/en/docs/v3.4/workspace-administration/app-repository/import-helm-repository.md b/content/en/docs/v3.4/workspace-administration/app-repository/import-helm-repository.md new file mode 100644 index 000000000..4e9a017f1 --- /dev/null +++ b/content/en/docs/v3.4/workspace-administration/app-repository/import-helm-repository.md @@ -0,0 +1,52 @@ +--- +title: "Import a Helm Repository" +keywords: "Kubernetes, Helm, KubeSphere, Application" +description: "Import a Helm repository to KubeSphere to provide app templates for tenants in a workspace." +linkTitle: "Import a Helm Repository" +weight: 9310 +--- + +KubeSphere builds app repositories that allow users to use Kubernetes applications based on Helm charts. App repositories are powered by [OpenPitrix](https://github.com/openpitrix/openpitrix), an open source platform for cross-cloud application management sponsored by QingCloud. In an app repository, every application serves as a base package library. To deploy and manage an app from an app repository, you need to create the repository in advance. + +To create a repository, you use an HTTP/HTTPS server or object storage solutions to store packages. More specifically, an app repository relies on external storage independent of OpenPitrix, such as [MinIO](https://min.io/) object storage, [QingStor object storage](https://github.com/qingstor), and [AWS object storage](https://aws.amazon.com/what-is-cloud-object-storage/). These object storage services are used to store configuration packages and index files created by developers. After a repository is registered, the configuration packages are automatically indexed as deployable applications. + +This tutorial demonstrates how to add an app repository to KubeSphere. + +## Prerequisites + +- You need to enable the [KubeSphere App Store (OpenPitrix)](../../../pluggable-components/app-store/). +- You need to have an app repository. 
Refer to [the official documentation of Helm](https://v2.helm.sh/docs/developing_charts/#the-chart-repository-guide) to create repositories or [upload your own apps to the public repository of KubeSphere](../upload-app-to-public-repository/). Alternatively, use the example repository in the steps below, which is only for demonstration purposes. +- You need to create a workspace and a user (`ws-admin`). The user must be granted the role of `workspace-admin` in the workspace. For more information, refer to [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Add an App Repository + +1. Log in to the web console of KubeSphere as `ws-admin`. In your workspace, go to **App Repositories** under **App Management**, and then click **Add**. + +2. In the dialog that appears, specify an app repository name and add your repository URL. For example, enter `https://charts.kubesphere.io/main`. + + - **Name**: Set a simple and clear name for the repository, which is easy for users to identify. + - **URL**: Follow the RFC 3986 specification with the following three protocols supported: + - S3: The URL is S3-styled, such as `s3..amazonaws.com` for the access to Amazon S3 services using the S3 interface. If you select this type, you need to provide the access key and secret. + - HTTP: For example, `http://docs-repo.gd2.qingstor.com`. The example contains a sample app NGINX, which will be imported automatically after the repository is created. You can deploy it from app templates. + - HTTPS: For example, `https://docs-repo.gd2.qingstor.com`. + {{< notice note >}} + +If you want to use basic access authentication in HTTP/HTTPS, you can use a URL with a style like this: `http://username:password@docs-repo.gd2.qingstor.com`. + +{{}} + + - **Synchronization Interval**: Interval of synchronizing the remote app repository. + + - **Description**: Give a brief introduction of main features of the app repository. + +3. After you specify required fields, click **Validate** to verify the URL. You will see a green check mark next to the URL if it is available and click **OK** to finish. + + {{< notice note >}} + +- In an on-premises private cloud environment, you can build your own repository based on [ChartMuseum](https://chartmuseum.com/). Then, you develop and upload applications to the repository and deploy them on KubeSphere for your own needs. + +- If you need to set up HTTP basic access authentication, you can refer to [this document](https://github.com/helm/chartmuseum#basic-auth). + + {{}} + +4. The repository appears in the repository list after imported and KubeSphere automatically adds all apps in the repository as app templates. When users choose to deploy apps using app templates, they can see apps in this repository. For more information, see [Deploy Apps from App Templates](../../../project-user-guide/application/deploy-app-from-template/). diff --git a/content/en/docs/v3.4/workspace-administration/app-repository/upload-app-to-public-repository.md b/content/en/docs/v3.4/workspace-administration/app-repository/upload-app-to-public-repository.md new file mode 100644 index 000000000..be174f861 --- /dev/null +++ b/content/en/docs/v3.4/workspace-administration/app-repository/upload-app-to-public-repository.md @@ -0,0 +1,44 @@ +--- +title: "Upload Apps to the KubeSphere GitHub Repository" +keywords: "Kubernetes, helm, KubeSphere, application" +description: "Upload your own apps to the GitHub repository of KubeSphere." 
+linkTitle: "Upload Apps to the KubeSphere GitHub Repository" +weight: 9320 +--- + +KubeSphere provides an app repository for testing and development. Users can upload their apps to the repository, which will serve as available app templates once approved. + +## Upload Your App + +Build your app first based on [the Helm documentation](https://helm.sh/docs/topics/charts/). You can refer to the existing apps in the KubeSphere app repository. Official apps are stored in [src/main](https://github.com/kubesphere/helm-charts/tree/master/src/main) and apps being tested are stored in [src/test](https://github.com/kubesphere/helm-charts/tree/master/src/test). + +### Step 1: Develop an app + +1. [Fork the app repository of KubeSphere](https://github.com/kubesphere/helm-charts/fork). + +2. Install Helm based on [the Helm documentation](https://helm.sh/docs/intro/install/). + +3. Execute the following command to initialize the Helm client. + + ```bash + helm init --client-only + ``` + +4. Create your app. For example, you create an app named `mychart` in the directory `src/test`. + + ```bash + cd src/test + helm create mychart + cd mychart + ``` + +5. You can see that Helm has created related templates in the directory. For more information, see [Create an App](../../../application-store/app-developer-guide/helm-developer-guide/#create-an-app). + +### Step 2: Submit an app + +When you finish the development, submit a pull request to [the official repository of KubeSphere](https://github.com/kubesphere/helm-charts) for review. + +### Step 3: Deploy your app + +After your pull request is approved, your app will be available to use. For more information, refer to [Import a Helm Repository](../import-helm-repository/) to add `https://charts.kubesphere.io/main` to KubeSphere. + diff --git a/content/en/docs/v3.4/workspace-administration/department-management.md b/content/en/docs/v3.4/workspace-administration/department-management.md new file mode 100644 index 000000000..1201d50db --- /dev/null +++ b/content/en/docs/v3.4/workspace-administration/department-management.md @@ -0,0 +1,80 @@ +--- +title: "Department Management" +keywords: 'KubeSphere, Kubernetes, Department, Role, Permission, Group' +description: 'Create departments in a workspace and assign users to different departments to implement permission control.' +linkTitle: "Department Management" +weight: 9800 +--- + +This document describes how to manage workspace departments. + +A department in a workspace is a logical unit used for permission control. You can set a workspace role, multiple project roles, and multiple DevOps project roles in a department, and assign users to the department to control user permissions in batches. + +## Prerequisites + +- You need to [create a workspace and a user](../../quick-start/create-workspace-and-project/) assigned the `workspace-admin` role in the workspace. This document uses the `demo-ws` workspace and the `ws-admin` account as an example. +- To set project roles or DevOps project roles in a department, you need to [create at least one project or DevOps project](../../quick-start/create-workspace-and-project/) in the workspace. + +## Create a Department + +1. Log in to the KubeSphere web console as `ws-admin` and go to the `demo-ws` workspace. + +2. On the left navigation bar, choose **Departments** under **Workspace Settings**, and click **Set Departments** on the right. + +3. In the **Set Departments** dialog box, set the following parameters and click **OK** to create a department. 
+ + {{< notice note >}} + + * If a department has already been created in the workspace, you can click **Create Department** to add more departments to the workspace. + * You can create multiple departments and multiple sub-departments in each department. To create a subdepartment, select a department on the left department tree and click **Create Department** on the right. + + {{}} + + * **Name**: Name of the department. + * **Alias**: Alias of the department. + * **Workspace Role**: Role of all department members in the current workspace. + * **Project Role**: Role of all department members in a project. You can click **Add Project** to specify multiple project roles. Only one role can be specified for each project. + * **DevOps Project Role**: Role of all department members in a DevOps project. You can click **Add DevOps Project** to specify multiple DevOps project roles. Only one role can be specified for each DevOps project. + +4. Click **OK** after the department is created, and then click **Close**. On the **Departments** page, the created department is displayed in a department tree on the left. + +## Assign a User to a Department + +1. On the **Departments** page, select a department in the department tree on the left and click **Not Assigned** on the right. + +2. In the user list, click on the right of a user, and click **OK** for the displayed message to assign the user to the department. + + {{< notice note >}} + + * If permissions provided by the department overlap with existing permissions of the user, new permissions are added to the user. Existing permissions of the user are not affected. + * Users assigned to a department can perform operations according to the workspace role, project roles, and DevOps project roles associated with the department without being invited to the workspace, projects, and DevOps projects. + + {{}} + +## Remove a User from a Department + +1. On the **Departments** page, select a department in the department tree on the left and click **Assigned** on the right. +2. In the assigned user list, click on the right of a user, enter the username in the displayed dialog box, and click **OK** to remove the user. + +## Delete and Edit a Department + +1. On the **Departments** page, click **Set Departments**. + +2. In the **Set Departments** dialog box, on the left, click the upper level of the department to be edited or deleted. + +3. Click on the right of the department to edit it. + + {{< notice note >}} + + For details, see [Create a Department](#create-a-department). + + {{}} + +4. Click on the right of the department, enter the department name in the displayed dialog box, and click **OK** to delete the department. + + {{< notice note >}} + + * If a department contains sub-departments, the sub-departments will also be deleted. + * After a department is deleted, the associated roles will be unbound from the users. + + {{}} \ No newline at end of file diff --git a/content/en/docs/v3.4/workspace-administration/project-quotas.md b/content/en/docs/v3.4/workspace-administration/project-quotas.md new file mode 100644 index 000000000..ad59de15f --- /dev/null +++ b/content/en/docs/v3.4/workspace-administration/project-quotas.md @@ -0,0 +1,56 @@ +--- +title: "Project Quotas" +keywords: 'KubeSphere, Kubernetes, projects, quotas, resources, requests, limits' +description: 'Set requests and limits to control resource usage in a project.' 
+linkTitle: "Project Quotas" +weight: 9600 +--- + +KubeSphere uses [Kubernetes requests and limits](https://kubesphere.io/blogs/understand-requests-and-limits-in-kubernetes/) to control resource (for example, CPU and memory) usage in a project, also known as [resource quotas](https://kubernetes.io/docs/concepts/policy/resource-quotas/) in Kubernetes. Requests make sure a project can get the resources it needs as they are specifically guaranteed and reserved. On the contrary, limits ensure that a project can never use resources above a certain value. + +Besides CPU and memory, you can also set resource quotas for other objects separately such as Pods, [Deployments](../../project-user-guide/application-workloads/deployments/), [Jobs](../../project-user-guide/application-workloads/jobs/), [Services](../../project-user-guide/application-workloads/services/), and [ConfigMaps](../../project-user-guide/configuration/configmaps/) in a project. + +This tutorial demonstrates how to configure quotas for a project. + +## Prerequisites + +You have an available workspace, a project and a user (`ws-admin`). The user must have the `admin` role at the workspace level. For more information, see [Create Workspaces, Projects, Users and Roles](../../quick-start/create-workspace-and-project/). + +{{< notice note >}} + +If you use the user `project-admin` (a user of the `admin` role at the project level), you can set project quotas as well for a new project (i.e. its quotas remain unset). However, `project-admin` cannot change project quotas once they are set. Generally, it is the responsibility of `ws-admin` to set limits and requests for a project. `project-admin` is responsible for [setting limit ranges](../../project-administration/container-limit-ranges/) for containers in a project. + +{{}} + +## Set Project Quotas + +1. Log in to the console as `ws-admin` and go to a project. On the **Overview** page, you can see project quotas remain unset if the project is newly created. Click **Edit Quotas** to configure quotas. + +2. In the displayed dialog box, you can see that KubeSphere does not set any requests or limits for a project by default. To set +limits to control CPU and memory resources, use the slider to move to a desired value or enter numbers directly. Leaving a field blank means you do not set any requests or limits. + + {{< notice note >}} + + The limit can never be lower than the request. + + {{}} + +3. To set quotas for other resources, click **Add** under **Project Resource Quotas**, and then select a resource or enter a recource name and set a quota. + +4. Click **OK** to finish setting quotas. + +5. Go to **Basic Information** in **Project Settings**, and you can see all resource quotas for the project. + +6. To change project quotas, click **Edit Project** on the **Basic Information** page and select **Edit Project Quotas**. + + {{< notice note >}} + + For [a multi-cluster project](../../project-administration/project-and-multicluster-project/#multi-cluster-projects), the option **Edit Project Quotas** does not display in the **Manage Project** drop-down menu. To set quotas for a multi-cluster project, go to **Projects Quotas** under **Project Settings** and click **Edit Quotas**. Note that as a multi-cluster project runs across clusters, you can set resource quotas on different clusters separately. + + {{}} + +7. Change project quotas in the dialog that appears and click **OK**. 
+ +## See Also + +[Container Limit Ranges](../../project-administration/container-limit-ranges/) diff --git a/content/en/docs/v3.4/workspace-administration/role-and-member-management.md b/content/en/docs/v3.4/workspace-administration/role-and-member-management.md new file mode 100644 index 000000000..eeffc5f4c --- /dev/null +++ b/content/en/docs/v3.4/workspace-administration/role-and-member-management.md @@ -0,0 +1,61 @@ +--- +title: "Workspace Role and Member Management" +keywords: "Kubernetes, workspace, KubeSphere, multitenancy" +description: "Customize a workspace role and grant it to tenants." +linkTitle: "Workspace Role and Member Management" +weight: 9400 +--- + +This tutorial demonstrates how to manage roles and members in a workspace. + +## Prerequisites + +At least one workspace has been created, such as `demo-workspace`. Besides, you need a user of the `workspace-admin` role (for example, `ws-admin`) at the workspace level. For more information, see [Create Workspaces, Projects, Users and Roles](../../quick-start/create-workspace-and-project/). + +{{< notice note >}} + +The actual role name follows a naming convention: `workspace name-role name`. For example, for a workspace named `demo-workspace`, the actual role name of the role `admin` is `demo-workspace-admin`. + +{{}} + +## Built-in Roles + +In **Workspace Roles**, there are four available built-in roles. Built-in roles are created automatically by KubeSphere when a workspace is created and they cannot be edited or deleted. You can only view permissions included in a built-in role or assign it to a user. + +| Built-in Roles | Description | +| ------------------ | ------------------------------------------------------------ | +| `workspace-viewer` | Workspace viewer who can view all resources in the workspace. | +| `workspace-self-provisioner` | Workspace regular member who can view workspace settings, manage app templates, and create projects and DevOps projects. | +| `workspace-regular` | Workspace regular member who can view workspace settings. | +| `workspace-admin` | Workspace administrator who has full control over all resources in the workspace. | + +To view the permissions that a role contains: + +1. Log in to the console as `ws-admin`. In **Workspace Roles**, click a role (for example, `workspace-admin`) and you can see role details. + +2. Click the **Authorized Users** tab to see all the users that are granted the role. + +## Create a Workspace Role + +1. Navigate to **Workspace Roles** under **Workspace Settings**. + +2. In **Workspace Roles**, click **Create** and set a role **Name** (for example, `demo-project-admin`). Click **Edit Permissions** to continue. + +3. In the pop-up window, permissions are categorized into different **Modules**. In this example, click **Project Management** and select **Project Creation**, **Project Management**, and **Project Viewing** for this role. Click **OK** to finish creating the role. + + {{< notice note >}} + + **Depends on** means the major permission (the one listed after **Depends on**) needs to be selected first so that the affiliated permission can be assigned. + + {{}} + +4. Newly-created roles will be listed in **Workspace Roles**. To edit the information or permissions, or delete an existing role, click icon on the right. + +## Invite a New Member + +1. Navigate to **Workspace Members** under **Workspace Settings**, and click **Invite**. +2. Invite a user to the workspace by clicking icon on the right of it and assign a role to it. + +3. 
+3. After you add the user to the workspace, click **OK**. In **Workspace Members**, you can see the user in the list.
+
+4. To edit the role of an existing user or remove the user from the workspace, click the icon on the right and select the corresponding operation.
\ No newline at end of file
diff --git a/content/en/docs/v3.4/workspace-administration/upload-helm-based-application.md b/content/en/docs/v3.4/workspace-administration/upload-helm-based-application.md
new file mode 100644
index 000000000..1a2236f91
--- /dev/null
+++ b/content/en/docs/v3.4/workspace-administration/upload-helm-based-application.md
@@ -0,0 +1,38 @@
+---
+title: "Upload Helm-based Applications"
+keywords: "Kubernetes, Helm, KubeSphere, OpenPitrix, Application"
+description: "Learn how to upload a Helm-based application as an app template to your workspace."
+linkTitle: "Upload Helm-based Applications"
+weight: 9200
+---
+
+KubeSphere provides full lifecycle management for applications. Among other things, workspace administrators can upload or create new app templates and test them quickly. Furthermore, they can publish well-tested apps to the [App Store](../../application-store/) so that other users can deploy them with one click. To develop app templates, workspace administrators need to upload packaged [Helm charts](https://helm.sh/) to KubeSphere first (a brief packaging sketch is included at the end of this page).
+
+This tutorial demonstrates how to develop an app template by uploading a packaged Helm chart.
+
+## Prerequisites
+
+- You need to enable the [KubeSphere App Store (OpenPitrix)](../../pluggable-components/app-store/).
+- You need to create a workspace and a user (`project-admin`). The user must be invited to the workspace with the role of `workspace-self-provisioner`. For more information, refer to [Create Workspaces, Projects, Users and Roles](../../quick-start/create-workspace-and-project/).
+
+## Hands-on Lab
+
+1. Log in to KubeSphere as `project-admin`. In your workspace, go to **App Templates** under **App Management**, and click **Create**.
+
+2. In the dialog that appears, click **Upload**. You can upload your own Helm chart or download the [Nginx chart](/files/application-templates/nginx-0.1.0.tgz) and use it as an example for the following steps.
+
+3. After the package is uploaded, click **OK** to continue.
+
+4. You can view the basic information of the app under **App Information**. To upload an icon for the app, click **Upload Icon**. You can also skip this step and click **OK** directly.
+
+   {{< notice note >}}
+
+   Maximum accepted resolution of the app icon: 96 x 96 pixels.
+
+   {{</ notice >}}
+
+5. The app appears in the template list with the status **Developing** after it is successfully uploaded, which means the app is under development. The uploaded app is visible to all members in the same workspace.
+
+6. Click the app and the page opens with the **Versions** tab selected. Click the draft version to expand the menu, where you can see options including **Delete**, **Install**, and **Submit for Release**.
+
+7. For more information about how to release your app to the App Store, refer to [Application Lifecycle Management](../../application-store/app-lifecycle-management/#step-2-upload-and-submit-application).
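+
+If you package your own chart instead of using the sample Nginx archive above, the chart directory must be packaged into a `.tgz` file before it can be uploaded. Below is a minimal sketch using the standard Helm CLI; the chart directory name `my-app` is an example.
+
+```bash
+# Check the chart for common issues before packaging (optional but recommended).
+helm lint ./my-app
+
+# Package the chart directory into my-app-<version>.tgz.
+# The version comes from Chart.yaml, and the resulting .tgz file
+# is what you upload on the App Templates page.
+helm package ./my-app
+```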
diff --git a/content/en/docs/v3.4/workspace-administration/what-is-workspace.md b/content/en/docs/v3.4/workspace-administration/what-is-workspace.md
new file mode 100644
index 000000000..98e650db7
--- /dev/null
+++ b/content/en/docs/v3.4/workspace-administration/what-is-workspace.md
@@ -0,0 +1,83 @@
+---
+title: "Workspace Overview"
+keywords: "Kubernetes, KubeSphere, workspace"
+description: "Understand the concept of workspaces in KubeSphere and learn how to create and delete a workspace."
+
+linkTitle: "Workspace Overview"
+weight: 9100
+---
+
+A workspace is a logical unit to organize your [projects](../../project-administration/) and [DevOps projects](../../devops-user-guide/) and manage [app templates](../upload-helm-based-application/) and app repositories. It is the place for you to control resource access and share resources within your team in a secure way.
+
+It is a best practice to create a new workspace for tenants (excluding cluster administrators). The same tenant can work in multiple workspaces, while a workspace allows multiple tenants to access it in different ways.
+
+This tutorial demonstrates how to create and delete a workspace.
+
+## Prerequisites
+
+You have a user granted the role of `workspaces-manager`, such as `ws-manager` in [Create Workspaces, Projects, Users and Roles](../../quick-start/create-workspace-and-project/).
+
+## Create a Workspace
+
+1. Log in to the web console of KubeSphere as `ws-manager`. Click **Platform** in the upper-left corner, and then select **Access Control**. On the **Workspaces** page, click **Create**.
+
+2. For a single-cluster environment, on the **Basic Information** page, specify a name for the workspace and select an administrator from the drop-down list. Click **Create**.
+
+   - **Name**: Set a name for the workspace which serves as a unique identifier.
+   - **Alias**: An alias name for the workspace.
+   - **Administrator**: User that administers the workspace.
+   - **Description**: A brief introduction of the workspace.
+
+   For a multi-cluster environment, after the basic information about the workspace is set, click **Next** to continue. On the **Cluster Settings** page, select the clusters to be used in the workspace, and then click **Create**.
+
+3. The workspace is displayed in the workspace list after it is created.
+
+4. Click the workspace and you can see the resource status of the workspace on the **Overview** page.
+
+## Delete a Workspace
+
+In KubeSphere, you use a workspace to group and manage different projects, which means the lifecycle of a project is dependent on the workspace. More specifically, all the projects and related resources in a workspace will be deleted if the workspace is deleted.
+
+Before you delete a workspace, decide whether you want to unbind some key projects.
+
+### Unbind projects before deletion
+
+To delete a workspace while preserving some projects in it, run the following command first:
+
+```bash
+kubectl label ns <project-name> kubesphere.io/workspace- && kubectl patch ns <project-name> -p '{"metadata":{"ownerReferences":[]}}' --type=merge
+```
+
+{{< notice note >}}
+
+The command above removes the workspace label from the project and clears its ownerReferences. After that, you can [assign an unbound project to a new workspace](../../faq/access-control/add-kubernetes-namespace-to-kubesphere-workspace/).
+
+{{</ notice >}}
+
+### Delete a workspace on the console
+
+After you unbind the necessary projects from a workspace, perform the following steps to delete a workspace.
+
+{{< notice note >}}
+
+Be extremely cautious if you use kubectl to delete a workspace resource object directly, because the workspace and all resources in it will be removed.
+
+{{</ notice >}}
+
+1. In your workspace, go to **Basic Information** under **Workspace Settings**. On the **Basic Information** page, you can see the general information of the workspace, such as the number of projects and members.
+
+   {{< notice note >}}
+
+   On this page, you can click **Edit Information** to change the basic information of the workspace (excluding the workspace name) and turn on/off [Network Isolation](../../workspace-administration/workspace-network-isolation/).
+
+   {{</ notice >}}
+
+2. To delete the workspace, click **Manage > Delete Workspace**. In the displayed dialog box, enter the name of the workspace, and then click **OK**.
+
+   {{< notice warning >}}
+
+   A workspace cannot be restored after it is deleted, and resources in the workspace will also be removed.
+
+   {{</ notice >}}
+
diff --git a/content/en/docs/v3.4/workspace-administration/workspace-network-isolation.md b/content/en/docs/v3.4/workspace-administration/workspace-network-isolation.md
new file mode 100644
index 000000000..8bc7582da
--- /dev/null
+++ b/content/en/docs/v3.4/workspace-administration/workspace-network-isolation.md
@@ -0,0 +1,37 @@
+---
+title: "Workspace Network Isolation"
+keywords: 'KubeSphere, Kubernetes, Calico, Network Policy'
+description: 'Enable or disable the network policy in your workspace.'
+linkTitle: "Workspace Network Isolation"
+weight: 9500
+---
+
+## Prerequisites
+
+- You have already enabled [Network Policies](../../pluggable-components/network-policy/).
+
+- Use a user with the `workspace-admin` role. For example, use the `ws-admin` user created in [Create Workspaces, Projects, Users and Roles](../../quick-start/create-workspace-and-project/).
+
+  {{< notice note >}}
+
+  For the implementation of the network policy, you can refer to [KubeSphere NetworkPolicy](https://github.com/kubesphere/community/blob/master/sig-network/concepts-and-designs/kubesphere-network-policy.md).
+
+  {{</ notice >}}
+
+## Enable or Disable Workspace Network Isolation
+
+Workspace network isolation is disabled by default. You can turn on network isolation in **Basic Information** under **Workspace Settings**.
+
+{{< notice note >}}
+
+When network isolation is turned on, egress traffic is allowed by default, while ingress traffic from other workspaces is denied. If you need to customize your network policy, you need to turn on [Project Network Isolation](../../project-administration/project-network-isolation/) and add a network policy in **Project Settings**.
+
+{{</ notice >}}
+
+You can also disable network isolation on the **Basic Information** page.
+
+## Best Practice
+
+To ensure that all Pods in a workspace are secure, a best practice is to enable workspace network isolation.
+
+When network isolation is on, the workspace cannot be accessed by other workspaces. If a workspace's default network isolation does not meet your needs, turn on project network isolation and customize your project's network policy.
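+
+As a rough illustration only, the isolation behavior described above (egress open, ingress allowed only from projects in the same workspace) is similar to a Kubernetes NetworkPolicy such as the one below, applied to each project in the workspace. The policy name, the project namespace `demo-project`, and the workspace name `demo-workspace` are examples; the actual policies are generated and managed by KubeSphere and may differ.
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: allow-ingress-from-same-workspace   # hypothetical name for illustration
+  namespace: demo-project                   # a project inside the isolated workspace
+spec:
+  podSelector: {}            # select all Pods in the project
+  policyTypes:
+    - Ingress                # egress is not restricted by this policy
+  ingress:
+    - from:
+        - namespaceSelector:
+            matchLabels:
+              kubesphere.io/workspace: demo-workspace   # allow traffic only from projects in the same workspace
+```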
\ No newline at end of file
diff --git a/content/en/docs/v3.4/workspace-administration/workspace-quotas.md b/content/en/docs/v3.4/workspace-administration/workspace-quotas.md
new file mode 100644
index 000000000..8a40dec53
--- /dev/null
+++ b/content/en/docs/v3.4/workspace-administration/workspace-quotas.md
@@ -0,0 +1,41 @@
+---
+title: "Workspace Quotas"
+keywords: 'KubeSphere, Kubernetes, workspace, quotas'
+description: 'Set workspace quotas to control the total resource usage of projects and DevOps projects in a workspace.'
+linkTitle: "Workspace Quotas"
+weight: 9700
+---
+
+Workspace quotas are used to control the total resource usage of all projects and DevOps projects in a workspace. Similar to [project quotas](../project-quotas/), workspace quotas contain requests and limits of CPU and memory. Requests make sure projects in the workspace can get the resources they need, as these resources are explicitly guaranteed and reserved. In contrast, limits ensure that the resource usage of all projects in the workspace can never go above a certain value.
+
+In [a multi-cluster architecture](../../multicluster-management/), as you need to [assign one or multiple clusters to a workspace](../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/), you can decide the amount of resources that can be used by the workspace on different clusters.
+
+This tutorial demonstrates how to manage resource quotas for a workspace.
+
+## Prerequisites
+
+You have an available workspace and a user (`ws-manager`). The user must have the `workspaces-manager` role at the platform level. For more information, see [Create Workspaces, Projects, Users and Roles](../../quick-start/create-workspace-and-project/).
+
+## Set Workspace Quotas
+
+1. Log in to the KubeSphere web console as `ws-manager` and go to a workspace.
+
+2. Navigate to **Workspace Quotas** under **Workspace Settings**.
+
+3. The **Workspace Quotas** page lists all the available clusters assigned to the workspace and their respective requests and limits of CPU and memory. Click **Edit Quotas** on the right of a cluster.
+
+4. In the displayed dialog box, you can see that KubeSphere does not set any requests or limits for the workspace by default. To set requests and limits to control CPU and memory resources, drag the slider to a desired value or enter numbers directly. Leaving a field blank means you do not set any requests or limits.
+
+   {{< notice note >}}
+
+   The limit can never be lower than the request.
+
+   {{</ notice >}}
+
+5. Click **OK** to finish setting quotas.
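+
+To review from the command line how the workspace's resources are consumed by individual projects, you can list the project namespaces that belong to the workspace and their resource quotas. The sketch below relies only on the `kubesphere.io/workspace` label that KubeSphere adds to each project namespace; the workspace name `demo-workspace` is an example.
+
+```bash
+# List all project namespaces that belong to the workspace.
+kubectl get ns -l kubesphere.io/workspace=demo-workspace
+
+# Show resource quota usage in each of those namespaces.
+for ns in $(kubectl get ns -l kubesphere.io/workspace=demo-workspace -o jsonpath='{.items[*].metadata.name}'); do
+  kubectl describe resourcequota -n "$ns"
+done
+```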
+ +## See Also + +[Project Quotas](../project-quotas/) + +[Container Limit Ranges](../../project-administration/container-limit-ranges/) \ No newline at end of file diff --git a/content/zh/docs/v3.4/_index.md b/content/zh/docs/v3.4/_index.md new file mode 100644 index 000000000..89937bc3e --- /dev/null +++ b/content/zh/docs/v3.4/_index.md @@ -0,0 +1,62 @@ +--- +title: "Documentation" +css: "scss/docs.scss" +isDocsRoot: true + +LinkTitle: "文档" + + +section1: + title: KubeSphere 文档 + content: 了解如何通过 KubeSphere 容器平台构建并管理云原生应用程序。获取文档、示例代码与教程等信息。 + image: /images/docs/v3.3/banner.png + +sectionLink: + docs: + title: 常用文档 + description: 通过快速入门、教程和示例等学习使用 KubeSphere。 + list: + - /docs/v3.3/quick-start/all-in-one-on-linux + - /docs/v3.3/quick-start/minimal-kubesphere-on-k8s + - /docs/v3.3/quick-start/create-workspace-and-project + - /docs/v3.3/introduction/what-is-kubesphere + - /docs/v3.3/pluggable-components + - /docs/v3.3/installing-on-linux/introduction/multioverview + - /docs/v3.3/pluggable-components/app-store + - /docs/v3.3/pluggable-components/devops + - /docs/v3.3/multicluster-management + - /docs/v3.3/project-user-guide/configuration/image-registry + - /docs/v3.3/devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-jenkinsfile + - /docs/v3.3/devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-graphical-editing-panel + - /docs/v3.3/project-user-guide/image-builder/source-to-image + - /docs/v3.3/application-store/app-lifecycle-management + + videos: + title: 视频教程 + description: 观看视频教程学习 KubeSphere。 + list: + - link: https://www.bilibili.com/video/BV1KA411s7D3 + text: All-in-One 模式安装 KubeSphere + - link: https://www.bilibili.com/video/BV16y4y1v7cn + text: 多节点安装 KubeSphere + - link: https://www.bilibili.com/video/BV1Pz4y1C7jr + text: 离线安装 KubeSphere + +section3: + title: 在云服务上运行 KubeSphere 与 Kubernetes 技术栈 + description: 云厂商以托管的形式为用户提供 KubeSphere 服务,深度集成了公有云托管容器服务,用户可在几分钟内通过简单的步骤迅速构建高可用集群。您可在以下公有云上一键部署 KubeSphere。 + list: + - image: /images/docs/v3.3/aws.jpg + content: AWS Quickstart + link: https://aws.amazon.com/quickstart/architecture/qingcloud-kubesphere/ + - image: /images/docs/v3.3/microsoft-azure.jpg + content: Azure Marketplace + link: https://market.azure.cn/marketplace/apps/qingcloud.kubesphere + - image: /images/docs/v3.3/qingcloud.svg + content: QingCloud QKE + link: https://www.qingcloud.com/products/kubesphereqke/ + + titleRight: 想要在您的云上托管 KubeSphere? 
+ btnContent: 与我们合作 + btnLink: /partner/ +--- diff --git a/content/zh/docs/v3.4/access-control-and-account-management/_index.md b/content/zh/docs/v3.4/access-control-and-account-management/_index.md new file mode 100644 index 000000000..6bb2d9195 --- /dev/null +++ b/content/zh/docs/v3.4/access-control-and-account-management/_index.md @@ -0,0 +1,13 @@ +--- +title: "帐户管理和权限控制" +description: "帐户管理和权限控制" +layout: "second" + +linkTitle: "帐户管理和权限控制" +weight: 12000 + +icon: "/images/docs/v3.3/docs.svg" + +--- + +KubeSphere 的多租户架构是运行在容器平台上的许多关键组件的基础。不同的租户被分配不同的角色,以便他们可以执行相关的任务。本章概述了 KubeSphere 的多租户系统,并演示了如何为第三方登录配置身份验证。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/access-control-and-account-management/external-authentication/_index.md b/content/zh/docs/v3.4/access-control-and-account-management/external-authentication/_index.md new file mode 100644 index 000000000..fe7156336 --- /dev/null +++ b/content/zh/docs/v3.4/access-control-and-account-management/external-authentication/_index.md @@ -0,0 +1,8 @@ +--- +title: "外部身份验证" +description: "了解如何在 KubeSphere 上配置第三方身份验证。" +layout: "single" + +linkTitle: "外部身份验证" +weight: 12200 +--- diff --git a/content/zh/docs/v3.4/access-control-and-account-management/external-authentication/cas-identity-provider.md b/content/zh/docs/v3.4/access-control-and-account-management/external-authentication/cas-identity-provider.md new file mode 100644 index 000000000..eb05799c6 --- /dev/null +++ b/content/zh/docs/v3.4/access-control-and-account-management/external-authentication/cas-identity-provider.md @@ -0,0 +1,58 @@ +--- +title: "CAS 身份提供者" +keywords: "CAS, 身份提供者" +description: "如何使用外部 CAS 身份提供者。" + +linkTitle: "CAS 身份提供者" +weight: 12223 +--- + +CAS (Central Authentication Service) 是耶鲁 Yale 大学发起的一个java开源项目,旨在为 Web应用系统提供一种可靠的 单点登录 解决方案( Web SSO ), CAS 具有以下特点: + +- 开源的企业级单点登录解决方案 +- CAS Server 为需要独立部署的 Web 应用----一个独立的Web应用程序(cas.war)。 +- CAS Client 支持非常多的客户端 ( 指单点登录系统中的各个 Web 应用 ) ,包括 Java, .Net, PHP, Perl, 等。 + + +## 准备工作 + +您需要部署一个 Kubernetes 集群,并在集群中安装 KubeSphere。有关详细信息,请参阅[在 Linux 上安装](../../../installing-on-linux/)和[在 Kubernetes 上安装](../../../installing-on-kubernetes/)。 + +## 步骤 + +1. 以 `admin` 身份登录 KubeSphere,将光标移动到右下角 icon ,点击 **kubectl**,然后执行以下命令来编辑 CRD `ClusterConfiguration` 中的 `ks-installer`: + + ```bash + kubectl -n kubesphere-system edit cc ks-installer + ``` + +2. 
在 `spec.authentication.jwtSecret` 字段下添加以下字段。 + + ```yaml + spec: + authentication: + jwtSecret: '' + authenticateRateLimiterMaxTries: 10 + authenticateRateLimiterDuration: 10m0s + oauthOptions: + accessTokenMaxAge: 1h + accessTokenInactivityTimeout: 30m + identityProviders: + - name: cas + type: CASIdentityProvider + mappingMethod: auto + provider: + redirectURL: "https://ks-console:30880/oauth/redirect/cas" + casServerURL: "https://cas.example.org/cas" + insecureSkipVerify: true + ``` + + 字段描述如下: + + | 参数 | 描述 | + | -------------------- | ------------------------------------------------------------ | + | redirectURL | 重定向到 ks-console 的 URL,格式为:`https://<域名>/oauth/redirect/<身份提供者名称>`。URL 中的 `<身份提供者名称>` 对应 `oauthOptions:identityProviders:name` 的值。 | + | casServerURL | 定义cas 认证的url 地址 | + | insecureSkipVerify | 关闭 TLS 证书验证。 | + + diff --git a/content/zh/docs/v3.4/access-control-and-account-management/external-authentication/oidc-identity-provider.md b/content/zh/docs/v3.4/access-control-and-account-management/external-authentication/oidc-identity-provider.md new file mode 100644 index 000000000..fa144bc98 --- /dev/null +++ b/content/zh/docs/v3.4/access-control-and-account-management/external-authentication/oidc-identity-provider.md @@ -0,0 +1,64 @@ +--- +title: "OIDC 身份提供者" +keywords: "OIDC, 身份提供者" +description: "如何使用外部 OIDC 身份提供者。" + +linkTitle: "OIDC 身份提供者" +weight: 12221 +--- + +## OIDC 身份提供者 + +[OpenID Connect](https://openid.net/connect/) 是一种基于 OAuth 2.0 系列规范的可互操作的身份认证协议。使用简单的 REST/JSON 消息流,其设计目标是“让简单的事情变得简单,让复杂的事情成为可能”。与之前的任何身份认证协议(例如 Keycloak、Okta、Dex、Auth0、Gluu、Casdoor 等)相比,开发人员集成起来非常容易。 + +## 准备工作 + +您需要部署一个 Kubernetes 集群,并在集群中安装 KubeSphere。有关详细信息,请参阅[在 Linux 上安装](../../../installing-on-linux/)和[在 Kubernetes 上安装](../../../installing-on-kubernetes/)。 + +## 步骤 + +1. 以 `admin` 身份登录 KubeSphere,将光标移动到右下角 icon ,点击 **kubectl**,然后执行以下命令来编辑 CRD `ClusterConfiguration` 中的 `ks-installer`: + + ```bash + kubectl -n kubesphere-system edit cc ks-installer + ``` + +2. 
在 `spec.authentication.jwtSecret` 字段下添加以下字段。 + + *使用 [Google Identity Platform](https://developers.google.com/identity/protocols/oauth2/openid-connect) 的示例*: + + ```yaml + spec: + authentication: + jwtSecret: '' + authenticateRateLimiterMaxTries: 10 + authenticateRateLimiterDuration: 10m0s + oauthOptions: + accessTokenMaxAge: 1h + accessTokenInactivityTimeout: 30m + identityProviders: + - name: google + type: OIDCIdentityProvider + mappingMethod: auto + provider: + clientID: '********' + clientSecret: '********' + issuer: https://accounts.google.com + redirectURL: 'https://ks-console/oauth/redirect/google' + ``` + + 字段描述如下: + + | 参数 | 描述 | + | -------------------- | ------------------------------------------------------------ | + | clientID | 客户端 ID。 | + | clientSecret | 客户端密码。 | + | redirectURL | 重定向到 ks-console 的 URL,格式为:`https://<域名>/oauth/redirect/<身份提供者名称>`。URL 中的 `<身份提供者名称>` 对应 `oauthOptions:identityProviders:name` 的值。 | + | issuer | 定义客户端如何动态发现有关 OpenID 提供者的信息。 | + | preferredUsernameKey | 可配置的密钥,包含首选用户声明。此参数为可选参数。 | + | emailKey | 可配置的密钥,包含电子邮件声明。此参数为可选参数。 | + | getUserInfo | 使用 userinfo 端点获取令牌的附加声明。非常适用于上游返回 “thin” ID 令牌的场景。此参数为可选参数。 | + | insecureSkipVerify | 关闭 TLS 证书验证。 | + + + diff --git a/content/zh/docs/v3.4/access-control-and-account-management/external-authentication/set-up-external-authentication.md b/content/zh/docs/v3.4/access-control-and-account-management/external-authentication/set-up-external-authentication.md new file mode 100644 index 000000000..bc880d62b --- /dev/null +++ b/content/zh/docs/v3.4/access-control-and-account-management/external-authentication/set-up-external-authentication.md @@ -0,0 +1,112 @@ +--- +title: "设置外部身份验证" +keywords: "LDAP, 外部, 第三方, 身份验证" +description: "如何在 KubeSphere 上设置外部身份验证。" + +linkTitle: "设置外部身份验证" +weight: 12210 +--- + +本文档描述了如何在 KubeSphere 上使用外部身份提供者,例如 LDAP 服务或 Active Directory 服务。 + +KubeSphere 提供了一个内置的 OAuth 服务。用户通过获取 OAuth 访问令牌以对 API 进行身份验证。作为 KubeSphere 管理员,您可以编辑 CRD `ClusterConfiguration` 中的 `ks-installer` 来配置 OAuth 并指定身份提供者。 + +## 准备工作 + +您需要部署一个 Kubernetes 集群,并在集群中安装 KubeSphere。有关详细信息,请参阅[在 Linux 上安装](../../../installing-on-linux/)和[在 Kubernetes 上安装](../../../installing-on-kubernetes/)。 + + +## 步骤 + +1. 以 `admin` 身份登录 KubeSphere,将光标移动到右下角 icon ,点击 **kubectl**,然后执行以下命令来编辑 CRD `ClusterConfiguration` 中的 `ks-installer`: + + ```bash + kubectl -n kubesphere-system edit cc ks-installer + ``` + +2. 
在 `spec.authentication.jwtSecret` 字段下添加以下字段。 + + 示例: + + ```yaml + spec: + authentication: + jwtSecret: '' + authenticateRateLimiterMaxTries: 10 + authenticateRateLimiterDuration: 10m0s + loginHistoryRetentionPeriod: 168h + maximumClockSkew: 10s + multipleLogin: true + oauthOptions: + accessTokenMaxAge: 1h + accessTokenInactivityTimeout: 30m + identityProviders: + - name: LDAP + type: LDAPIdentityProvider + mappingMethod: auto + provider: + host: 192.168.0.2:389 + managerDN: uid=root,cn=users,dc=nas + managerPassword: ******** + userSearchBase: cn=users,dc=nas + loginAttribute: uid + mailAttribute: mail + ``` + + 字段描述如下: + + * `jwtSecret`:签发用户令牌的密钥。在多集群环境下,所有的集群必须[使用相同的密钥](../../../multicluster-management/enable-multicluster/direct-connection/#prepare-a-member-cluster)。 + * `authenticateRateLimiterMaxTries`:`authenticateLimiterDuration` 指定的期间内允许的最大连续登录失败次数。如果用户连续登录失败次数达到限制,则该用户将被封禁。 + * `authenticateRateLimiterDuration`:`authenticateRateLimiterMaxTries` 适用的时间段。 + * `loginHistoryRetentionPeriod`:用户登录记录保留期限,过期的登录记录将被自动删除。 + * `maximumClockSkew`:时间敏感操作(例如验证用户令牌的过期时间)的最大时钟偏差,默认值为10秒。 + * `multipleLogin`:是否允许多个用户同时从不同位置登录,默认值为 `true`。 + * `oauthOptions`: + * `accessTokenMaxAge`:访问令牌有效期。对于多集群环境中的成员集群,默认值为 `0h`,这意味着访问令牌永不过期。对于其他集群,默认值为 `2h`。 + * `accessTokenInactivityTimeout`:令牌空闲超时时间。该值表示令牌过期后,刷新用户令牌最大的间隔时间,如果不在此时间窗口内刷新用户身份令牌,用户将需要重新登录以获得访问权。 + * `identityProviders`: + * `name`:身份提供者的名称。 + * `type`:身份提供者的类型。 + * `mappingMethod`:帐户映射方式,值可以是 `auto` 或者 `lookup`。 + * 如果值为 `auto`(默认),需要指定新的用户名。通过第三方帐户登录时,KubeSphere 会根据用户名自动创建关联帐户。 + * 如果值为 `lookup`,需要执行步骤 3 以手动关联第三方帐户与 KubeSphere 帐户。 + * `provider`:身份提供者信息。此部分中的字段根据身份提供者的类型而异。 + +3. 如果 `mappingMethod` 设置为 `lookup`,可以运行以下命令并添加标签来进行帐户关联。如果 `mappingMethod` 是 `auto` 可以跳过这个部分。 + + ```bash + kubectl edit user + ``` + + ```yaml + labels: + iam.kubesphere.io/identify-provider: + iam.kubesphere.io/origin-uid: + ``` + +4. 字段配置完成后,保存修改,然后等待 ks-installer 重启完成。 + + {{< notice note >}} + + 多集群环境中,只需要在主集群中进行配置。 + + {{}} + + +## 身份提供者 + +您可以在 `identityProviders` 部分中配置多个身份提供者(IdPs)。身份提供者会对用户进行认证,并向 KubeSphere 提供身份令牌。 + +KubeSphere 默认提供了以下几种类型的身份提供者: + +* [LDAP Identity Provider](../use-an-ldap-service) + +* [OIDC Identity Provider](../oidc-identity-provider) + +* GitHub Identity Provider + +* [CAS Identity Provider](../cas-identity-provider) + +* Aliyun IDaaS Provider + +您也可以拓展 KubeSphere [OAuth2 认证插件](../use-an-oauth2-identity-provider) 与您的帐户系统进行集成。 diff --git a/content/zh/docs/v3.4/access-control-and-account-management/external-authentication/use-an-ldap-service.md b/content/zh/docs/v3.4/access-control-and-account-management/external-authentication/use-an-ldap-service.md new file mode 100644 index 000000000..a488de9f2 --- /dev/null +++ b/content/zh/docs/v3.4/access-control-and-account-management/external-authentication/use-an-ldap-service.md @@ -0,0 +1,104 @@ +--- +title: "LDAP身份提供者" +keywords: "LDAP, 身份提供者, 外部, 身份验证" +description: "如何使用 LDAP 服务。" + +linkTitle: "LDAP身份提供者" +weight: 12220 +--- + +本文档描述了如何使用 LDAP 服务作为外部身份提供者,允许您根据 LDAP 服务对用户进行身份验证。 + +## 准备工作 + +* 您需要部署一个 Kubernetes 集群,并在集群中安装 KubeSphere。有关详细信息,请参阅[在 Linux 上安装](../../../installing-on-linux/)和[在 Kubernetes 上安装](../../../installing-on-kubernetes/)。 +* 您需要获取 LDAP 服务的管理员专有名称(DN)和管理员密码。 + +## 步骤 + +1. 
以 `admin` 身份登录 KubeSphere,将光标移动到右下角 icon ,点击 **kubectl**,然后执行以下命令来编辑 CRD `ClusterConfiguration` 中的 `ks-installer`: + + ```bash + kubectl -n kubesphere-system edit cc ks-installer + ``` + + 示例: + + ```yaml + spec: + authentication: + jwtSecret: '' + maximumClockSkew: 10s + multipleLogin: true + oauthOptions: + accessTokenMaxAge: 1h + accessTokenInactivityTimeout: 30m + identityProviders: + - name: LDAP + type: LDAPIdentityProvider + mappingMethod: auto + provider: + host: 192.168.0.2:389 + managerDN: uid=root,cn=users,dc=nas + managerPassword: ******** + userSearchBase: cn=users,dc=nas + loginAttribute: uid + mailAttribute: mail + ``` + +2. 在 `spec:authentication` 部分配置 `oauthOptions:identityProviders` 以外的字段信息请参阅[设置外部身份认证](../set-up-external-authentication/)。 + +3. 在 `oauthOptions:identityProviders` 部分配置字段。 + + * `name`: 用户定义的 LDAP 服务名称。 + * `type`: 必须将该值设置为 `LDAPIdentityProvider` 才能将 LDAP 服务用作身份提供者。 + * `mappingMethod`: 帐户映射方式,值可以是 `auto` 或者 `lookup`。 + * 如果值为 `auto`(默认),需要指定新的用户名。KubeSphere 根据用户名自动创建并关联 LDAP 用户。 + * 如果值为 `lookup`,需要执行步骤 4 以手动关联现有 KubeSphere 用户和 LDAP 用户。 + * `provider`: + * `host`: LDAP 服务的地址和端口号。 + * `managerDN`: 用于绑定到 LDAP 目录的 DN 。 + * `managerPassword`: `managerDN` 对应的密码。 + * `userSearchBase`: 用户搜索基。设置为所有 LDAP 用户所在目录级别的 DN 。 + * `loginAttribute`: 标识 LDAP 用户的属性。 + * `mailAttribute`: 标识 LDAP 用户的电子邮件地址的属性。 + +4. 如果 `mappingMethod` 设置为 `lookup`,可以运行以下命令并添加标签来进行帐户关联。如果 `mappingMethod` 是 `auto` 可以跳过这个部分。 + + ```bash + kubectl edit user + ``` + + ```yaml + labels: + iam.kubesphere.io/identify-provider: + iam.kubesphere.io/origin-uid: + ``` + +5. 字段配置完成后,保存修改,然后等待 ks-installer 完成重启。 + + {{< notice note >}} + + KubeSphere Web 控制台在 ks-installer 重新启动期间不可用。请等待重启完成。 + + {{}} + +6. 如果您使用 KubeSphere 3.2.0,请在配置 LDAP 之后执行以下命令并等待至 `ks-installer` 成功运行: + + ```bash + kubectl -n kubesphere-system set image deployment/ks-apiserver *=kubesphere/ks-apiserver:v3.2.1 + ``` + + {{< notice note >}} + + 如果您使用 KubeSphere 3.2.1,请跳过该步骤。 + + {{}} + +7. 进入KubeSphere登录页面,输入 LDAP 用户的用户名和密码登录。 + + {{< notice note >}} + + LDAP 用户的用户名是 `loginAttribute` 指定的属性值。 + + {{}} diff --git a/content/zh/docs/v3.4/access-control-and-account-management/external-authentication/use-an-oauth2-identity-provider.md b/content/zh/docs/v3.4/access-control-and-account-management/external-authentication/use-an-oauth2-identity-provider.md new file mode 100644 index 000000000..41f8e443f --- /dev/null +++ b/content/zh/docs/v3.4/access-control-and-account-management/external-authentication/use-an-oauth2-identity-provider.md @@ -0,0 +1,130 @@ +--- +title: "OAuth 2.0身份提供者" +keywords: 'Kubernetes, KubeSphere, OAuth2, Identity Provider' +description: '如何使用外部 OAuth2 身份提供者。' +linkTitle: "OAuth 2.0身份提供者" +weight: 12230 +--- + +本文档介绍了如何使用基于 OAuth 2.0 协议的外部身份提供者。 + +下图显示了 KubeSphere 与外部 OAuth 2.0 身份提供者之间的身份验证过程。 + +![oauth2](/images/docs/v3.3/access-control-and-account-management/external-authentication/use-an-oauth2-identity-provider/oauth2.svg) + +## 准备工作 + +您需要部署一个 Kubernetes 集群,并在集群中安装 KubeSphere。有关详细信息,请参阅[在 Linux 上安装](../../../installing-on-linux/)和[在 Kubernetes 上安装](../../../installing-on-kubernetes/)。 + +## 开发 OAuth 2.0 插件 + +{{< notice note >}} + +KubeSphere 提供了两个内置的 OAuth 2.0 插件:GitHub 的 [GitHubIdentityProvider](https://github.com/kubesphere/kubesphere/blob/release-3.1/pkg/apiserver/authentication/identityprovider/github/github.go) 和阿里云IDaaS的 [AliyunIDaasProvider](https://github.com/kubesphere/kubesphere/blob/release-3.1/pkg/apiserver/authentication/identityprovider/github/github.go) ,可以根据内置的插件开发其他插件。 + +{{}} + +1. 
在本地克隆 [KubeSphere](https://github.com/kubesphere/kubesphere) ,进入本地 KubeSphere 仓库,并在 `/pkg/apiserver/authentication/identityprovider/` 目录下创建一个插件的包。 + +2. 在插件包中,实现如下接口: + + ```go + // /pkg/apiserver/authentication/identityprovider/oauth_provider.go + type OAuthProvider interface { + // Exchange identity with a remote server. + IdentityExchange(code string) (Identity, error) + } + + type OAuthProviderFactory interface { + // Return the identity provider type. + Type() string + // Apply settings from kubesphere-config. + Create(options oauth.DynamicOptions) (OAuthProvider, error) + } + ``` + + ```go + // /pkg/apiserver/authentication/identityprovider/identity_provider.go + type Identity interface { + // (Mandatory) Return the identifier of the user at the identity provider. + GetUserID() string + // (Optional) Return the name of the user to be referred as on KubeSphere. + GetUsername() string + // (Optional) Return the email address of the user. + GetEmail() string + } + ``` + +3. 在插件包的 `init()` 函数中注册插件。 + + ```go + // Custom plugin package + func init() { + // Change to the actual name of the struct that + // implements the OAuthProviderFactory interface. + identityprovider.RegisterOAuthProvider(&{}) + } + ``` + +4. 在 `/pkg/apiserver/authentication/options/authenticate_options.go` 中导入插件包。 + + ```go + // Change to the actual name of your plugin package. + import ( + ... + _ "kubesphere.io/kubesphere/pkg/apiserver/authentication/identityprovider/" + ... + ) + ``` + +5. [构建 ks-apiserver 镜像](https://github.com/kubesphere/community/blob/104bab42f67094930f2ca87c603b7c6365cd092a/developer-guide/development/quickstart.md) 并部署到您的集群中。 + +## 集成身份提供者 + +1. 以 `admin` 身份登录 KubeSphere,将光标移动到右下角 icon ,点击 **kubectl**,然后执行以下命令来编辑 CRD `ClusterConfiguration` 中的 `ks-installer`: + + ```bash + kubectl -n kubesphere-system edit cc ks-installer + ``` + +2. 在 `spec:authentication` 部分配置的 `oauthOptions:identityProviders` 以外的字段信息请参阅[设置外部身份认证](../set-up-external-authentication/)。 + +3. 根据开发的身份提供者插件来配置 `oauthOptions:identityProviders` 中的字段。 + + 以下是使用 GitHub 作为外部身份提供者的配置示例。详情请参阅 [GitHub 官方文档](https://docs.github.com/en/developers/apps/building-oauth-apps)和 [GitHubIdentityProvider 源代码](https://github.com/kubesphere/kubesphere/blob/release-3.1/pkg/apiserver/authentication/identityprovider/github/github.go) 。 + + ```yaml + spec: + authentication: + jwtSecret: '' + authenticateRateLimiterMaxTries: 10 + authenticateRateLimiterDuration: 10m0s + oauthOptions: + accessTokenMaxAge: 1h + accessTokenInactivityTimeout: 30m + identityProviders: + - name: github + type: GitHubIdentityProvider + mappingMethod: auto + provider: + clientID: '******' + clientSecret: '******' + redirectURL: 'https://ks-console/oauth/redirect/github' + ``` + + 同样,您也可以使用阿里云 IDaaS 作为外部身份提供者。详情请参阅[阿里云 IDaaS 文档](https://www.alibabacloud.com/help/product/111120.htm?spm=a3c0i.14898238.2766395700.1.62081da1NlxYV0)和 [AliyunIDaasProvider 源代码](https://github.com/kubesphere/kubesphere/blob/release-3.1/pkg/apiserver/authentication/identityprovider/aliyunidaas/idaas.go)。 + +4. 字段配置完成后,保存修改,然后等待 ks-installer 完成重启。 + + {{< notice note >}} + + KubeSphere Web 控制台在 ks-installer 重新启动期间不可用。请等待重启完成。 + + {{}} + +5. 进入 KubeSphere 登录界面,点击 **Log In with XXX** (例如,**Log In with GitHub**)。 + +6. 
在外部身份提供者的登录界面,输入身份提供者配置的用户名和密码,登录 KubeSphere 。 + + ![github-login-page](/images/docs/v3.3/access-control-and-account-management/external-authentication/use-an-oauth2-identity-provider/github-login-page.png) + diff --git a/content/zh/docs/v3.4/access-control-and-account-management/multi-tenancy-in-kubesphere.md b/content/zh/docs/v3.4/access-control-and-account-management/multi-tenancy-in-kubesphere.md new file mode 100644 index 000000000..a82d14e8e --- /dev/null +++ b/content/zh/docs/v3.4/access-control-and-account-management/multi-tenancy-in-kubesphere.md @@ -0,0 +1,57 @@ +--- +title: "KubeSphere 中的多租户" +keywords: "Kubernetes, KubeSphere, 多租户" +description: "理解 KubeSphere 中的多租户架构。" +linkTitle: "KubeSphere 中的多租户" +weight: 12100 +--- + +Kubernetes 解决了应用编排、容器调度的难题,极大地提高了资源的利用率。有别于传统的集群运维方式,在使用 Kubernetes 的过程中,企业和个人用户在资源共享和安全性方面均面临着诸多挑战。 + +首当其冲的就是企业环境中多租户形态该如何定义,租户的安全边界该如何划分。Kubernetes 社区[关于多租户的讨论](https://docs.google.com/document/d/1fj3yzmeU2eU8ZNBCUJG97dk_wC7228-e_MmdcmTNrZY)从未停歇,但到目前为止最终的形态尚无定论。 + +## Kubernetes 多租户面临的挑战 + +多租户是一种常见的软件架构,简单概括就是在多用户环境下实现资源共享,并保证各用户间数据的隔离性。在多租户集群环境中,集群管理员需要最大程度地避免恶意租户对其他租户的攻击,公平地分配集群资源。 + +无论企业的多租户形态如何,多租户都无法避免以下两个层面的问题:逻辑层面的资源隔离;物理资源的隔离。 + +逻辑层面的资源隔离主要包括 API 的访问控制,针对用户的权限控制。Kubernetes 中的 [RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) 和命名空间 (namespace) 提供了基本的逻辑隔离能力,但在大部分企业环境中并不适用。企业中的租户往往需要跨多个命名空间甚至是多个集群进行资源管理。除此之外,针对用户的行为审计、租户隔离的日志、事件查询也是不可或缺的能力。 + +物理资源的隔离主要包括节点、网络的隔离,当然也包括容器运行时安全。您可以通过 [NetworkPolicy](../../pluggable-components/network-policy/) 对网络进行划分,通过 PodSecurityPolicy 限制容器的行为,[Kata Containers](https://katacontainers.io/) 也提供了更安全的容器运行时。 + +## KubeSphere 中的多租户 + +为了解决上述问题,KubeSphere 提供了基于 Kubernetes 的多租户管理方案。 + +![multi-tenancy-architecture](/images/docs/v3.3/zh-cn/access-control-and-account-management/multi-tanancy-in-kubesphere/multi-tenancy-architecture.png) + +在 KubeSphere 中[企业空间](../../workspace-administration/what-is-workspace/)是最小的租户单元,企业空间提供了跨集群、跨项目(即 Kubernetes 中的命名空间)共享资源的能力。企业空间中的成员可以在授权集群中创建项目,并通过邀请授权的方式参与项目协同。 + +**用户**是 KubeSphere 的帐户实例,可以被设置为平台层面的管理员参与集群的管理,也可以被添加到企业空间中参与项目协同。 + +多级的权限控制和资源配额限制是 KubeSphere 中资源隔离的基础,奠定了多租户最基本的形态。 + +### 逻辑隔离 + +与 Kubernetes 相同,KubeSphere 通过 RBAC 对用户的权限加以控制,实现逻辑层面的资源隔离。 + +KubeSphere 中的权限控制分为平台、企业空间、项目三个层级,通过角色来控制用户在不同层级的资源访问权限。 + +1. [平台角色](../../quick-start/create-workspace-and-project/):主要控制用户对平台资源的访问权限,如集群的管理、企业空间的管理、平台用户的管理等。 +2. [企业空间角色](../../workspace-administration/role-and-member-management/):主要控制企业空间成员在企业空间下的资源访问权限,如企业空间下项目、DevOps 项目的管理等。 +3. 
[项目角色](../../project-administration/role-and-member-management/):主要控制项目下资源的访问权限,如工作负载的管理、流水线的管理等。 + +### 网络隔离 + +除了逻辑层面的资源隔离,KubeSphere 中还可以针对企业空间和项目设置[网络隔离策略](../../pluggable-components/network-policy/)。 + +### 操作审计 + +KubeSphere 还提供了针对用户的[操作审计](../../pluggable-components/auditing-logs/)。 + +### 认证鉴权 + +KubeSphere 完整的认证鉴权链路如下图所示,可以通过 OPA 拓展 Kubernetes 的 RBAC 规则。KubeSphere 团队计划集成 [Gatekeeper](https://github.com/open-policy-agent/gatekeeper) 以支持更为丰富的安全管控策略。 + +![request-chain](/images/docs/v3.3/zh-cn/access-control-and-account-management/multi-tanancy-in-kubesphere/request-chain.jpg) diff --git a/content/zh/docs/v3.4/application-store/_index.md b/content/zh/docs/v3.4/application-store/_index.md new file mode 100644 index 000000000..26fc4d589 --- /dev/null +++ b/content/zh/docs/v3.4/application-store/_index.md @@ -0,0 +1,16 @@ +--- +title: "应用商店" +description: "上手 KubeSphere 应用商店" +layout: "second" + + +linkTitle: "应用商店" +weight: 14000 + +icon: "/images/docs/v3.3/docs.svg" + +--- + +KubeSphere 应用商店基于 [OpenPitrix](https://github.com/openpitrix/openpitrix) (一个跨云管理应用的开源平台)为用户提供企业就绪的容器化解决方案。您可以通过应用模板上传自己的应用,或者添加应用仓库作为应用工具,供租户选择他们想要的应用。 + +应用商店为应用生命周期管理提供了一个高效的集成系统,用户可以用最合适的方式快速上传、发布、部署、升级和下架应用。因此,开发者借助 KubeSphere 就能减少花在设置上的时间,更多地专注于开发。 diff --git a/content/zh/docs/v3.4/application-store/app-developer-guide/_index.md b/content/zh/docs/v3.4/application-store/app-developer-guide/_index.md new file mode 100644 index 000000000..cb4e2189f --- /dev/null +++ b/content/zh/docs/v3.4/application-store/app-developer-guide/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "应用开发者指南" +weight: 14400 + +_build: + render: false +--- diff --git a/content/zh/docs/v3.4/application-store/app-developer-guide/helm-developer-guide.md b/content/zh/docs/v3.4/application-store/app-developer-guide/helm-developer-guide.md new file mode 100644 index 000000000..3b2a72436 --- /dev/null +++ b/content/zh/docs/v3.4/application-store/app-developer-guide/helm-developer-guide.md @@ -0,0 +1,158 @@ +--- +title: "Helm 开发者指南" +keywords: 'Kubernetes, KubeSphere, Helm, 开发' +description: '开发基于 Helm 的应用。' +linkTitle: "Helm 开发者指南" +weight: 14410 +--- + +您可以上传应用的 Helm Chart 至 KubeSphere,以便具有必要权限的租户能够进行部署。本教程以 NGINX 为示例演示如何准备 Helm Chart。 + +## 安装 Helm + +如果您已经安装 KubeSphere,那么您的环境中已部署 Helm。如果未安装,请先参考 [Helm 文档](https://helm.sh/docs/intro/install/)安装 Helm。 + +## 创建本地仓库 + +执行以下命令在您的机器上创建仓库。 + +```bash +mkdir helm-repo +``` + +```bash +cd helm-repo +``` + +## 创建应用 + +使用 `helm create` 创建一个名为 `nginx` 的文件夹,它会自动为您的应用创建 YAML 模板和目录。一般情况下,不建议修改顶层目录中的文件名和目录名。 + +```bash +$ helm create nginx +$ tree nginx/ +nginx/ +├── charts +├── Chart.yaml +├── templates +│ ├── deployment.yaml +│ ├── _helpers.tpl +│ ├── ingress.yaml +│ ├── NOTES.txt +│ └── service.yaml +└── values.yaml +``` + +`Chart.yaml` 用于定义 Chart 的基本信息,包括名称、API 和应用版本。有关更多信息,请参见 [Chart.yaml 文件](../helm-specification/#chartyaml-文件)。 + +该 `Chart.yaml` 文件的示例: + +```yaml +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: nginx +version: 0.1.0 +``` + +当您向 Kubernetes 部署基于 Helm 的应用时,可以直接在 KubeSphere 控制台上编辑 `values.yaml` 文件。 + +该 `values.yaml` 文件的示例: + +```yaml +# 默认值仅供测试使用。 +# 此文件为 YAML 格式。 +# 对要传入您的模板的变量进行声明。 + +replicaCount: 1 + +image: + repository: nginx + tag: stable + pullPolicy: IfNotPresent + +nameOverride: "" +fullnameOverride: "" + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + path: / + hosts: + - chart-example.local + tls: [] + # - secretName: 
chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # 通常不建议对默认资源进行指定,用户可以去主动选择是否指定。 + # 这也有助于 Chart 在资源较少的环境上运行,例如 Minikube。 + # 如果您要指定资源,请将下面几行内容取消注释, + # 按需调整,并删除 'resources:' 后面的大括号。 + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} +``` + +请参考 [Helm 规范](../helm-specification/)对 `nginx` 文件夹中的文件进行编辑,完成编辑后进行保存。 + +## 创建索引文件(可选) + +要在 KubeSphere 中使用 HTTP 或 HTTPS URL 添加仓库,您需要事先向对象存储上传一个 `index.yaml` 文件。在 `nginx` 的上一个目录中使用 Helm 执行以下命令,创建索引文件。 + +```bash +helm repo index . +``` + +```bash +$ ls +index.yaml nginx +``` + +{{< notice note >}} + +- 如果仓库 URL 是 S3 格式,您向仓库添加应用时会自动在对象存储中创建索引文件。 + +- 有关何如向 KubeSphere 添加仓库的更多信息,请参见[导入 Helm 仓库](../../../workspace-administration/app-repository/import-helm-repository/)。 + +{{}} + +## 打包 Chart + +前往 `nginx` 的上一个目录,执行以下命令打包您的 Chart,这会创建一个 .tgz 包。 + +```bash +helm package nginx +``` + +```bash +$ ls +nginx nginx-0.1.0.tgz +``` + +## 上传您的应用 + +现在您已经准备好了基于 Helm 的应用,您可以将它上传至 KubeSphere 并在平台上进行测试。 + +## 另请参见 + +[Helm 规范](../helm-specification/) + +[导入 Helm 仓库](../../../workspace-administration/app-repository/import-helm-repository/) + diff --git a/content/zh/docs/v3.4/application-store/app-developer-guide/helm-specification.md b/content/zh/docs/v3.4/application-store/app-developer-guide/helm-specification.md new file mode 100644 index 000000000..c33f28596 --- /dev/null +++ b/content/zh/docs/v3.4/application-store/app-developer-guide/helm-specification.md @@ -0,0 +1,131 @@ +--- +title: "Helm 规范" +keywords: 'Kubernetes, KubeSphere, Helm, 规范' +description: '了解 Chart 结构和规范。' +linkTitle: "Helm 规范" +weight: 14420 +--- + +Helm Chart 是一种打包格式。Chart 是一个描述一组 Kubernetes 相关资源的文件集合。有关更多信息,请参见 [Helm 文档](https://helm.sh/zh/docs/topics/charts/)。 + +## 结构 + +Chart 的所有相关文件都存储在一个目录中,该目录通常包含: + +```text +chartname/ + Chart.yaml # 包含 Chart 基本信息(例如版本和名称)的 YAML 文件。 + LICENSE # (可选)包含 Chart 许可证的纯文本文件。 + README.md # (可选)应用说明和使用指南。 + values.yaml # 该 Chart 的默认配置值。 + values.schema.json # (可选)向 values.yaml 文件添加结构的 JSON Schema。 + charts/ # 一个目录,包含该 Chart 所依赖的任意 Chart。 + crds/ # 定制资源定义。 + templates/ # 模板的目录,若提供相应值便可以生成有效的 Kubernetes 配置文件。 + templates/NOTES.txt # (可选)包含使用说明的纯文本文件。 +``` + +## Chart.yaml 文件 + +您必须为 Chart 提供 `chart.yaml` 文件。下面是一个示例文件,每个字段都有说明。 + +```yaml +apiVersion: (必需)Chart API 版本。 +name: (必需)Chart 名称。 +version: (必需)版本,遵循 SemVer 2 标准。 +kubeVersion: (可选)兼容的 Kubernetes 版本,遵循 SemVer 2 标准。 +description: (可选)对应用的一句话说明。 +type: (可选)Chart 的类型。 +keywords: + - (可选)关于应用的关键字列表。 +home: (可选)应用的 URL。 +sources: + - (可选)应用源代码的 URL 列表。 +dependencies: (可选)Chart 必要条件的列表。 + - name: Chart 的名称,例如 nginx。 + version: Chart 的版本,例如 "1.2.3"。 + repository: 仓库 URL ("https://example.com/charts") 或别名 ("@repo-name")。 + condition: (可选)解析为布尔值的 YAML 路径,用于启用/禁用 Chart (例如 subchart1.enabled)。 + tags: (可选) + - 用于将 Chart 分组,一同启用/禁用。 + import-values: (可选) + - ImportValues 保存源值到待导入父键的映射。每一项可以是字符串或者一对子/父子列表项。 + alias: (可选)Chart 要使用的别名。当您要多次添加同一个 Chart 时,它会很有用。 +maintainers: (可选) + - name: (必需)维护者姓名。 + email: (可选)维护者电子邮件。 + url: (可选)维护者 URL。 +icon: (可选)要用作图标的 SVG 或 PNG 图片的 URL。 +appVersion: (可选)应用版本。不需要是 SemVer。 +deprecated: (可选,布尔值)该 Chart 是否已被弃用。 +annotations: + example: (可选)按名称输入的注解列表。 +``` + +{{< notice note >}} + +- `dependencies` 字段用于定义 Chart 依赖项,`v1` Chart 的依赖项都位于单独文件 `requirements.yaml` 中。有关更多信息,请参见 [Chart 依赖项](https://helm.sh/zh/docs/topics/charts/#chart-dependency)。 +- `type` 字段用于定义 Chart 的类型。允许的值有 `application` 和 `library`。有关更多信息,请参见 [Chart 类型](https://helm.sh/zh/docs/topics/charts/#chart-types)。 
+ +{{}} + +## Values.yaml 和模板 + +Helm Chart 模板采用 [Go 模板语言](https://golang.org/pkg/text/template/)编写并存储在 Chart 的 `templates` 文件夹。有两种方式可以为模板提供值: + +1. 在 Chart 中创建一个包含可供引用的默认值的 `values.yaml` 文件。 +2. 创建一个包含必要值的 YAML 文件,通过在命令行使用 `helm install` 命令来使用该文件。 + +下面是 `templates` 文件夹中模板的示例。 + +```yaml +apiVersion: v1 +kind: ReplicationController +metadata: + name: deis-database + namespace: deis + labels: + app.kubernetes.io/managed-by: deis +spec: + replicas: 1 + selector: + app.kubernetes.io/name: deis-database + template: + metadata: + labels: + app.kubernetes.io/name: deis-database + spec: + serviceAccount: deis-database + containers: + - name: deis-database + image: {{.Values.imageRegistry}}/postgres:{{.Values.dockerTag}} + imagePullPolicy: {{.Values.pullPolicy}} + ports: + - containerPort: 5432 + env: + - name: DATABASE_STORAGE + value: {{default "minio" .Values.storage}} +``` + +上述示例在 Kubernetes 中定义 ReplicationController 模板,其中引用的一些值已在 `values.yaml` 文件中进行定义。 + +- `imageRegistry`:Docker 镜像仓库。 +- `dockerTag`:Docker 镜像标签 (tag)。 +- `pullPolicy`:镜像拉取策略。 +- `storage`:存储后端,默认为 `minio`。 + +下面是 `values.yaml` 文件的示例: + +```text +imageRegistry: "quay.io/deis" +dockerTag: "latest" +pullPolicy: "Always" +storage: "s3" +``` + +## 参考 + +[Helm 文档](https://helm.sh/zh/docs/) + +[Chart](https://helm.sh/zh/docs/topics/charts/) + diff --git a/content/zh/docs/v3.4/application-store/app-lifecycle-management.md b/content/zh/docs/v3.4/application-store/app-lifecycle-management.md new file mode 100644 index 000000000..020b0780c --- /dev/null +++ b/content/zh/docs/v3.4/application-store/app-lifecycle-management.md @@ -0,0 +1,230 @@ +--- +title: "应用程序生命周期管理" +keywords: 'Kubernetes, KubeSphere, 应用商店' +description: '您可以跨整个生命周期管理应用,包括提交、审核、测试、发布、升级和下架。' +linkTitle: '应用程序生命周期管理' +weight: 14100 +--- + +KubeSphere 集成了 [OpenPitrix](https://github.com/openpitrix/openpitrix)(一个跨云管理应用程序的开源平台)来构建应用商店,管理应用程序的整个生命周期。应用商店支持两种应用程序部署方式: + +- **应用模板**:这种方式让开发者和独立软件供应商 (ISV) 能够与企业空间中的用户共享应用程序。您也可以在企业空间中导入第三方应用仓库。 +- **自制应用**:这种方式帮助用户使用多个微服务来快速构建一个完整的应用程序。KubeSphere 让用户可以选择现有服务或者创建新的服务,用于在一站式控制台上创建自制应用。 + +本教程使用 [Redis](https://redis.io/) 作为示例应用程序,演示如何进行应用全生命周期管理,包括提交、审核、测试、发布、升级和下架。 + +## 视频演示 + + + +## 准备工作 + +- 您需要启用 [KubeSphere 应用商店 (OpenPitrix)](../../pluggable-components/app-store/)。 +- 您需要创建一个企业空间、一个项目以及一个用户 (`project-regular`)。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../quick-start/create-workspace-and-project/)。 + +## 动手实验 + +### 步骤一:创建自定义角色和帐户 + +首先,您需要创建两个帐户,一个是 ISV 的帐户 (`isv`),另一个是应用技术审核员的帐户 (`reviewer`)。 + +1. 使用 `admin` 帐户登录 KubeSphere 控制台。点击左上角的**平台管理**,选择**访问控制**。转到**平台角色**,点击**创建**。 + +2. 为角色设置一个名称,例如 `app-review`,然后点击**编辑权限**。 + +3. 转到**应用管理**,选择权限列表中的**应用商店管理**和**应用商店查看**,然后点击**确定**。 + + {{< notice note >}} + + 被授予 `app-review` 角色的用户能够查看平台上的应用商店并管理应用,包括审核和下架应用。 + + {{}} + +4. 创建角色后,您需要创建一个用户,并授予 `app-review` 角色。转到**用户**,点击**创建**。输入必需的信息,然后点击**确定**。 + +5. 再创建另一个用户 `isv`,把 `platform-regular` 角色授予它。 + +6. 邀请上面创建好的两个帐户进入现有的企业空间,例如 `demo-workspace`,并授予它们 `workspace-admin` 角色。 + +### 步骤二:上传和提交应用程序 + +1. 以 `isv` 身份登录控制台,转到您的企业空间。您需要上传示例应用 Redis 至该企业空间,供后续使用。首先,下载应用 [Redis 11.3.4](https://github.com/kubesphere/tutorial/raw/master/tutorial%205%20-%20app-store/redis-11.3.4.tgz),然后转到**应用模板**,点击**上传模板**。 + + {{< notice note >}} + + 在本示例中,稍后会上传新版本的 Redis 来演示升级功能。 + + {{}} + +2. 在弹出的对话框中,点击**上传 Helm Chart** 上传 Chart 文件。点击**确定**继续。 + +3. **应用信息**下显示了应用的基本信息。要上传应用的图标,点击**上传图标**。您也可以跳过上传图标,直接点击**确定**。 + + {{< notice note >}} + + 应用图标支持的最大分辨率为 96 × 96 像素。 + + {{}} + +4. 成功上传后,模板列表中会列出应用,状态为**开发中**,意味着该应用正在开发中。上传的应用对同一企业空间下的所有成员均可见。 + +5. 
点击列表中的 Redis 进入应用模板详情页面。您可以点击**编辑**来编辑该应用的基本信息。 + +6. 您可以通过在弹出窗口中指定字段来自定义应用的基本信息。 + +7. 点击**确定**保存更改,然后您可以通过将其部署到 Kubernetes 来测试该应用程序。点击待提交版本展开菜单,选择**安装**。 + + {{< notice note >}} + + 如果您不想测试应用,可以直接提交审核。但是,建议您先测试应用部署和功能,再提交审核,尤其是在生产环境中。这会帮助您提前发现问题,加快审核过程。 + + {{}} + +8. 选择要部署应用的集群和项目,为应用设置不同的配置,然后点击**安装**。 + + {{< notice note >}} + + 有些应用可以在表单中设置所有配置后进行部署。您可以使用拨动开关查看它的 YAML 文件,文件中包含了需要在表单中指定的所有参数。 + + {{}} + +9. 稍等几分钟,切换到**应用实例**选项卡。您会看到 Redis 已经部署成功。 + +10. 测试应用并且没有发现问题后,便可以点击**提交发布**,提交该应用程序进行发布。 + + {{< notice note >}} + +版本号必须以数字开头并包含小数点。 + +{{}} + +11. 应用提交后,它的状态会变成**已提交**。现在,应用审核员便可以进行审核。 + + +### 步骤三:发布应用程序 + +1. 登出控制台,然后以 `reviewer` 身份重新登录 KubeSphere。点击左上角的**平台管理**,选择**应用商店管理**。在**应用发布**页面,上一步中提交的应用会显示在**待发布**选项卡下。 + +2. 点击该应用进行审核,在弹出窗口中查看应用信息、介绍、配置文件和更新日志。 + +3. 审核员的职责是决定该应用是否符合发布至应用商店的标准。点击**通过**来批准,或者点击**拒绝**来拒绝提交的应用。 + +### 步骤四:发布应用程序至应用商店 + +应用获批后,`isv` 便可以将 Redis 应用程序发布至应用商店,让平台上的所有用户都能找到并部署该应用程序。 + +1. 登出控制台,然后以 `isv` 身份重新登录 KubeSphere。转到您的企业空间,点击**应用模板**页面上的 Redis。在详情页面上展开版本菜单,然后点击**发布到商店**。在弹出的提示框中,点击**确定**以确认操作。 + +2. 在**应用发布**下,您可以查看应用状态。**已上架**意味着它在应用商店中可用。 + +3. 点击**在商店查看**转到应用商店的**应用信息**页面,或者点击左上角的**应用商店**也可以查看该应用。 + + {{< notice note >}} + + 您可能会在应用商店看到两个 Redis 应用,其中一个是 KubeSphere 中的内置应用。请注意,新发布的应用会显示在应用商店列表的开头。 + + {{}} + +4. 现在,企业空间中的用户可以从应用商店中部署 Redis。要将应用部署至 Kubernetes,请点击应用转到**应用信息**页面,然后点击**安装**。 + + {{< notice note >}} + + 如果您在部署应用时遇到问题,**状态**栏显示为**失败**,您可以将光标移至**失败**图标上方查看错误信息。 + + {{}} + +### 步骤五:创建应用分类 + +`reviewer` 可以根据不同类型应用程序的功能和用途创建多个分类。这类似于设置标签,可以在应用商店中将分类用作筛选器,例如大数据、中间件和物联网等。 + +1. 以 `reviewer` 身份登录 KubeSphere。要创建分类,请转到**应用商店管理**页面,再点击**应用分类**页面中的 。 + +2. 在弹出的对话框中设置分类名称和图标,然后点击**确定**。对于 Redis,您可以将**分类名称**设置为 `Database`。 + + {{< notice note >}} + + 通常,应用审核员会提前创建必要的分类,ISV 会选择应用所属的分类,然后提交审核。新创建的分类中没有应用。 + + {{}} + +3. 创建好分类后,您可以给您的应用分配分类。在**未分类**中选择 Redis,点击**调整分类**。 + +4. 在弹出对话框的下拉列表中选择分类 (**Database**) 然后点击**确定**。 + +5. 该应用便会显示在对应分类中。 + + +### 步骤六:添加新版本 + +要让企业空间用户能够更新应用,您需要先向 KubeSphere 添加新的应用版本。按照下列步骤为示例应用添加新版本。 + +1. 再次以 `isv` 身份登录 KubeSphere,点击**应用模板**,点击列表中的 Redis 应用。 + +2. 下载 [Redis 12.0.0](https://github.com/kubesphere/tutorial/raw/master/tutorial%205%20-%20app-store/redis-12.0.0.tgz),这是 Redis 的一个新版本,本教程用它来演示。在**版本**选项卡中点击右侧的**上传新版本**,上传您刚刚下载的文件包。 + +3. 点击**上传 Helm Chart**,上传完成后点击**确定**。 + +4. 新的应用版本会显示在版本列表中。您可以通过点击来展开菜单并测试新的版本。另外,您也可以提交审核并发布至应用商店,操作步骤和上面说明的一样。 + + +### 步骤七:升级 + +新版本发布至应用商店后,所有用户都可以升级该应用程序至新版本。 + +{{< notice note >}} + +要完成下列步骤,您必须先部署应用的一个旧版本。本示例中,Redis 11.3.4 已经部署至项目 `demo-project`,它的新版本 12.0.0 也已经发布至应用商店。 + +{{}} + +1. 以 `project-regular` 身份登录 KubeSphere,搜寻到项目的**应用**页面,点击要升级的应用。 + +2. 点击**更多操作**,在下拉菜单中选择**编辑设置**。 + +3. 在弹出窗口中,您可以查看应用配置 YAML 文件。在右侧的下拉列表中选择新版本,您可以自定义新版本的 YAML 文件。在本教程中,点击**更新**,直接使用默认配置。 + + {{< notice note >}} + + 您可以在右侧的下拉列表中选择与左侧相同的版本,通过 YAML 文件自定义当前应用的配置。 + + {{}} + +4. 在**应用**页面,您会看到应用正在升级中。升级完成后,应用状态会变成**运行中**。 + + +### 步骤八:下架应用程序 + +您可以选择将应用完全从应用商店下架,或者下架某个特定版本。 + +1. 以 `reviewer` 身份登录 KubeSphere。点击左上角的**平台管理**,选择**应用商店管理**。在**应用商店**页面,点击 Redis。 + +2. 在详情页面,点击**下架应用**,在弹出的对话框中选择**确定**,确认将应用从应用商店下架的操作。 + + {{< notice note >}} + + 将应用从应用商店下架不影响正在使用该应用的租户。 + + {{}} + +3. 要让应用再次在应用商店可用,点击**上架应用**。 + +4. 要下架应用的特定版本,展开版本菜单,点击**下架版本**。在弹出的对话框中,点击**确定**以确认操作。 + + {{< notice note >}} + + 下架应用版本后,该版本在应用商店将不可用。下架应用版本不影响正在使用该版本的租户。 + + {{}} + +5. 
要让应用版本再次在应用商店可用,点击**上架版本**。 + + + + + + + + + diff --git a/content/zh/docs/v3.4/application-store/built-in-apps/_index.md b/content/zh/docs/v3.4/application-store/built-in-apps/_index.md new file mode 100644 index 000000000..49cf0ae27 --- /dev/null +++ b/content/zh/docs/v3.4/application-store/built-in-apps/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "内置应用" +weight: 14200 + +_build: + render: false +--- diff --git a/content/zh/docs/v3.4/application-store/built-in-apps/chaos-mesh-app.md b/content/zh/docs/v3.4/application-store/built-in-apps/chaos-mesh-app.md new file mode 100644 index 000000000..8cdb60fa4 --- /dev/null +++ b/content/zh/docs/v3.4/application-store/built-in-apps/chaos-mesh-app.md @@ -0,0 +1,93 @@ +--- +title: "在 KubeSphere 中部署 Chaos Mesh" +keywords: 'KubeSphere, Kubernetes, Chaos Mesh, Chaos Engineering' +description: '了解如何在 KubeSphere 中部署 Chaos Mesh 并进行混沌实验。' +linkTitle: "部署 Chaos Mesh" +--- + +[Chaos Mesh](https://github.com/chaos-mesh/chaos-mesh) 是一个开源的云原生混沌工程平台,提供丰富的故障模拟类型,具有强大的故障场景编排能力,方便用户在开发测试中以及生产环境中模拟现实世界中可能出现的各类异常,帮助用户发现系统潜在的问题。 + +![Chaos Mesh architecture](/images/docs/v3.3/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-architecture-v2.png) + +本教程演示了如何在 KubeSphere 上部署 Chaos Mesh 进行混沌实验。 + +## **准备工作** + +* 部署 [KubeSphere 应用商店](../../../pluggable-components/app-store/)。 +* 您需要为本教程创建一个企业空间、一个项目和两个帐户(ws-admin 和 project-regular)。帐户 ws-admin 必须在企业空间中被赋予 workspace-admin 角色,帐户 project-regular 必须被邀请至项目中赋予 operator 角色。若还未创建好,请参考[创建企业空间、项目、用户和角色](https://kubesphere.io/zh/docs/quick-start/create-workspace-and-project/)。 + + +## **开始混沌实验** + +### 步骤1: 部署 Chaos Mesh + +1. 使用 `project-regular` 身份登陆,在应用市场中搜索 `chaos-mesh`,点击搜索结果进入应用。 + + ![Chaos Mesh app](/images/docs/v3.3/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-app.png) + + +2. 进入应用信息页后,点击右上角**安装**按钮。 + + ![Install Chaos Mesh](/images/docs/v3.3/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/install-chaos-mesh.png) + +3. 进入应用设置页面,可以设置应用**名称**(默认会随机一个唯一的名称)和选择安装的**位置**(对应的 Namespace) 和**版本**,然后点击右上角**下一步**。 + + ![Chaos Mesh basic information](/images/docs/v3.3/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-basic-info.png) + +4. 根据实际需要编辑 `values.yaml` 文件,也可以直接点击**安装**使用默认配置。 + + ![Chaos Mesh configurations](/images/docs/v3.3/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-config.png) + +5. 等待 Chaos Mesh 开始正常运行。 + + ![Chaos Mesh deployed](/images/docs/v3.3/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-deployed.png) + +6. 访问**应用负载**, 可以看到 Chaos Mesh 创建的三个部署。 + + ![Chaos Mesh deployments](/images/docs/v3.3/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-deployments.png) + +### 步骤 2: 访问 Chaos Mesh + +1. 前往**应用负载**下服务页面,复制 chaos-dashboard 的 **NodePort**。 + + ![Chaos Mesh NodePort](/images/docs/v3.3/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-nodeport.png) + +2. 您可以通过 `${NodeIP}:${NODEPORT}` 方式访问 Chaos Dashboard。并参考[管理用户权限](https://chaos-mesh.org/zh/docs/manage-user-permissions/)文档,生成 Token,并登陆 Chaos Dashboard。 + + ![Login to Chaos Dashboard](/images/docs/v3.3/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/login-to-dashboard.png) + +### 步骤 3: 创建混沌实验 + +1. 在开始混沌实验之前,需要先确定并部署您的实验目标,比如,测试某应用在网络延时下的工作状态。本文使用了一个 demo 应用 `web-show` 作为待测试目标,观测系统网络延迟。 你可以使用下面命令部署一个 Demo 应用 `web-show` : + + ```bash + curl -sSL https://mirrors.chaos-mesh.org/latest/web-show/deploy.sh | bash + ``` + + {{< notice note >}} + + web-show 应用页面上可以直接观察到自身到 kube-system 命名空间下 Pod 的网络延迟。 + + {{}} + +2. 
访问 **web-show** 应用程序。从您的网络浏览器,进入 `${NodeIP}:8081`。 + + ![Chaos Mesh web show app](/images/docs/v3.3/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/web-show-app.png) + +3. 登陆 Chaos Dashboard 创建混沌实验,为了更好的观察混沌实验效果,这里只创建一个独立的混沌实验,混沌实验的类型选择**网络攻击**,模拟网络延迟的场景: + + ![Chaos Dashboard](/images/docs/v3.3/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-dashboard-networkchaos.png) + + 实验范围设置为 web-show 应用: + + ![Chaos Experiment scope](/images/docs/v3.3/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-experiment-scope.png) + +4. 提交混沌实验后,查看实验状态: + + ![Chaos Experiment status](/images/docs/v3.3/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/experiment-status.png) + +5. 访问 web-show 应用观察实验结果 : + + ![Chaos Experiment result](/images/docs/v3.3/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/experiment-result.png) + +更多详情参考 [Chaos Mesh 使用文档](https://chaos-mesh.org/zh/docs/)。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/application-store/built-in-apps/etcd-app.md b/content/zh/docs/v3.4/application-store/built-in-apps/etcd-app.md new file mode 100644 index 000000000..ca74dfa5a --- /dev/null +++ b/content/zh/docs/v3.4/application-store/built-in-apps/etcd-app.md @@ -0,0 +1,60 @@ +--- +title: "在 KubeSphere 中部署 etcd" +keywords: 'Kubernetes, KubeSphere, etcd, 应用商店' +description: '了解如何从 KubeSphere 应用商店中部署 etcd 并访问服务。' +linkTitle: "在 KubeSphere 中部署 etcd" +weight: 14210 +--- + +[etcd](https://etcd.io/) 是一个采用 Go 语言编写的分布式键值存储库,用来存储供分布式系统或机器集群访问的数据。在 Kubernetes 中,etcd 是服务发现的后端,存储集群状态和配置。 + +本教程演示如何从 KubeSphere 应用商店部署 etcd。 + +## 准备工作 + +- 请确保[已启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 +- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,请以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 动手实验 + +### 步骤 1:从应用商店中部署 etcd + +1. 在 `demo-project` 项目的**概览**页面,点击左上角的**应用商店**。 + +2. 找到 etcd,点击**应用信息**页面上的**安装**。 + +3. 设置名称并选择应用版本。请确保将 etcd 部署在 `demo-project` 中,点击**下一步**。 + +4. 在**应用设置**页面,指定 etcd 的持久化持久卷大小,点击**安装**。 + + {{< notice note >}} + + 要指定 etcd 的更多值,请使用右上角的**编辑YAML**查看 YAML 格式的应用清单文件,并编辑其配置。 + + {{}} + +5. 在**应用**页面的**基于模板的应用**选项卡下,稍等片刻待 etcd 启动并运行。 + + +### 步骤 2:访问 etcd 服务 + +应用部署后,您可以在 KubeSphere 控制台上使用 etcdctl 命令行工具与 etcd 服务器进行交互,直接访问 etcd。 + +1. 在**工作负载**的**有状态副本集**选项卡中,点击 etcd 的服务名称。 + +2. 在**容器组**下,展开菜单查看容器详情,然后点击**终端**图标。 + +3. 在终端中,您可以直接读写数据。例如,分别执行以下两个命令。 + + ```bash + etcdctl set /name kubesphere + ``` + + ```bash + etcdctl get /name + ``` + +4. KubeSphere 集群内的客户端可以通过 `..svc.:2379`(例如本教程中是 `etcd-rscvf6.demo-project.svc.cluster.local:2379`) 访问 etcd 服务。 + +5. 
有关更多信息,请参见 [etcd 官方文档](https://etcd.io/docs/v3.4.0/)。 + diff --git a/content/zh/docs/v3.4/application-store/built-in-apps/harbor-app.md b/content/zh/docs/v3.4/application-store/built-in-apps/harbor-app.md new file mode 100644 index 000000000..eae277b2f --- /dev/null +++ b/content/zh/docs/v3.4/application-store/built-in-apps/harbor-app.md @@ -0,0 +1,124 @@ +--- +title: "在 KubeSphere 中部署 Harbor" +keywords: 'Kubernetes, KubeSphere, Harbor, 应用商店' +description: '了解如何从 KubeSphere 应用商店中部署 Harbor 并访问服务。' +linkTitle: "在 KubeSphere 中部署 Harbor" +weight: 14220 +--- +[Harbor](https://goharbor.io/) 是一个开源仓库,通过各种策略和基于角色的访问控制来保护制品,确保镜像经过扫描且没有漏洞,并对镜像签名使其受信。 + +本教程演示如何从 KubeSphere 应用商店部署 [Harbor](https://goharbor.io/)。 + +## 准备工作 + +- 请确保[已启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 +- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,请以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 动手实验 + +### 步骤 1:从应用商店中部署 Harbor + +1. 在 `demo-project` 项目的**概览**页面,点击左上角的**应用商店**。 + +2. 找到 Harbor,点击**应用信息**页面上的**安装**。 + +3. 设置名称并选择应用版本。请确保将 Harbor 部署在 `demo-project` 中,点击**下一步**。 + +4. 在**应用设置**页面,编辑 Harbor 的配置文件,请注意以下字段。 + + `type`:访问 Harbor 服务的方式。本示例使用 `nodePort`。 + + `tls`:指定是否启用 HTTPS。多数情况下设置为 `false`。 + + `externalURL`:暴露给租户的 URL。 + + {{< notice note >}} + + - 请指定 `externalURL`,如果您访问 Harbor 有问题,该字段会对解决问题非常有用。 + + - 请确保在本教程中使用 HTTP 协议和其对应的 `nodePort`。有关更多信息,请参见常见问题中的[示例配置](#常见问题)。 + + {{}} + + 配置编辑完成后,点击**安装**继续。 + +5. 稍等片刻待 Harbor 启动并运行。 + + +### 步骤 2:访问 Harbor + +1. 基于配置文件中 `expose.type` 字段的设置,访问方式可能会不同。本示例使用 `nodePort` 访问 Harbor,按照先前步骤中的设置,访问 `http://nodeIP:30002`。 + + ![登录 Harbor](/images/docs/v3.3/zh-cn/appstore/built-in-apps/deploy-harbor-on-ks/harbor-login-7.PNG) + + {{< notice note >}} + + 取决于您的 Kubernetes 集群的部署位置,您可能需要在安全组中放行端口并配置相关的端口转发规则。 + + {{}} + +2. 使用默认帐户和密码 (`admin/Harbor12345`) 登录 Harbor。密码由配置文件中的 `harborAdminPassword` 字段定义。 + + ![Harbor 仪表板](/images/docs/v3.3/zh-cn/appstore/built-in-apps/deploy-harbor-on-ks/harbor-dashboard-8.jpg) + +## 常见问题 + +1. 如何启用 HTTP 登录? + + 在步骤 1 中将 `tls.enabled` 设置为 `false`。`externalURL` 的协议必须和 `expose.nodePort.ports` 相同。 + + 如果您使用 Docker 登录,请在 `daemon.json` 中将 `externalURL` 设置为 `insecure-registries` 其中之一,然后重新加载 Docker。 + + 下面是示例配置文件,供您参考。请注意阅读注解。 + + ```yaml + ## 请注意,192.168.0.9 是示例 IP 地址,您必须使用自己的地址。 + expose: + type: nodePort + tls: + enabled: false + secretName: "" + notarySecretName: "" + commonName: "192.168.0.9" # 将 commonName 更改成您自己的值。 + nodePort: + # NodePort 服务的名称。 + name: harbor + ports: + http: + # 使用 HTTP 服务时,Harbor 监听的服务端口。 + port: 80 + # 使用 HTTP 服务时,Harbor 监听的节点端口。 + nodePort: 30002 + https: + # 使用 HTTPS 服务时,Harbor 监听的服务端口。 + port: 443 + # 使用 HTTPS 服务时,Harbor 监听的服务端口。 + nodePort: 30003 + # 仅在 notary.enabled 设置为 true 时需要此配置。 + notary: + # Notary 监听的服务端口。 + port: 4443 + # Notary 监听的节点端口。 + nodePort: 30004 + + externalURL: http://192.168.0.9:30002 # 使用您自己的 IP 地址。 + + # Harbor admin 的初始密码。启动 Harbor 后可以通过主页修改。 + harborAdminPassword: "Harbor12345" + # 用于加密的密钥,必须是包含 16 个字符的字符串。 + secretKey: "not-a-secure-key" + ``` + +2. 如何启用 HTTPS 登录? + + a. 使用自签名证书。 + * 在步骤 1 中将配置文件中的 `tls.enabled` 设置为 `true`,并对应编辑 `externalURL`。 + * 将 Pod `harbor-core` 的 `/etc/core/ca` 中存储的自签名证书复制到您的主机。 + * 先在您的主机中信任该自签名证书,然后重启 Docker。 + + b. 
使用公共 SSL。 + * 将证书添加为密钥 (Secret)。 + * 在步骤 1 中将配置文件中的 `tls.enabled` 设置为 `true`,并对应编辑 `externalURL`。 + * 编辑 `tls.secretName`。 + +有关更多信息,请参见 [Harbor 文档](https://goharbor.io/docs/2.1.0/)。 diff --git a/content/zh/docs/v3.4/application-store/built-in-apps/jh-gitlab.md b/content/zh/docs/v3.4/application-store/built-in-apps/jh-gitlab.md new file mode 100644 index 000000000..3e83bdc93 --- /dev/null +++ b/content/zh/docs/v3.4/application-store/built-in-apps/jh-gitlab.md @@ -0,0 +1,69 @@ +--- +title: "在 KubeSphere 上部署极狐GitLab" +keywords: 'KubeSphere, Kubernetes, 极狐GitLab, DevOps' +description: '了解并掌握如何在 KubeSphere 中部署和使用极狐GitLab' +linkTitle: "部署极狐GitLab" +--- + +[极狐GitLab](https://gitlab.cn)是GitLab DevOps平台的中国发行版,作为一套完备的一站式DevOps平台,从设计到投产,一个平台覆盖 DevSecOps 全流程。极狐GitLab 帮助团队更快、更安全地交付更好的软件,提升研运效能,实现 DevOps 价值最大化。 + +本教程将演示如何从 KubeSphere 应用商店部署极狐GitLab。 + +## **准备工作** + + +- 您需要[启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 +- 您需要创建一个企业空间、一个项目和一个用户帐户。该用户必须是已邀请至项目的平台普通用户,并且在项目中的角色为 `operator`。在本教程中,您需要以 `project-regular` 用户登录,并在 `demo-workspace` 企业空间的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 +- 确保 `KubeSphere` 环境有至少 4 Core,8GB RAM 以及 50GB 以上存储。 + +## 安装 + +1. 创建一个 `Workspace`: + +![workspace creation](/images/docs/v3.3/zh-cn/appstore/built-in-apps/jh-app/workspace-creation.png) + +2. 创建一个 `Project` + +![project creation](/images/docs/v3.3/zh-cn/appstore/built-in-apps/jh-app/project-creation.png) + +3. 在左侧导航栏 `Application Workload` 的 `App` 中,创建一个 `App`: + +![app creation](/images/docs/v3.3/zh-cn/appstore/built-in-apps/jh-app/app-creation.png) + +4. 在出现的安装选项界面中选择 **From App Store**(从应用商店安装): + +![from app store](/images/docs/v3.3/zh-cn/appstore/built-in-apps/jh-app/from-app-store.png) + +5. 在 `App Store` 中输入 **jh** 进行搜索,会出现 **jh-gitlab** 的应用: + +![jh gitlab app](/images/docs/v3.3/zh-cn/appstore/built-in-apps/jh-app/jihu-gitlab-app.png) + +6. 点击 jh-gitlab 应用,在出现的界面上点击 `install`,即可开始安装。根据表单填写基本信息,然后点击 `next`: + +![jh install basic info](/images/docs/v3.3/zh-cn/appstore/built-in-apps/jh-app/jh-install-basic-info.png) + +7. 接着需要根据自身需求填写 App 的设置信息(也就是 values.yaml 文件内容,详细说明可以参考[极狐GitLab Helm Chart 官网](https://jihulab.com/gitlab-cn/charts/gitlab/-/blob/main-jh/values.yaml))。 + +![jh helm chart](/images/docs/v3.3/zh-cn/appstore/built-in-apps/jh-app/jh-helm-charts.png) + +8. 然后点击 `install` 开始安装,整个过程需要持续一段时间,最后可以在 `Application Workload` 的 `App` 选项里面看到安装成功的极狐GitLab 应用程序: + +![succ installation](/images/docs/v3.3/zh-cn/appstore/built-in-apps/jh-app/succ-installation.png) + +9. 如果需要调试,可以利用 KubeSphere 的小工具(下图右下角红色方框所示的小锤子)来查看安装的极狐GitLab实例所对应的 Kubernetes 资源: + +![kubectl check](/images/docs/v3.3/zh-cn/appstore/built-in-apps/jh-app/kubectl-check.png) + +10. `Pod` 和 `Ingress` 的内容如下: + +![pod status](/images/docs/v3.3/zh-cn/appstore/built-in-apps/jh-app/pod-status.png) + +11. 
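+    如果该域名尚未配置 DNS 解析,可以先在本地 hosts 文件中临时添加一条解析记录,再执行下一步访问(示意命令,其中 IP 地址为假设值,请替换为实际的节点或 Ingress 入口地址):
+
+    ```bash
+    # 将访问域名临时解析到集群入口 IP,仅用于本地测试
+    echo "192.168.0.10 gitlab.jihu-xiaomage.cn" | sudo tee -a /etc/hosts
+    ```
+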
使用 `gitlab.jihu-xiaomage.cn`(需要根据自身需求设置访问域名)来访问已经安装成功的极狐GitLab实例: + +![jh instance](/images/docs/v3.3/zh-cn/appstore/built-in-apps/jh-app/jh-instance.png) + +接下来你就可以使用极狐GitLab实例来开启你的 DevOps 之旅了。 + +## 了解更多 + +如果你想了解更多极狐GitLab的使用场景和最佳实践,请访问[极狐(GitLab)公司官网](https://gitlab.cn)。 diff --git a/content/zh/docs/v3.4/application-store/built-in-apps/memcached-app.md b/content/zh/docs/v3.4/application-store/built-in-apps/memcached-app.md new file mode 100644 index 000000000..f2cd04440 --- /dev/null +++ b/content/zh/docs/v3.4/application-store/built-in-apps/memcached-app.md @@ -0,0 +1,50 @@ +--- +title: "在 KubeSphere 中部署 Memcached" +keywords: 'Kubernetes, KubeSphere, Memcached, 应用商店' +description: '了解如何从 KubeSphere 应用商店中部署 Memcached 并访问服务。' +linkTitle: "在 KubeSphere 中部署 Memcached" +weight: 14230 +--- +[Memcached](https://memcached.org/) 是一个内存中的 (in-memory) 键值存储库,用于存储由数据库调用、API 调用或页面渲染产生的小块任意数据(字符串、对象)。它的 API 对大多数主流的语言均可用。 + +本教程演示如何从 KubeSphere 应用商店部署 Memcached。 + +## 准备工作 + +- 请确保[已启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 +- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,请以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 动手实验 + +### 步骤 1:从应用商店中部署 Memcached + +1. 在 `demo-project` 项目的**概览**页面,点击左上角的**应用商店**。 + +2. 找到 Memcached,点击**应用信息**页面上的**安装**。 + +3. 设置名称并选择应用版本。请确保将 Memcached 部署在 `demo-project` 中,点击**下一步**。 + +4. 在**应用设置**页面,您可以使用默认配置或者直接编辑 YAML 文件来自定义配置。点击**安装**继续。 + +5. 稍等片刻待 Memcached 启动并运行。 + + +### 步骤 2:访问 Memcached + +1. 在**服务**页面点击 Memcached 的服务名称。 + +2. 在详情页面,您可以分别在**端口**和**容器组**下找到端口号和 Pod IP。 + +3. Memcached 服务是 Headless 服务,因此在集群内通过 Pod IP 和端口号访问它。Memcached `telnet` 命令的基本语法是 `telnet HOST PORT`。例如: + + ```bash + # telnet 10.10.235.3 11211 + Trying 10.10.235.3... + Connected to 10.10.235.3. + Escape character is '^]'. + set runoob 0 900 9 + memcached + STORED + ``` + +4. 有关更多信息,请参见 [Memcached](https://memcached.org/)。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/application-store/built-in-apps/minio-app.md b/content/zh/docs/v3.4/application-store/built-in-apps/minio-app.md new file mode 100644 index 000000000..9ef720444 --- /dev/null +++ b/content/zh/docs/v3.4/application-store/built-in-apps/minio-app.md @@ -0,0 +1,58 @@ +--- +title: "在 KubeSphere 中部署 MinIO" +keywords: 'Kubernetes, KubeSphere, Minio, 应用商店' +description: '了解如何从 KubeSphere 应用商店中部署 Minio 并访问服务。' +linkTitle: "在 KubeSphere 中部署 MinIO" +weight: 14240 +--- +[MinIO](https://min.io/) 对象存储为高性能和 S3 API 而设计。对于具有严格安全要求的大型私有云环境来说,MinIO 是理想选择,它可以为多种工作负载提供任务关键型可用性。 + +本教程演示如何从 KubeSphere 应用商店部署 MinIO。 + +## 准备工作 + +- 请确保[已启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 +- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,请以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 动手实验 + +### 步骤 1:从应用商店中部署 MinIO + +1. 在 `demo-project` 项目的**概览**页面,点击左上角的**应用商店**。 + +2. 找到 MinIO,点击**应用信息**页面上的**安装**。 + +3. 设置名称并选择应用版本。请确保将 MinIO 部署在 `demo-project` 中,点击**下一步**。 + +4. 在**应用设置**页面,您可以使用默认配置或者直接编辑 YAML 文件来自定义配置。点击**安装**继续。 + +5. 稍等片刻待 MinIO 启动并运行。 + + +### 步骤 2:访问 MinIO Browser + +要从集群外部访问 MinIO,您需要先通过 NodePort 暴露该应用。 + +1. 在**服务**页面点击 MinIO 的服务名称。 + +2. 点击**更多操作**,在下拉菜单中选择**编辑外部访问**。 + +3. 
在**访问模式**的下拉列表中选择 **NodePort**,然后点击**确定**。有关更多信息,请参见[项目网关](../../../project-administration/project-gateway/)。 + +4. 您可以在**端口**中查看已暴露的端口。 + +5. 要访问 MinIO Browser,您需要 `accessKey` 和 `secretKey`,都在 MinIO 配置文件中指定。在**应用**的**应用模板**选项卡中,点击 MinIO,随后可以在**配置文件**选项卡下查找这两个字段的值。 + +6. 通过 `:` 使用 `accessKey` 和 `secretKey` 访问 MinIO Browser。 + + ![Minio Browser](/images/docs/v3.3/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/minio-browser-13.PNG) + + ![Minio Browser 界面](/images/docs/v3.3/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/minio-browser-interface-14.PNG) + + {{< notice note >}} + + 取决于您的 Kubernetes 集群的部署位置,您可能需要在安全组中放行端口并配置相关的端口转发规则。 + + {{}} + +7. 有关 MinIO 的更多信息,请参见 [MinIO 官方文档](https://docs.min.io/)。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/application-store/built-in-apps/mongodb-app.md b/content/zh/docs/v3.4/application-store/built-in-apps/mongodb-app.md new file mode 100644 index 000000000..9d3c2d34f --- /dev/null +++ b/content/zh/docs/v3.4/application-store/built-in-apps/mongodb-app.md @@ -0,0 +1,55 @@ +--- +title: "在 KubeSphere 中部署 MongoDB" +keywords: 'KubeSphere, Kubernetes, 安装, MongoDB' +description: '了解如何从 KubeSphere 应用商店中部署 MongoDB 并访问服务。' +linkTitle: "在 KubeSphere 中部署 MongoDB" +weight: 14250 +--- + +[MongoDB](https://www.mongodb.com/) 是一个通用的、基于文档的分布式数据库,为现代应用开发者和云时代而打造。 + +本教程演示如何从 KubeSphere 应用商店部署 MongoDB。 + +## 准备工作 + +- 请确保[已启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 +- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,请以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 动手实验 + +### 步骤 1:从应用商店中部署 MongoDB + +1. 在 `demo-project` 项目的**概览**页面,点击左上角的**应用商店**。 + +2. 找到 MongoDB,点击**应用信息**页面上的**安装**。 + +3. 设置名称并选择应用版本。请确保将 MongoDB 部署在 `demo-project` 中,点击**下一步**。 + +4. 在**应用设置**页面,为该应用指定持久卷大小,并记录用户名和密码用于访问该应用。操作完成后,点击**安装**。 + + {{< notice note >}} + + 要为 MongoDB 指定更多值,请打开右上角的**编辑YAML**开关查看 YAML 格式的应用清单文件,编辑其配置。 + + {{}} + +5. 稍等片刻待 MongoDB 启动并运行。 + + +### 步骤 2:访问 MongoDB 终端 + +1. 转到**服务**页面,点击 MongoDB 的服务名称。 + +2. 在**容器组**下,展开菜单查看容器详情,然后点击**终端**图标。 + +3. 在弹出窗口中,直接向终端输入命令使用该应用。 + + ![Mongodb 服务终端](/images/docs/v3.3/zh-cn/appstore/built-in-apps/mongodb-app/mongodb-service-terminal-9.PNG) + + {{< notice note >}} + + 如果您想从集群外部访问 MongoDB,请点击**更多操作**,选择**编辑外网服务**。在弹出的对话框中,选择 **NodePort** 作为访问方式。端口暴露后,使用该端口号访问 MongoDB。取决于您的 Kubernetes 集群的部署位置,您可能需要在安全组中放行端口并配置相关的端口转发规则。 + + {{}} + +4. 
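+    在终端中连接后,可以执行几条简单命令验证读写(示意,用户名、密码和认证库均以安装时记录的配置为准):
+
+    ```bash
+    # 使用安装时记录的帐户登录 MongoDB
+    mongo -u root -p 安装时记录的密码 --authenticationDatabase admin
+    # 在 mongo shell 中写入并读取一条测试数据:
+    #   use test
+    #   db.demo.insert({ name: "kubesphere" })
+    #   db.demo.find()
+    ```
+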
有关更多信息,请参见 [MongoDB 官方文档](https://docs.mongodb.com/manual/)。 diff --git a/content/zh/docs/v3.4/application-store/built-in-apps/mysql-app.md b/content/zh/docs/v3.4/application-store/built-in-apps/mysql-app.md new file mode 100644 index 000000000..1ad6f2007 --- /dev/null +++ b/content/zh/docs/v3.4/application-store/built-in-apps/mysql-app.md @@ -0,0 +1,67 @@ +--- +title: "在 KubeSphere 中部署 MySQL" +keywords: 'KubeSphere, Kubernetes, 安装, MySQL' +description: '了解如何从 KubeSphere 应用商店中部署 MySQL 并访问服务。' + +link title: "在 KubeSphere 中部署 MySQL" +weight: 14260 +--- +[MySQL ](https://www.mysql.com/)是一个开源的关系型数据库管理系统 (RDBMS),它基于最常用的数据库管理语言 SQL。作为世界上最受欢迎的开源数据库,MySQL 为云原生应用部署提供了完全托管的数据库服务。 + +本教程演示如何从 KubeSphere 的应用商店部署 MySQL。 + +## 准备工作 + +- 您需要[启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 +- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`)。该用户必须是已邀请至项目的平台普通用户,并且在项目中的角色为 `operator`。在本教程中,您需要以 `project-regular` 用户登录,并在 `demo-workspace` 企业空间的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 动手实验 + +### 步骤 1:从应用商店部署 MySQL + +1. 在 `demo-project` 的**概览**页面,点击左上角的**应用商店**。 + +2. 找到 MySQL,在**应用信息**页面点击**安装**。 + +3. 设置应用名称和版本,确保 MySQL 部署在 `demo-project` 项目中,然后点击**下一步**。 + +4. 在**应用设置**页面,取消对 `mysqlRootPassword` 字段的注释并设置密码,然后点击**安装**。 + +5. 等待 MySQL 创建完成并开始运行。 + + +### 步骤 2:访问 MySQL 终端 + +1. 打开**工作负载**页面并点击 MySQL 的工作负载名称。 + +2. 在**容器组**区域,展开容器详情,点击终端图标。 + +3. 在终端窗口中,执行 `mysql -uroot -ptesting` 命令以 `root` 用户登录 MySQL。 + + +### 步骤 3:从集群外访问 MySQL 数据库 + +要从集群外访问 MySQL,您需要先用 NodePort 暴露该应用。 + +1. 打开**服务**页面并点击 MySQL 的服务名称。 + +2. 点击**更多操作**,在下拉菜单中选择**编辑外部访问**。 + +3. 将**访问模式**设置为 **NodePort** 并点击**确定**。有关更多信息,请参见[项目网关](../../../project-administration/project-gateway/)。 + +4. 您可以在**端口**区域查看暴露的端口。该端口号和公网 IP 地址将在下一步用于访问 MySQL 数据库。 + +5. 您需要使用 MySQL Client 或第三方应用(例如 SQLPro Studio)才能访问 MySQL 数据库。以下演示如何使用 SQLPro Studio 访问 MySQL 数据库。 + + ![login](/images/docs/v3.3/zh-cn/appstore/built-in-apps/mysql-app/login.png) + + ![access-mysql-success](/images/docs/v3.3/zh-cn/appstore/built-in-apps/mysql-app/access-mysql-success.png) + + {{< notice note >}} + + 取决于您的 Kubernetes 集群的部署位置,您可能需要在安全组中放行端口并配置相关的端口转发规则。 + + {{}} + +6. 有关 MySQL 的更多信息,请参考[ MySQL 官方文档](https://dev.mysql.com/doc/)。 + diff --git a/content/zh/docs/v3.4/application-store/built-in-apps/nginx-app.md b/content/zh/docs/v3.4/application-store/built-in-apps/nginx-app.md new file mode 100644 index 000000000..50fb8eb58 --- /dev/null +++ b/content/zh/docs/v3.4/application-store/built-in-apps/nginx-app.md @@ -0,0 +1,61 @@ +--- +title: "在 KubeSphere 中部署 NGINX" +keywords: 'KubeSphere, Kubernetes, 安装, NGINX' +description: '了解如何从 KubeSphere 应用商店中部署 NGINX 并访问服务。' +linkTitle: "在 KubeSphere 中部署 NGINX" +weight: 14270 +--- + +[NGINX](https://www.nginx.com/) 是一个开源软件应用,用于 Web 服务、反向代理、缓存、负载均衡、流媒体等。 + +本教程演示如何从 KubeSphere 应用商店部署 NGINX。 + +## 准备工作 + +- 请确保[已启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 +- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,请以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 动手实验 + +### 步骤 1:从应用商店中部署 NGINX + +1. 在 `demo-project` 项目的**概览**页面,点击左上角的**应用商店**。 + +2. 找到 NGINX,点击**应用信息**页面上的**安装**。 + +3. 设置名称并选择应用版本。请确保将 NGINX 部署在 `demo-project` 中,点击**下一步**。 + +4. 在**应用设置**页面,指定要为该应用部署的副本数量,根据需要启用应用路由 (Ingress)。操作完成后,点击**安装**。 + + {{< notice note >}} + + 要为 NGINX 指定更多值,请打开右上角的拨动开关,查看 YAML 格式的应用清单文件,编辑其配置。 + + {{}} + +5. 
稍等片刻待 NGINX 启动并运行。 + + +### 步骤 2:访问 NGINX + +要从集群外部访问 NGINX,您需要先通过 NodePort 暴露该应用。 + +1. 转到**服务**页面,点击 NGINX 的服务名称。 + +2. 在服务详情页面,点击**更多操作**,在下拉菜单中选择**编辑外部访问**。 + +3. **访问模式**选择 **NodePort**,然后点击**确定**。有关更多信息,请参见[项目网关](../../../project-administration/project-gateway/)。 + +4. 在**端口**下,您可以查看已暴露的端口。 + +5. 通过 `:` 访问 NGINX。 + + ![访问 Nginx](/images/docs/v3.3/zh-cn/appstore/built-in-apps/nginx-app/access-nginx-12.PNG) + + {{< notice note >}} + + 取决于您的 Kubernetes 集群的部署位置,您可能需要在安全组中放行端口并配置相关的端口转发规则。 + + {{}} + +6. 有关更多信息,请参见 [NGINX 官方文档](https://docs.nginx.com/?_ga=2.48327718.1445131049.1605510038-1186152749.1605510038)。 diff --git a/content/zh/docs/v3.4/application-store/built-in-apps/postgresql-app.md b/content/zh/docs/v3.4/application-store/built-in-apps/postgresql-app.md new file mode 100644 index 000000000..9b2425ac3 --- /dev/null +++ b/content/zh/docs/v3.4/application-store/built-in-apps/postgresql-app.md @@ -0,0 +1,59 @@ +--- +title: "在 KubeSphere 中部署 PostgreSQL" +keywords: 'Kubernetes, KubeSphere, PostgreSQL, 应用商店' +description: '了解如何从 KubeSphere 应用商店中部署 PostgreSQL 并访问服务。' +linkTitle: "在 KubeSphere 中部署 PostgreSQL" +weight: 14280 +--- + +作为强大的开源对象关系型数据库系统,[PostgreSQL](https://www.postgresql.org/) 以其卓越的可靠性、功能鲁棒性和高性能而著称。 + +本教程演示如何从 KubeSphere 的应用商店部署 PostgreSQL。 + +## 准备工作 + +- 您需要[启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 +- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`)。该用户必须是已邀请至项目的平台普通用户,并且在项目中的角色为 `operator`。在本教程中,您需要以 `project-regular` 用户登录,并在 `demo-workspace` 企业空间的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 动手实验 + +### 步骤 1:从应用商店部署 PostgreSQL + +1. 在 `demo-project` 的**概览**页面,点击左上角的**应用商店**。 + +2. 找到 PostgreSQL,在**应用信息**页面点击**安装**。 + +3. 设置应用名称和版本,确保 PostgreSQL 部署在 `demo-project` 项目中,然后点击**下一步**。 + +4. 在**应用设置**页面,为应用设置持久卷,记录用户名和密码用于后续访问应用,然后点击**安装**。 + + {{< notice note >}} + + 如需为 PostgreSQL 设置更多的参数,可点击 **编辑YAML** 开关打开应用的 YAML 清单文件,并在清单文件中设置相关参数。 + + {{}} + +5. 等待 PostgreSQL 创建完成并开始运行。 + + +### 步骤 2:访问 PostgreSQL 数据库 + +要从集群外访问 PostgreSQL,您需要先用 NodePort 暴露该应用。 + +1. 打开**服务**页面并点击 PostgreSQL 的服务名称。 + +2. 点击**更多操作**,在下拉菜单中选择**编辑外部访问**。 + +3. 将**访问模式**设置为 **NodePort** 并点击**确定**。有关更多信息,请参见[项目网关](../../../project-administration/project-gateway/)。 + +4. 您可以在**端口**区域查看暴露的端口。该端口将在下一步中用于访问 PostgreSQL 数据库。 + +5. 在**容器组**区域,展开容器详情,点击终端图标。在弹出的窗口中直接输入命令访问数据库。 + + {{< notice note >}} + + 您也可以使用第三方应用例如 SQLPro Studio 连接数据库。取决于您的 Kubernetes 集群的部署位置,您可能需要在安全组中放行端口并配置相关的端口转发规则。 + + {{}} + +6. 
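+    例如,在容器终端中可以用 psql 连接数据库并执行一条简单查询(示意,用户名以安装时记录的为准):
+
+    ```bash
+    # 连接默认数据库并查看版本信息
+    psql -U postgres -d postgres -c "SELECT version();"
+    ```
+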
有关更多信息,请参考[ PostgreSQL 官方文档](https://www.postgresql.org/docs/)。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/application-store/built-in-apps/rabbitmq-app.md b/content/zh/docs/v3.4/application-store/built-in-apps/rabbitmq-app.md new file mode 100644 index 000000000..f29056e3c --- /dev/null +++ b/content/zh/docs/v3.4/application-store/built-in-apps/rabbitmq-app.md @@ -0,0 +1,63 @@ +--- +title: "在 KubeSphere 中部署 RabbitMQ" +keywords: 'KubeSphere, RabbitMQ, Kubernetes, 安装' +description: '了解如何从 KubeSphere 应用商店中部署 RabbitMQ 并访问服务。' + +link title: "在 KubeSphere 中部署 RabbitMQ" +weight: 14290 +--- +[RabbitMQ](https://www.rabbitmq.com/) 是部署最广泛的开源消息代理。它轻量且易于在本地和云上部署,支持多种消息协议。RabbitMQ 可在分布和联邦的配置中部署,以满足大规模和高可用性需求。 + +本教程演示如何从 KubeSphere 的应用商店部署 RabbitMQ。 + +## 准备工作 + +- 您需要[启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 +- 您需要创建一个企业空间、一个项目和一个用户帐户。该用户必须是已邀请至项目的平台普通用户,并且在项目中的角色为 `operator`。在本教程中,您需要以 `project-regular` 用户登录,并在 `demo-workspace` 企业空间的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 动手实验 + +### 步骤 1:从应用商店部署 RabbitMQ + +1. 在 `demo-project` 的**概览**页面,点击左上角的**应用商店**。 + +2. 找到 RabbitMQ,在**应用信息**页面点击**安装**。 + +3. 设置应用名称和版本,确保 RabbitMQ 部署在 `demo-project` 项目中,然后点击**下一步**。 + +4. 在**应用设置**页面,您可以直接使用默认配置,也可以通过修改表单参数或编辑 YAML 文件自定义配置。您需要记录 **Root Username** 和 **Root Password** 的值,用于在后续步骤中登录系统。设置完成后点击**安装**。 + + {{< notice tip >}} + + 如需查看清单文件,请点击 **编辑YAML** 开关。 + + {{}} + +5. 等待 RabbitMQ 创建完成并开始运行。 + + +### 步骤 2:访问 RabbitMQ 主页 + +要从集群外访问 RabbitMQ,您需要先用 NodePort 暴露该应用。 + +1. 打开**服务**页面并点击 RabbitMQ 的服务名称。 + +2. 点击**更多操作**,在下拉菜单中选择**编辑外部访问**。 + +3. 将**访问模式**设置为 **NodePort** 并点击**确定**。有关更多信息,请参见[项目网关](../../../project-administration/project-gateway/)。 + +4. 您可以在**端口**区域查看暴露的端口。 + +5. 用 `:` 地址以及步骤 1 中记录的用户名和密码访问 RabbitMQ 的 **management** 端口。 + ![rabbitmq-dashboard](/images/docs/v3.3/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq-dashboard.png) + + ![rabbitma-dashboard-detail](/images/docs/v3.3/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitma-dashboard-detail.png) + + {{< notice note >}} + + 取决于您的 Kubernetes 集群的部署位置,您可能需要在安全组中放行端口并配置相关的端口转发规则。 + + {{}} + +6. 
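+    除了网页控制台,也可以调用 RabbitMQ 的管理接口快速确认服务可用(示意,地址、端口与帐户请替换为实际值):
+
+    ```bash
+    # 通过管理接口查询集群概览信息
+    curl -u 用户名:密码 http://${NodeIP}:${NodePort}/api/overview
+    ```
+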
有关 RabbitMQ 的更多信息,请参考[ RabbitMQ 官方文档](https://www.rabbitmq.com/documentation.html)。 + diff --git a/content/zh/docs/v3.4/application-store/built-in-apps/radondb-mysql-app.md b/content/zh/docs/v3.4/application-store/built-in-apps/radondb-mysql-app.md new file mode 100644 index 000000000..59db7b705 --- /dev/null +++ b/content/zh/docs/v3.4/application-store/built-in-apps/radondb-mysql-app.md @@ -0,0 +1,51 @@ +--- +title: "在 KubeSphere 中部署 RadonDB MySQL" +keywords: 'KubeSphere, Kubernetes, 安装, RadonDB MySQL' +description: '了解如何从 KubeSphere 应用商店部署 RadonDB MySQL。' +linkTitle: "在 KubeSphere 中部署 RadonDB MySQL" +weight: 14293 +--- + +[RadonDB MySQL](https://github.com/radondb/radondb-mysql-kubernetes) 是基于 [MySQL](https://MySQL.org) 的开源、云原生、高可用集群解决方案。通过使用 Raft 协议,RadonDB MySQL 可以快速进行故障转移,且不会丢失任何事务。 + +本教程演示如何从 KubeSphere 应用商店部署 RadonDB MySQL。 + +{{< notice note >}} + +应用商店 RadonDB MySQL 的版本为 v1.0.0,已不再维护。 + +推荐您选用最新版 RadonDB MySQL,相关部署说明请参见[部署 RadonDB MySQL Operator 和集群](../../external-apps/deploy-radondb-mysql/)。 + +{{}} + +## 准备工作 + +- 请确保[已启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 +- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,请以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 动手实验 + +### 步骤 1:从应用商店中部署 RadonDB MySQL + +1. 在 `demo-project` 项目的**概览**页面,点击左上角的**应用商店**。 + +2. 找到 RadonDB MySQL,点击**应用信息**页面上的**安装**。 + +3. 设置名称并选择应用版本。请确保将 RadonDB MySQL 部署在 `demo-project` 中,点击**下一步**。 + +4. 在**应用设置**页面,您可以使用默认配置,或者编辑 YAML 文件以自定义配置。点击**安装**继续。 + +5. 稍等片刻待 RadonDB MySQL 启动并运行。 + + +### 步骤 2:访问 RadonDB MySQL + +1. 进入**应用负载**下的**服务**页面,点击 RadonDB MySQL 服务名称。 + +2. 在**容器组**下,展开菜单查看容器详情,然后点击**终端**图标。 + +3. 在弹出窗口中,直接向终端输入命令使用该应用。 + + ![访问 RadonDB MySQL](/images/docs/v3.3/zh-cn/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-service-terminal.png) + +4. 如果您想从集群外部访问 RadonDB MySQL,详细信息请参见 [RadonDB MySQL 开源项目](https://github.com/radondb/radondb-mysql-kubernetes)。 diff --git a/content/zh/docs/v3.4/application-store/built-in-apps/radondb-postgresql-app.md b/content/zh/docs/v3.4/application-store/built-in-apps/radondb-postgresql-app.md new file mode 100644 index 000000000..8283e9d8f --- /dev/null +++ b/content/zh/docs/v3.4/application-store/built-in-apps/radondb-postgresql-app.md @@ -0,0 +1,60 @@ +--- +title: "在 KubeSphere 中部署 RadonDB PostgreSQL" +keywords: 'KubeSphere, Kubernetes, 安装, RadonDB PostgreSQL' +description: '了解如何从 KubeSphere 应用商店部署 RadonDB PostgreSQL。' +linkTitle: "在 KubeSphere 中部署 RadonDB PostgreSQL" +weight: 14294 +--- + +[RadonDB PostgreSQL](https://github.com/radondb/radondb-postgresql-kubernetes) 是基于 [PostgreSQL](https://postgresql.org) 的开源、云原生、高可用集群解决方案。 + +本教程演示如何从 KubeSphere 应用商店部署 RadonDB PostgreSQL。 + +## 准备工作 + +- 请确保[已启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 +- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 动手实验 + +### 步骤 1:从应用商店中部署 RadonDB PostgreSQL + +1. 在 `demo-project` 项目的**概览**页面,点击左上角的**应用商店**。 + +2. 找到 RadonDB PostgreSQL,点击**应用信息**页面上的**安装**。 + +3. 设置名称并选择应用版本。请确保将 RadonDB PostgreSQL 部署在 `demo-project` 中,点击**下一步**。 + +4. 在**应用设置**页面,您可以使用默认配置,或者编辑 YAML 文件以自定义配置。点击**安装**继续。 + +5. 稍等片刻待 RadonDB PostgreSQL 启动并运行。 + + +### 步骤 2:查看 PostgreSQL 集群状态 + +1. 
在 `demo-project` 项目的**概览**页面,可查看当前项目资源使用情况。 + +2. 进入**应用负载**下的**工作负载**页面,点击**有状态副本集**,查看集群状态。 + + 进入一个有状态副本集群详情页面,点击**监控**标签页,可查看一定时间范围内的集群指标。 + +3. 进入**应用负载**下的**容器组**页面,可查看所有状态的容器。 + +4. 进入**存储**下的**持久卷声明**页面,可查看持久卷声明,所有组件均使用了持久化存储。 + + 查看某个持久卷声明用量信息,以其中一个数据节点为例,可以看到当前存储的存储容量和剩余容量等监控数据。 + + +### 步骤 3:访问 RadonDB PostgreSQL + +1. 在 **应用负载**下的**容器组**页面,点击一个容器的名称,进入容器详情页面。 + +2. 在**资源状态**页面,点击**终端**图标。 + +3. 在弹出窗口中,向终端输入命令使用该应用。 + + ```bash + psql -h -p 5432 -U postgres -d postgres + ``` + +4. 如果您想从集群外部访问 RadonDB PostgreSQL,详细信息请参见 [RadonDB PostgreSQL 开源项目](https://github.com/radondb/radondb-postgresql-kubernetes)。 diff --git a/content/zh/docs/v3.4/application-store/built-in-apps/redis-app.md b/content/zh/docs/v3.4/application-store/built-in-apps/redis-app.md new file mode 100644 index 000000000..6d267feec --- /dev/null +++ b/content/zh/docs/v3.4/application-store/built-in-apps/redis-app.md @@ -0,0 +1,47 @@ +--- +title: "在 KubeSphere 中部署 Redis" +keywords: 'KubeSphere, Kubernetes, 安装, Redis' +description: '了解如何从 KubeSphere 应用商店中部署 Redis 并访问服务。' +linkTitle: "在 KubeSphere 中部署 Redis" +weight: 14291 +--- + +[Redis](https://redis.io/) 是一个开源的(遵循 BSD 协议)、内存中的 (in-memory) 数据结构存储库,用作数据库、缓存和消息代理。 + +本教程演示如何从 KubeSphere 应用商店部署 Redis。 + +## 准备工作 + +- 请确保[已启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 +- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,请以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 动手实验 + +### 步骤 1:从应用商店中部署 Redis + +1. 在 `demo-project` 项目的**概览**页面,点击左上角的**应用商店**。 + +2. 找到 Redis,点击**应用信息**页面上的**安装**。 + +3. 设置名称并选择应用版本。请确保将 Redis 部署在 `demo-project` 中,点击**下一步**。 + +4. 在**应用设置**页面,为应用指定持久化存储卷和密码。操作完成后,点击**安装**。 + + {{< notice note >}} + + 要为 Redis 指定更多值,请打开右上角的拨动开关查看 YAML 格式的应用清单文件,编辑其配置。 + + {{}} + +5. 稍等片刻待 Redis 启动并运行。 + + +### 步骤 2:访问 Redis 终端 + +1. 转到**服务**页面,点击 Redis 的服务名称。 + +2. 在**容器组**中展开菜单查看容器详情,随后点击**终端**图标。 + +3. 在弹出窗口的终端中运行 `redis-cli` 命令来使用该应用。 + +4. 有关更多信息,请参见 [Redis 官方文档](https://redis.io/documentation)。 diff --git a/content/zh/docs/v3.4/application-store/built-in-apps/tomcat-app.md b/content/zh/docs/v3.4/application-store/built-in-apps/tomcat-app.md new file mode 100644 index 000000000..72b3abddf --- /dev/null +++ b/content/zh/docs/v3.4/application-store/built-in-apps/tomcat-app.md @@ -0,0 +1,65 @@ +--- +title: "在 KubeSphere 中部署 Tomcat" +keywords: 'KubeSphere, Kubernetes, 安装, Tomcat' +description: '了解如何从 KubeSphere 应用商店中部署 Tomcat 并访问服务。' + +link title: "在 KubeSphere 中部署 Tomcat" +weight: 14292 +--- +[Apache Tomcat](https://tomcat.apache.org/index.html) 支撑着诸多行业和组织中的众多大规模任务关键型 Web 应用。它提供了一个纯 Java HTTP Web 服务器环境,可用于执行 Java 代码。 + +本教程演示如何从 KubeSphere 的应用商店部署 Tomcat。 + +## 准备工作 + +- 您需要[启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 +- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`)。该用户必须是已邀请至项目的平台普通用户,并且在项目中的角色为 `operator`。在本教程中,您需要以 `project-regular` 用户登录,并在 `demo-workspace` 企业空间的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 动手实验 + +### 步骤 1:从应用商店部署 Tomcat + +1. 在 `demo-project` 的**概览**页面,点击左上角的**应用商店**。 + +2. 找到 Tomcat,在**应用信息**页面点击**安装**。 + +3. 设置应用名称和版本,确保 Tomcat 部署在 `demo-project` 项目中,然后点击**下一步**。 + +4. 在**应用设置**页面,您可以直接使用默认配置,也可以通过编辑 YAML 文件自定义配置。设置完成后点击**安装**。 + +5. 等待 Tomcat 创建完成并开始运行。 + + +### 步骤 2:访问 Tomcat 终端 + +1. 打开**服务**页面并点击 Tomcat 的服务名称。 + +2. 在**容器组**区域,展开容器详情,点击终端图标。 + +3. 
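+    可以在终端中用如下示意命令列出该目录的内容:
+
+    ```bash
+    # 列出已部署到 Tomcat 的项目
+    ls /usr/local/tomcat/webapps
+    ```
+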
在 `/usr/local/tomcat/webapps` 目录下查看部署的项目。 + + +### 步骤 3:用浏览器访问 Tomcat 项目 + +要从集群外访问 Tomcat 项目,您需要先用 NodePort 暴露该应用。 + +1. 打开**服务**页面并点击 Tomcat 的服务名称。 + +2. 点击**更多操作**,在下拉菜单中选择**编辑外部访问**。 + +3. 将**访问模式**设置为 **NodePort** 并点击**确定**。有关更多信息,请参见[项目网关](../../../project-administration/project-gateway/)。 + +4. 您可以在**端口**区域查看暴露的端口。 + +5. 在浏览器中用 `:/sample` 地址访问 Tomcat 示例项目。 + + ![access-tomcat-browser](/images/docs/v3.3/zh-cn/appstore/built-in-apps/tomcat-app/access-tomcat-browser.png) + + {{< notice note >}} + + 取决于您的 Kubernetes 集群的部署位置,您可能需要在安全组中放行端口并配置相关的端口转发规则。 + + {{}} + +6. 有关 Tomcat 的更多信息,请参考[ Tomcat 官方文档](https://tomcat.apache.org/index.html)。 + diff --git a/content/zh/docs/v3.4/application-store/external-apps/_index.md b/content/zh/docs/v3.4/application-store/external-apps/_index.md new file mode 100644 index 000000000..e4fcc31f1 --- /dev/null +++ b/content/zh/docs/v3.4/application-store/external-apps/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "外部应用" +weight: 14300 + +_build: + render: false +--- diff --git a/content/zh/docs/v3.4/application-store/external-apps/deploy-clickhouse.md b/content/zh/docs/v3.4/application-store/external-apps/deploy-clickhouse.md new file mode 100644 index 000000000..b6d526235 --- /dev/null +++ b/content/zh/docs/v3.4/application-store/external-apps/deploy-clickhouse.md @@ -0,0 +1,150 @@ +--- +title: "在 KubeSphere 中部署 ClickHouse 集群" +keywords: 'KubeSphere, Kubernetes, 安装, ClickHouse' +description: '了解如何从 KubeSphere 应用商店部署 ClickHouse。' +linkTitle: "部署 RadonDB ClickHouse 集群" +weight: 14340 +--- + +[ClickHouse](https://clickhouse.tech/) 是一款用于联机分析 (OLAP) 的列式数据库管理系统 (DBMS)。[RadonDB ClickHouse](https://github.com/radondb/radondb-clickhouse-kubernetes) 是一款深度定制的 ClickHouse 集群应用,完美保持了 ClickHouse 集群功能特性,并具备集群自动管理、集群数据重分布、高性能低成本等优势功能特性。 + +本教程演示了如何在 KubeSphere 上部署 ClickHouse Operator 和 ClickHouse 集群。 + +## 准备工作 + +- 请确保[已启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 +- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,请以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 +- 请确保 KubeSphere 项目网关已开启外网访问。有关更多信息,请参见[项目网关](../../../project-administration/project-gateway/)。 + +## 动手实验 + +### 步骤 1:部署 ClickHouse Operator + +1. 
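+    开始安装前,可以先用如下示意命令确认集群中有足够的可调度节点,再继续执行下述安装步骤:
+
+    ```bash
+    # 查看集群节点数量与状态
+    kubectl get nodes
+    ```
+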
以 `admin` 身份登录 KubeSphere 的 Web 控制台,并使用**工具箱**中的 **Kubectl** 执行以下命令来安装 ClickHouse Operator。建议至少准备 2 个可用集群节点。 + + ```bash + $ kubectl apply -f https://raw.githubusercontent.com/radondb/radondb-clickhouse-kubernetes/main/clickhouse-operator-install.yml + ``` + + {{< notice note >}} + + ClickHouse Operator 将会被安装在 `kube-system` 命名空间下,因此一个 Kubernetes 集群只需要安装一次 ClickHouse Operator。 + + {{}} + + **预期结果** + + ```powershell + $ kubectl apply -f https://raw.githubusercontent.com/radondb/radondb-clickhouse-kubernetes/main/clickhouse-operator-install.yml + customresourcedefinition.apiextensions.k8s.io/clickhouseinstallations.clickhouse.radondb.com created + customresourcedefinition.apiextensions.k8s.io/clickhouseinstallationtemplates.clickhouse.radondb.com created + customresourcedefinition.apiextensions.k8s.io/clickhouseoperatorconfigurations.clickhouse.radondb.com created + serviceaccount/clickhouse-operator created + clusterrole.rbac.authorization.k8s.io/clickhouse-operator-kube-system created + clusterrolebinding.rbac.authorization.k8s.io/clickhouse-operator-kube-system created + configmap/etc-clickhouse-operator-files created + configmap/etc-clickhouse-operator-confd-files created + configmap/etc-clickhouse-operator-configd-files created + configmap/etc-clickhouse-operator-templatesd-files created + configmap/etc-clickhouse-operator-usersd-files created + deployment.apps/clickhouse-operator created + service/clickhouse-operator-metrics created + ``` + +2. 执行如下命令可查看 ClickHouse Operator 资源状态。 + + ```bash + $ kubectl get all --selector=app=clickhouse-operator -n kube-system + ``` + **预期结果** + ``` + NAME READY STATUS RESTARTS AGE + pod/clickhouse-operator-644fcb8759-9tfcx 2/2 Running 0 4m32s + + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + service/clickhouse-operator-metrics ClusterIP 10.96.72.49 8888/TCP 4m32s + + NAME READY UP-TO-DATE AVAILABLE AGE + deployment.apps/clickhouse-operator 1/1 1 1 4m32s + + NAME DESIRED CURRENT READY AGE + replicaset.apps/clickhouse-operator-644fcb8759 1 1 1 4m32s + + ``` + +### 步骤 2:添加应用仓库 + +1. 以 `ws-admin` 身份登录 KubeSphere 的 Web 控制台。在企业空间中,进入**应用管理**下的**应用仓库**页面,点击**添加**。 + +2. 在出现的对话框中,输入 `clickhouse` 作为应用仓库名称,输入 `https://radondb.github.io/radondb-clickhouse-kubernetes/` 作为仓库的 URL。点击**验证**以验证 URL。在 URL 旁边呈现一个绿色的对号,验证通过后,点击**确定**继续。 + +3. 将仓库成功导入到 KubeSphere 之后,在列表中可查看 ClickHouse 仓库。 + + +### 步骤 3:部署 ClickHouse 集群 + +1. 以 `project-regular` 身份登录 KubeSphere 的 Web 控制台。在 `demo-project` 项目中,进入**应用负载**下的**应用**页面,点击**创建**。 + +2. 在对话框中,选择**从应用模板**。 + +3. 从下拉菜单中选择 `clickhouse` 应用仓库 ,然后点击 **clickhouse-cluster**。 + +4. 在**Chart 文件**选项卡,可以直接通过控制台查看配置信息,也可以通过下载默认 `values.yaml` 文件查看。在**版本**列框下,选择一个版本号,点击**安装**以继续。 + +5. 在**基本信息**页面,确认应用名称、应用版本以及部署位置。点击**下一步**以继续。 + +6. 在**应用设置**页面,可以编辑 `values.yaml` 文件,也可以直接点击**安装**使用默认配置。 + +7. 等待 ClickHouse 集群正常运行。可在**工作负载**下的**应用**页面,查看部署的应用。 + + +### 步骤 4:查看 ClickHouse 集群状态 + +1. 以 `project-regular` 身份登录 KubeSphere 的 Web 控制台。 + +2. 进入**应用负载**下的**工作负载**页面,点击**有状态副本集**,查看集群状态。 + + 进入一个有状态副本集群详情页面,点击**监控**标签页,可查看一定时间范围内的集群指标。 + +3. 进入**应用负载**下的**容器组**页面,可查看所有状态的容器。 + +4. 进入**存储**下的**持久卷声明**页面,可查看持久卷声明,所有组件均使用了持久化存储。 + + 查看某个持久卷声明用量信息,以其中一个数据节点为例,可以看到当前存储的存储容量和剩余容量等监控数据。 + +5. 在项目**概览**页面,可查看当前项目资源使用情况。 + + +### 步骤 5:访问 ClickHouse 集群 + +1. 以 `admin` 身份登录 KubeSphere 的 Web 控制台,将鼠标悬停在右下角的锤子图标上,选择 **Kubectl**。 + +2. 
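+    如果不确定应用名称,可以先用如下示意命令列出项目中的 ClickHouseInstallation 资源:
+
+    ```bash
+    # 列出 demo-project 项目中的 ClickHouse 集群资源
+    kubectl get chi -n demo-project
+    ```
+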
打开终端窗口,执行如下命令,并输入 ClickHouse 集群用户名和密码。 + + ```bash + $ kubectl edit chi -n + ``` + + {{< notice note >}} + + 以下命令示例中 **app name** 为 `clickhouse-app` ,**project name** 为 `demo-project`。 + + {{}} + + ![get-username-password](/images/docs/v3.3/zh-cn/appstore/external-apps/deploy-clickhouse/get-username-password.png) + +3. 执行如下命令,访问 ClickHouse 集群,并可通过 `show databases` 命令查看数据库。 + + ```bash + $ kubectl exec -it -n -- clickhouse-client --user= --password= + ``` + + {{< notice note >}} + + - 以下命令示例中 **pod name** 为 `chi-clickhouse-app-all-nodes-0-1-0` ,**project name** 为 `demo-project`,**user name** 为 `clickhouse`,**password** 为 `clickh0use0perator`。 + + - 可在**应用负载**的**容器组**下获取 **pod name**。 + + {{}} + + ![use-clickhouse](/images/docs/v3.3/zh-cn/appstore/external-apps/deploy-clickhouse/use-clickhouse.png) diff --git a/content/zh/docs/v3.4/application-store/external-apps/deploy-gitlab.md b/content/zh/docs/v3.4/application-store/external-apps/deploy-gitlab.md new file mode 100644 index 000000000..8218aeb18 --- /dev/null +++ b/content/zh/docs/v3.4/application-store/external-apps/deploy-gitlab.md @@ -0,0 +1,122 @@ +--- +title: "在 KubeSphere 上部署 GitLab" +keywords: 'KubeSphere, Kubernetes, GitLab, 应用商店' +description: '了解如何在 KubeSphere 中部署 GitLab 并访问服务。' +linkTitle: "在 KubeSphere 上部署 GitLab" +weight: 14310 +--- + +[GitLab](https://about.gitlab.com/) 是一个端到端的开源软件开发平台,具有内置的版本控制、问题追踪、代码审查、CI/CD 等功能。 + +本教程演示了如何在 KubeSphere 上部署 GitLab。 + +## 准备工作 + +- 您需要启用 [OpenPitrix 系统](../../../pluggable-components/app-store/)。 +- 您需要为本教程创建一个企业空间、一个项目以及两个帐户(`ws-admin` 和 `project-regular`)。在企业空间中,`ws-admin` 帐户必须被赋予 `workspace-admin` 角色,`project-regular` 帐户必须被赋予 `operator` 角色。如果还未创建好,请参考[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 动手实验 + +### 步骤 1:添加应用仓库 + +1. 以 `ws-admin` 身份登录 KubeSphere。在企业空间中,访问**应用管理**下的**应用仓库**,然后点击**添加**。 + +2. 在出现的对话框中,输入 `main` 作为应用仓库名称,输入 `https://charts.kubesphere.io/main` 作为应用仓库 URL。点击**验证**来验证 URL,如果可用,则会在 URL 右侧看到一个绿色的对号。点击**确定**继续操作。 + +3. 仓库成功导入到 KubeSphere 后,会显示在列表里。 + + +### 步骤 2:部署 GitLab + +1. 登出 KubeSphere,再以 `project-regular` 登录。在您的项目中,访问**应用负载**下的**应用**,然后点击**创建**。 + +2. 在出现的对话框中,选择**从应用模板**。 + +3. 从下拉菜单中选择 `main`,然后点击 **gitlab**。 + +4. 在**应用信息**选项卡和**Chart 文件**选项卡,可以看到控制台的默认配置。点击**安装**继续。 + +5. 在**基本信息**页面,可以看到应用名称、应用版本以及部署位置。本教程使用 `4.2.3 [13.2.2]` 版本。点击**下一步**继续。 + +6. 在**应用设置**页面,使用以下配置替换默认配置,然后点击**安装**。 + + ```yaml + global: + hosts: + domain: demo-project.svc.cluster.local + gitlab-runner: + install: false + gitlab: + webservice: + helmTests: + enabled: false + ``` + + {{< notice note >}} + + `demo-project` 指的是部署 GitLab 的项目名称,请确保使用您自己的项目名称。 + + {{}} + +7. 等待 GitLab 正常运行。 + +8. 访问**工作负载**,可以看到为 GitLab 创建的所有部署和有状态副本集。 + + {{< notice note >}} + + 可能需要过一段时间才能看到所有部署和有状态副本集正常运行。 + + {{}} + +### 步骤 3:获取 root 用户的密码 + +1. 选择**配置** > **保密字典**,在搜索栏输入 `gitlab-initial-root-password`,然后按下键盘上的**回车键**来搜索密钥。 + +2. 点击密钥访问其详情页,然后点击右上角的 icon 查看密码。请确保将密码进行复制。 + + +### 步骤 4:编辑 hosts 文件 + +1. 在本地机器上找到 hosts 文件。 + + {{< notice note >}} + + 对于 Linux,hosts 文件的路径是 `/etc/hosts`;对于 Windows,则是 `c:\windows\system32\drivers\etc\hosts`。 + + {{}} + +2. 将以下条目添加进 hosts 文件中。 + + ``` + 192.168.4.3 gitlab.demo-project.svc.cluster.local + ``` + + {{< notice note >}} + + - `192.168.4.3` 和 `demo-project` 分别指的是部署 GitLab 的 NodeIP 和项目名称,请确保使用自己的 NodeIP 和项目名称。 + - 您可以使用自己 Kubernetes 集群中任意节点的 IP 地址。 + + {{}} + +### 步骤 5:访问 GitLab + +1. 
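+    除了在控制台查看,也可以用如下示意命令确认 Ingress 控制器服务暴露的 NodePort:
+
+    ```bash
+    # 查看 GitLab 自带 Ingress 控制器服务的端口信息
+    kubectl -n demo-project get svc | grep nginx-ingress-controller
+    ```
+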
访问**应用负载**下的**服务**,在搜索栏输入 `nginx-ingress-controller`,然后按下键盘上的**回车键**搜索该服务,可以看到通过端口 `31246` 暴露的服务,您可以使用该端口访问 GitLab。 + + {{< notice note >}} + + 在不同控制台上显示的端口号可能不同,请您确保使用自己的端口号。 + + {{}} + +2. 通过 `http://gitlab.demo-project.svc.cluster.local:31246` 使用 root 帐户及其初始密码 (`root/ojPWrWECLWN0XFJkGs7aAqtitGMJlVfS0fLEDE03P9S0ji34XDoWmxs2MzgZRRWF`) 访问 GitLab。 + + ![access-gitlab](/images/docs/v3.3/zh-cn/appstore/external-apps/deploy-gitlab/access_gitlab.png) + + ![gitlab-console](/images/docs/v3.3/zh-cn/appstore/external-apps/deploy-gitlab/gitlab_console.png) + + {{< notice note >}} + + 根据您 Kubernetes 集群部署位置的不同,您可能需要在安全组中打开端口,并配置相关的端口转发规则。 + + {{}} + diff --git a/content/zh/docs/v3.4/application-store/external-apps/deploy-metersphere.md b/content/zh/docs/v3.4/application-store/external-apps/deploy-metersphere.md new file mode 100644 index 000000000..bd7d91b0d --- /dev/null +++ b/content/zh/docs/v3.4/application-store/external-apps/deploy-metersphere.md @@ -0,0 +1,65 @@ +--- +title: "在 KubeSphere 上部署 MeterSphere" +keywords: 'KubeSphere, Kubernetes, 应用程序, MeterSphere' +description: '了解如何在 KubeSphere 中部署 MeterSphere。' +linkTitle: "在 KubeSphere 上部署 MeterSphere" +weight: 14330 +--- + +MeterSphere 是一站式的开源企业级连续测试平台,涵盖测试跟踪、界面测试和性能测试等功能。 + +本教程演示了如何在 KubeSphere 上部署 MeterSphere。 + +## 准备工作 + +- 您需要启用 [OpenPitrix 系统](../../../pluggable-components/app-store/)。 +- 您需要为本教程创建一个企业空间、一个项目以及两个帐户(`ws-admin` 和 `project-regular`)。在企业空间中,`ws-admin` 帐户必须被赋予 `workspace-admin` 角色,`project-regular` 帐户必须被赋予 `operator` 角色。如果还未创建好,请参考[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## **动手实验** + +### 步骤 1:添加应用仓库 + +1. 以 `ws-admin` 身份登录 KubeSphere。在企业空间中,访问**应用管理**下的**应用仓库**,然后点击**添加**。 + +2. 在出现的对话框中,输入 `metersphere` 作为应用仓库名称,输入 `https://charts.kubesphere.io/test` 作为应用仓库 URL。点击**验证**来验证 URL,如果可用,则会在 URL 右侧看到一个绿色的对号。点击**确定**继续操作。 + +3. 仓库成功导入到 KubeSphere 后,会显示在列表里。 + + +### 步骤 2:部署 MeterSphere + +1. 登出 KubeSphere,再以 `project-regular` 登录。在您的项目中,访问**应用负载**下的**应用**,然后点击**创建**。 + +2. 在出现的对话框中,选择**从应用模板**。 + +3. 从下拉菜单中选择 `metersphere`,然后点击 **metersphere-chart**。 + +4. 在**应用信息**选项卡和**Chart 文件**选项卡,可以看到控制台的默认配置。点击**安装**继续。 + +5. 在**基本信息**页面,可以看到应用名称、应用版本以及部署位置。点击**下一步**继续。 + +6. 在**应用设置**页面,将 `imageTag` 的值从 `master` 改为 `v1.6`,然后点击**安装**。 + +7. 等待 MeterSphere 应用正常运行。 + +8. 访问**工作负载**,可以看到为 MeterSphere 创建的所有部署和有状态副本集。 + + {{< notice note >}} + + 可能需要过一段时间才能看到所有部署和有状态副本集正常运行。 + + {{}} + +### 步骤 3:访问 MeterSphere + +1. 问**应用负载**下的**服务**,可以看到 MeterSphere 服务,其服务类型默认设置为 `NodePort`。 + +2. 
您可以通过 `:` 使用默认帐户及密码 (`admin/metersphere`) 访问 MeterSphere。 + + ![login-metersphere](/images/docs/v3.3/zh-cn/appstore/external-apps/deploy-metersphere/login-metersphere.png) + + {{< notice note >}} + + 根据您 Kubernetes 集群部署位置的不同,您可能需要在安全组中打开端口,并配置相关的端口转发规则。请确保使用自己的 `NodeIP`。 + + {{}} \ No newline at end of file diff --git a/content/zh/docs/v3.4/application-store/external-apps/deploy-radondb-mysql.md b/content/zh/docs/v3.4/application-store/external-apps/deploy-radondb-mysql.md new file mode 100644 index 000000000..301a114b0 --- /dev/null +++ b/content/zh/docs/v3.4/application-store/external-apps/deploy-radondb-mysql.md @@ -0,0 +1,167 @@ +--- +title: "在 KubeSphere 中部署 RadonDB MySQL Operator 和 RadonDB MySQL 集群" +keywords: 'KubeSphere, Kubernetes, 安装, RadonDB MySQL' +description: '了解如何从 KubeSphere 应用商店部署 RadonDB MySQL。' +linkTitle: "部署 RadonDB MySQL Operator 和集群" +weight: 14350 +--- + +[RadonDB MySQL](https://github.com/radondb/radondb-mysql-kubernetes) 是基于 [MySQL](https://MySQL.org) 的开源、云原生、高可用集群解决方案。通过使用 Raft 协议,RadonDB MySQL 可以快速进行故障转移,且不会丢失任何事务。 + +本教程演示了如何在 KubeSphere 上部署 RadonDB MySQL Operator 和 RadonDB MySQL 集群。 + +## 准备工作 + +- 请确保[已启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 +- 您需要创建一个企业空间、一个项目和一个用户供本教程操作使用。本教程中,以 `admin` 身份在企业空间 `demo` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 +- 请确保 KubeSphere 项目网关已开启外网访问。有关更多信息,请参见[项目网关](../../../project-administration/project-gateway/)。 + +## 动手实验 + +### 步骤 1:添加应用仓库 + +1. 登录 KubeSphere 的 Web 控制台。 + +2. 在 `demo` 企业空间中,进入**应用管理**下的**应用仓库**页面,点击**添加**,弹出仓库配置对话框。 + +3. 输入仓库名称和仓库 URL。 + + 输入 `radondb-mysql-operator` 作为应用仓库名称。 + 输入 `https://radondb.github.io/radondb-mysql-kubernetes/` 作为仓库的 URL,并点击**验证**以验证 URL。 + +4. 在 URL 旁边呈现一个绿色的对号,验证通过后,点击**确定**继续。 + + 将仓库成功导入到 KubeSphere 之后,在列表中即可查看 RadonDB MySQL 仓库。 + +![certify URL](/images/docs/v3.3/zh-cn/appstore/external-apps/deploy-radondb-mysql/certify_url.png) + +### 步骤 2:部署 RadonDB MySQL Operator + +1. 在 `demo-project` 项目中,进入**应用负载**下的**应用**页面,点击**创建**。 + +2. 在对话框中,选择**从应用模板**,进入应用模板页面。 + +3. 从下拉菜单中选择 `radondb-mysql-operator` 应用仓库。 + +4. 点击 `mysql-operator` 应用,查看和配置 RadonDB MySQL Operator 应用信息。 + + 在**配置文件**选项卡,可查看和编辑 `.yaml` 配置文件。 + 在**版本**列框区域,可查看和选择版本号。 + + ![operator 配置文件](/images/docs/v3.3/zh-cn/appstore/external-apps/deploy-radondb-mysql/operator_yaml.png) + +5. 点击**部署**,进入 `mysql-operator` 应用基本信息配置页面。 + + 确认应用名称、应用版本,以及配置部署位置。 + +6. 点击**下一步**,进入 `mysql-operator` 应用配置页面。 + + 确认 `values.yaml` 配置信息,并可编辑文件修改配置。 + +7. 点击**部署**,返回**应用模版**页面。 + + 待应用状态切换为`运行中`,则应用部署成功。 + +### 步骤 3:部署 RadonDB MySQL 集群 + +您可以任选一个 [RadonDB MySQL 配置示例](https://github.com/radondb/radondb-mysql-kubernetes/tree/main/config/samples) 部署,或自定义配置部署。 + +以 `mysql_v1alpha1_mysqlcluster.yaml` 模板为例,创建一个 RadonDB MySQL 集群。 + +1. 在右下角**工具箱**中选择 **Kubectl** 工具,打开终端窗口。 + +2. 执行以下命令,安装 RadonDB MySQL 集群。 + + ```kubectl + kubectl apply -f https://github.com/radondb/radondb-mysql-kubernetes/releases/latest/download/mysql_v1alpha1_mysqlcluster.yaml --namespace= + ``` + + {{< notice note >}} + + 未指定项目时,集群将被默认安装在 `kubesphere-controls-system` 项目中。若需指定项目,安装命令需添加 `--namespace=`。 + + {{}} + + **预期结果** + + ```powershell + $ kubectl apply -f https://github.com/radondb/radondb-mysql-kubernetes/releases/latest/download/mysql_v1alpha1_mysqlcluster.yaml --namespace=demo-project + mysqlcluster.mysql.radondb.com/sample created + ``` + +3. 
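+    也可以先用如下示意命令查看 MySQLCluster 自定义资源,确认集群对象已创建:
+
+    ```bash
+    # 查看 demo-project 项目中的 RadonDB MySQL 集群资源
+    kubectl get mysqlcluster -n demo-project
+    ```
+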
集群创建成果后,执行如下命令,可查看 RadonDB MySQL 集群节点服务。 + + ```kubectl + kubectl get statefulset,svc + ``` + + **预期结果** + + ```powershell + $ kubectl get statefulset,svc + NAME READY AGE + statefulset.apps/sample-mysql 3/3 10m + + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + service/default-http-backend ClusterIP 10.96.69.202 80/TCP 3h2m + service/sample-follower ClusterIP 10.96.9.162 3306/TCP 10m + service/sample-leader ClusterIP 10.96.255.188 3306/TCP 10m + service/sample-mysql ClusterIP None 3306/TCP 10m + ``` + +### 步骤 4:查看 RadonDB MySQL 集群状态 + +在 `demo-project` 项目中,查看集群状态。 + +1. 进入**应用负载**下的**服务**页面,可查看集群服务状态。 + +2. 进入**应用负载**下的**工作负载**页面,点击**有状态副本集**,可查看集群状态。 + + 进入一个有状态副本集群详情页面,点击**监控**标签页,可查看一定时间范围内的集群指标。 + +3. 进入**应用负载**下的**容器组**页面,可查看集群节点运行状态。 + +4. 进入**存储**下的**持久卷声明**页面,可查看持久卷声明,所有组件均使用了持久化存储。 + + 查看某个持久卷声明用量信息,以其中一个数据节点为例,可以看到当前存储的存储容量和剩余容量等监控数据。 + +### 步骤 5:访问 RadonDB MySQL + +以下演示在 KubeSphere 访问 RadonDB MySQL 的方式,若需从集群外部访问 RadonDB MySQL,请参考 [RadonDB MySQL 开源项目](https://github.com/radondb/radondb-mysql-kubernetes/)。 + +**方式一:** + +进入 `demo-project` 项目管理页面,通过容器组终端访问 RadonDB MySQL。 + +1. 进入**应用负载**下的**容器组**页面。 + +2. 在**容器组**下,点击集群其中一个容器组名称,进入容器组详情页面。 + +3. 在**资源状态**中**容器**列框下,点击 **mysql** 容器的**终端**图标。 + +4. 在终端窗口中,输入命令连接集群。 + +![访问 RadonDB MySQL](/images/docs/v3.3/zh-cn/appstore/external-apps/deploy-radondb-mysql/pod_terminal.png) + +**方式二:** + +在右下角**工具箱**中选择 **Kubectl** 工具,通过 Kubectl 工具访问 RadonDB MySQL。 + +执行如下命令连接集群,连接成功后即可使用 RadonDB MySQL 应用。 + +```kubectl +kubectl exec -it -c mysql -n -- mysql --user= --password= +``` + +{{< notice note >}} + +以下示例中相应参数取值如下: + +- **pod_name** 为 `sample-mysql-0` +- **project_name** 为 `demo-project` +- **user_name** 为 `radondb_usr` +- **user_password** 为 `RadonDB@123` + +{{}} + +![访问 RadonDB MySQL](/images/docs/v3.3/zh-cn/appstore/external-apps/deploy-radondb-mysql/kubectl_terminal.png) diff --git a/content/zh/docs/v3.4/application-store/external-apps/deploy-tidb.md b/content/zh/docs/v3.4/application-store/external-apps/deploy-tidb.md new file mode 100644 index 000000000..b07db8751 --- /dev/null +++ b/content/zh/docs/v3.4/application-store/external-apps/deploy-tidb.md @@ -0,0 +1,146 @@ +--- +title: "在 KubeSphere 中部署 TiDB Operator 和 TiDB 集群" +keywords: 'KubeSphere, Kubernetes, TiDB, TiDB Operator, TiDB 集群' +description: '了解如何在 KubeSphere 中部署 TiDB Operator 和 TiDB 集群。' +linkTitle: "部署 TiDB Operator 和 TiDB 集群" +weight: 14320 +--- + +[TiDB](https://en.pingcap.com/) 是一个支持混合事务和分析处理 (HTAP) 工作负载的云原生、开源 NewSQL 数据库,具有水平扩缩性、强一致性以及高可用性。 + +本教程演示了如何在 KubeSphere 上部署 TiDB Operator 和 TiDB 集群。 + +## **准备工作** + +- 您需要准备至少 3 个可调度的节点。 +- 您需要启用 [OpenPitrix 系统](../../../pluggable-components/app-store/)。 +- 您需要为本教程创建一个企业空间、一个项目和两个帐户(`ws-admin` 和 `project-regular`)。帐户 `ws-admin` 必须在企业空间中被赋予 `workspace-admin` 角色,帐户 `project-regular` 必须被邀请至项目中赋予 `operator` 角色。若还未创建好,请参考[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## **动手实验** + +### 步骤 1:安装 TiDB Operator CRD + +1. 以 `admin` 身份登录 KubeSphere 的 Web 控制台,使用右下角**工具箱**中的 **Kubectl** 执行以下命令来安装 TiDB Operator CRD: + + ```bash + kubectl apply -f https://raw.githubusercontent.com/pingcap/tidb-operator/v1.1.6/manifests/crd.yaml + ``` + +2. 
预期输出如下所示: + + ```bash + customresourcedefinition.apiextensions.k8s.io/tidbclusters.pingcap.com created + customresourcedefinition.apiextensions.k8s.io/backups.pingcap.com created + customresourcedefinition.apiextensions.k8s.io/restores.pingcap.com created + customresourcedefinition.apiextensions.k8s.io/backupschedules.pingcap.com created + customresourcedefinition.apiextensions.k8s.io/tidbmonitors.pingcap.com created + customresourcedefinition.apiextensions.k8s.io/tidbinitializers.pingcap.com created + customresourcedefinition.apiextensions.k8s.io/tidbclusterautoscalers.pingcap.com created + ``` + +### 步骤 2:添加应用仓库 + +1. 登出 KubeSphere,再以 `ws-admin` 身份登录。在企业空间中,访问**应用管理**下的**应用仓库**,然后点击**添加**。 + +2. 在出现的对话框中,输入 `pingcap` 作为应用仓库名称,输入 `https://charts.pingcap.org` 作为 PingCAP Helm 仓库的 URL。点击**验证**以验证 URL,如果可用,您将会在 URL 旁边看到一个绿色的对号。点击**确定**以继续。 + +3. 将仓库成功导入到 KubeSphere 之后,它将显示在列表中。 + + +### 步骤 3:部署 TiDB Operator + +1. 登出 KubeSphere,再以 `project-regular` 身份登录。在您的项目中,访问**应用负载**下的**应用**,点击**创建**。 + +2. 在出现的对话框中,选择**从应用模板**。 + +3. 从下拉菜单中选择 `pingcap`,然后点击 **tidb-operator**。 + + {{< notice note >}} + + 本教程仅演示如何部署 TiDB Operator 和 TiDB 集群。您也可以按需部署其他工具。 + + {{}} + +4. 在**Chart 文件**选项卡,您可以直接从控制台查看配置,也可以通过点击右上角的图标以下载默认 `values.yaml` 文件。在**版本**下,从下拉菜单中选择一个版本号,点击**安装**。 + +5. 在**基本信息**页面,确认应用名称、应用版本以及部署位置。点击**下一步**以继续。 + +6. 在**应用设置**页面,您可以编辑 `values.yaml` 文件,也可以直接点击**安装**使用默认配置。 + +7. 等待 TiDB Operator 正常运行。 + +8. 访问**工作负载**,可以看到为 TiDB Operator 创建的两个部署。 + + +### 步骤 4:部署 TiDB 集群 + +部署 TiDB 集群的过程与部署 TiDB Operator 的过程相似。 + +1. 访问**应用负载**下的**应用**,再次点击**创建**,然后选择**从应用模板**。 + +2. 在 PingCAP 仓库中,点击 **tidb-cluster**。 + +3. 在**Chart 文件**选项卡,可以查看配置和下载 `values.yaml` 文件。点击**安装**以继续。 + +4. 在**基本信息**页面,确认应用名称、应用版本和部署位置。点击**下一步**以继续。 + +5. 一些 TiDB 组件需要[存储类](../../../cluster-administration/storageclass/)。您可以运行以下命令查看存储类型。 + + ``` + / # kubectl get sc + NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE + csi-high-capacity-legacy csi-qingcloud Delete Immediate true 71m + csi-high-perf csi-qingcloud Delete Immediate true 71m + csi-ssd-enterprise csi-qingcloud Delete Immediate true 71m + csi-standard (default) csi-qingcloud Delete Immediate true 71m + csi-super-high-perf csi-qingcloud Delete Immediate true 71m + ``` + +6. 在**应用设置**页面,将所有 `storageClassName` 字段的默认值从 `local-storage` 更改为您的存储类型名称。例如,您可以根据以上输出将这些默认值更改为 `csi-standard`。 + + {{< notice note >}} + + 此处仅更改字段 `storageClassName` 以提供外部持久化存储。若想在单个节点上部署每个 TiDB 组件(例如 [TiKV](https://docs.pingcap.com/tidb/dev/tidb-architecture#tikv-server) 和 [Placement Driver](https://docs.pingcap.com/tidb/dev/tidb-architecture#placement-driver-pd-server)),请指定 `nodeAffinity` 字段。 + + {{}} + +7. 点击**安装**,然后就可以在列表中看到安装的应用。 + + +### 步骤 5:查看 TiDB 集群状态 + +1. 访问**应用负载**下的**工作负载**,确认所有的 TiDB 集群部署都在正常运行。 + +2. 切换到**有状态副本集**选项卡,可以看到 TiDB、TiKV 和 PD 均正常运行。 + + {{< notice note >}} + + TiKV 和 TiDB 会自动创建,可能要过一段时间才能在列表中显示。 + + {{}} + +3. 点击单个有状态副本集以访问其详情页。在**监控**选项卡下,可以看到一段时间内以折线图显示的指标。 + +4. 在**应用负载**下的**容器组**中,可以看到 TiDB 集群包含两个 TiDB Pod、三个 TiKV Pod 和三个 PD Pod。 + +5. 在**存储**下的**持久卷声明**中,可以看到 TiKV 和 PD 都在使用持久卷。 + +6. 同时,也会监控持久卷声明的使用情况。点击一个持久卷声明以访问其详情页。 + +7. 在项目的**概览**页面,可以看到当前项目的资源使用情况列表。 + + +### 步骤 6: 访问 TiDB 集群 + +1. 访问**应用负载**下的**服务**,可以看到所有服务的详细信息。由于服务类型默认设置为 `NodePort`,因此您可以通过集群外部的 Node IP 地址进行访问。 + +3. 
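+    由于 TiDB 兼容 MySQL 协议,可以先用 MySQL 客户端通过 `tidb` 服务暴露的 NodePort 连接数据库进行验证(示意,地址、端口与密码以实际配置为准):
+
+    ```bash
+    # 通过节点 IP 和 tidb 服务的 NodePort 连接 TiDB
+    mysql -h ${NodeIP} -P ${NodePort} -u root -p
+    ```
+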
TiDB 集成了 Prometheus 和 Grafana 以监控数据库集群的性能。例如,您可以通过 `:` 访问 Grafana 以查看指标。 + + ![tidb-grafana](/images/docs/v3.3/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-grafana.png) + + {{< notice note >}} + + 根据 Kubernetes 集群部署位置的不同,您可能需要在安全组中打开端口,并配置相关的端口转发规则。 + + {{}} + diff --git a/content/zh/docs/v3.4/cluster-administration/_index.md b/content/zh/docs/v3.4/cluster-administration/_index.md new file mode 100644 index 000000000..b6240e6d9 --- /dev/null +++ b/content/zh/docs/v3.4/cluster-administration/_index.md @@ -0,0 +1,20 @@ +--- +title: "集群管理" +description: "理解管理集群的基础知识" +layout: "second" + +linkTitle: "集群管理" + +weight: 8000 + +icon: "/images/docs/v3.3/docs.svg" + +--- + +在 KubeSphere 中,您可以使用交互式 Web 控制台或内置的原生命令行工具 `kubectl` 来设置集群并配置其功能。作为集群管理员,您将负责一系列任务,包括在节点上管理调度并添加标签、控制集群可见性、​​监控集群状态、设置集群的告警规则和通知规则,以及配置存储和日志收集解决方案等。 + +{{< notice note >}} + +本章未介绍多集群管理。有关此功能的更多信息,请参见[多集群管理](../multicluster-management/)。 + +{{}} \ No newline at end of file diff --git a/content/zh/docs/v3.4/cluster-administration/application-resources-monitoring.md b/content/zh/docs/v3.4/cluster-administration/application-resources-monitoring.md new file mode 100644 index 000000000..a8840c3e9 --- /dev/null +++ b/content/zh/docs/v3.4/cluster-administration/application-resources-monitoring.md @@ -0,0 +1,29 @@ +--- +title: "应用资源监控" +keywords: "Kubernetes, KubeSphere, 资源, 监控" +description: "监控集群中的应用资源,例如不同项目的部署数量和 CPU 使用情况。" +linkTitle: "应用资源监控" +weight: 8300 +--- + + +除了在物理资源级别监控数据外,集群管理员还需要密切跟踪整个平台上的应用资源,例如项目和 DevOps 项目的数量,以及特定类型的工作负载和服务的数量。**应用资源**提供了平台的资源使用情况和应用级趋势的汇总信息。 + +## 准备工作 + +您需要一个被授予**集群管理**权限的用户。例如,您可以直接用 `admin` 用户登录控制台,或创建一个具有**集群管理**权限的角色然后将此角色授予一个用户。 + +## 使用情况 + +1. 点击左上角的**平台管理**,然后选择**集群管理**。 + +2. 如果您已启用了[多集群功能](../../multicluster-management/)并已导入了成员集群,您可以选择一个集群以查看其应用程序资源。如果尚未启用该功能,请直接进行下一步。 + +3. 在左侧导航栏选择**监控告警**下的**应用资源**以查看应用资源概览,包括集群中所有资源使用情况的汇总信息。 + +4. **集群资源用量**和**应用资源用量**提供最近 7 天的监控数据,并支持自定义时间范围查询。 + +5. 点击特定资源以查看特定时间段内的使用详情和趋势,例如**集群资源用量**下的 **CPU**。在详情页面,您可以按项目查看特定的监控数据,以及自定义时间范围查看资源的确切使用情况。 + +## 用量排行 +**用量排行**支持按照资源使用情况对项目进行排序,帮助平台管理员了解当前集群中每个项目的资源使用情况,包括 **CPU 用量**、**内存用量**、**容器组数量**、**网络流出速率**和**网络流入速率**。您可以选择下拉列表中的任一指标对项目按升序或降序进行排序。此功能可以帮助您快速定位大量消耗 CPU 或内存资源的应用程序(容器组)。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/cluster-administration/cluster-settings/_index.md b/content/zh/docs/v3.4/cluster-administration/cluster-settings/_index.md new file mode 100644 index 000000000..374421aae --- /dev/null +++ b/content/zh/docs/v3.4/cluster-administration/cluster-settings/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "集群设置" +weight: 8600 + +_build: + render: false +--- diff --git a/content/zh/docs/v3.4/cluster-administration/cluster-settings/cluster-gateway.md b/content/zh/docs/v3.4/cluster-administration/cluster-settings/cluster-gateway.md new file mode 100644 index 000000000..9076f29b4 --- /dev/null +++ b/content/zh/docs/v3.4/cluster-administration/cluster-settings/cluster-gateway.md @@ -0,0 +1,84 @@ +--- +title: "集群网关" +keywords: 'KubeSphere, Kubernetes, 集群, 网关, NodePort, LoadBalancer' +description: '学习如何在 KubeSphere 中创建集群级别的网关。' +linkTitle: "集群网关" +weight: 8630 + +--- + +KubeSphere 3.3 提供集群级别的网关,使所有项目共用一个全局网关。本文档介绍如何在 KubeSphere 设置集群网关。 + +## 准备工作 + +您需要创建一个拥有 `platform-admin` 角色的用户,例如:`admin`。有关更多信息,请参见[创建企业空间、项目、用户和平台角色](../../../quick-start/create-workspace-and-project/). + +## 创建集群网关 + +1. 以 `admin` 身份登录 web 控制台,点击左上角的**平台管理**并选择**集群管理**。 + +2. 点击导航面板中**集群设置**下的**网关设置**,选择**集群网关**选项卡,并点击**启用网关**。 + +3. 
在显示的对话框中,从以下的两个选项中选择网关的访问模式: + + - **NodePort**:通过网关使用对应节点端口来访问服务。NodePort 访问模式提供以下配置: + - **链路追踪**:打开**链路追踪**开关以启用 KubeSphere 的链路追踪功能。功能开启后,如应用路由不可访问,请检查是否为应用路由是否添加注解(`nginx.ingress.kubernetes.io/service-upstream: true`)。如注解没有添加,则添加注解至您的应用路由中。 + - **配置选项**:在集群网关中加入键值对。 + - **LoadBalancer**:通过网关使用单个 IP 地址访问服务。LoadBalancer 访问模式提供以下配置: + - **链路追踪**:打开**链路追踪**开关以启用 KubeSphere 的链路追踪功能。功能开启后,如应用路由不可访问,请检查是否为应用路由是否添加注解(`nginx.ingress.kubernetes.io/service-upstream: true`)。如注解没有添加,则添加注解至您的应用路由中。 + - **负载均衡器提供商**:从下拉列表中选择负载均衡器提供商。 + - **注解**:添加注解至集群网关。 + - **配置选项**: 添加键值对至集群网关。 + + {{< notice info >}} + + - 为了使用链路追踪功能,请在创建自制应用时打开**应用治理**。 + - 有关如何使用配置选项的更多信息,请参见 [Configuration options](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#configuration-options)。 + + {{}} + +4. 点击**确定**创建集群网关。 + +5. 在这个页面中会展示创建的集群网关和该网关的基本信息。 + + {{< notice note >}} + + 同时还创建了名为 kubesphere-router-kubesphere-system 的网关,作为集群中所有项目的全局网关。 + + {{}} + +6. 点击**管理**,从下拉菜单中选择一项操作: + + - **查看详情**:转至集群网关详情页面。 + - **编辑**:编辑集群网关配置。 + - **关闭**:关闭集群网关。 + +7. 创建集群网关后,有关如何创建应用路由的更多信息,请参见[应用路由](../../../project-user-guide/application-workloads/routes/#create-a-route)。 + +## 集群网关详情页面 + +1. 在**集群网关**选项卡下,点击集群网关右侧的**管理**,选择**查看详情**以打开其详情页面。 +2. 在详情页面,点击**编辑**以配置集群网关,或点击**关闭**以关闭网关。 +3. 点击**监控**选项卡,查看集群网关的监控指标。 +4. 点击**配置选项**选项卡以查看集群网关的配置选项。 +5. 点击**网关日志**选项卡以查看集群网关日志。 +6. 点击**资源状态**选项卡,以查看集群网关的负载状态。点击 iconicon 按钮,以增加或减少副本数量。 +7. 点击**元数据**选项卡,以查看集群网关的注解。 + +## 查看项目网关 + +在**网关设置**页面,点击**项目网关**选项卡,以查看项目网关。 + +点击项目网关右侧的 icon ,从下拉菜单中选择操作: + +- **编辑**:编辑项目网关的配置。 +- **关闭**:关闭项目网关。 + +{{< notice note >}} + +如果在创建集群网关之前存在项目网关,则项目网关地址可能会在集群网关地址和项目网关地址之间切换。建议您只使用集群网关或项目网关。 + +{{}} + +关于如何创建项目网关的更多信息,请参见[项目网关](../../../project-administration/project-gateway/)。 + diff --git a/content/zh/docs/v3.4/cluster-administration/cluster-settings/cluster-visibility-and-authorization.md b/content/zh/docs/v3.4/cluster-administration/cluster-settings/cluster-visibility-and-authorization.md new file mode 100644 index 000000000..0b5c6b61c --- /dev/null +++ b/content/zh/docs/v3.4/cluster-administration/cluster-settings/cluster-visibility-and-authorization.md @@ -0,0 +1,53 @@ +--- +title: "集群可见性和授权" +keywords: "集群可见性, 集群管理" +description: "了解如何设置集群可见性和授权。" +linkTitle: "集群可见性和授权" +weight: 8610 +--- + +在 KubeSphere 中,您可以通过授权将一个集群分配给多个企业空间,让企业空间资源都可以在该集群上运行。同时,一个企业空间也可以关联多个集群。拥有必要权限的企业空间用户可以使用分配给该企业空间的集群来创建多集群项目。 + +本指南演示如何设置集群可见性。 + +## 准备工作 +* 您需要启用[多集群功能](../../../multicluster-management/)。 +* 您需要有一个企业空间和一个拥有创建企业空间权限的帐户,例如 `ws-manager`。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 设置集群可见性 + +### 在创建企业空间时选择可用集群 + +1. 使用拥有创建企业空间权限的用户登录 KubeSphere,例如 `ws-manager`。 + +2. 点击左上角的**平台管理**,选择**访问控制**。在左侧导航栏选择**企业空间**,然后点击**创建**。 + +3. 输入企业空间的基本信息,点击**下一步**。 + +4. 在**集群设置**页面,您可以看到可用的集群列表,选择要分配给企业空间的集群并点击**创建**。 + +5. 创建企业空间后,拥有必要权限的企业空间成员可以创建资源,在关联集群上运行。 + + {{< notice warning >}} + +尽量不要在主集群上创建资源,避免负载过高导致多集群稳定性下降。 + +{{}} + +### 在创建企业空间后设置集群可见性 + +创建企业空间后,您可以通过授权向该企业空间分配其他集群,或者将集群从企业空间中解绑。按照以下步骤调整集群可见性。 + +1. 使用拥有集群管理权限的帐户登录 KubeSphere,例如 `admin`。 + +2. 点击左上角的**平台管理**,选择**集群管理**。从列表中选择一个集群查看集群信息。 + +3. 在左侧导航栏找到**集群设置**,选择**集群可见性**。 + +4. 您可以看到已授权企业空间的列表,这意味着所有这些企业空间中的资源都能使用当前集群。 + +5. 
点击**编辑可见性**设置集群可见性。您可以选择让新的企业空间使用该集群,或者将该集群从企业空间解绑。 + +### 将集群设置为公开集群 + +您可以打开**设置为公开集群**,以便平台用户访问该集群,并在该集群上创建和调度资源。 diff --git a/content/zh/docs/v3.4/cluster-administration/cluster-settings/log-collections/_index.md b/content/zh/docs/v3.4/cluster-administration/cluster-settings/log-collections/_index.md new file mode 100644 index 000000000..bce4fe493 --- /dev/null +++ b/content/zh/docs/v3.4/cluster-administration/cluster-settings/log-collections/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "日志接收器" +weight: 8620 + +_build: + render: false +--- \ No newline at end of file diff --git a/content/zh/docs/v3.4/cluster-administration/cluster-settings/log-collections/add-es-as-receiver.md b/content/zh/docs/v3.4/cluster-administration/cluster-settings/log-collections/add-es-as-receiver.md new file mode 100644 index 000000000..70a1807f8 --- /dev/null +++ b/content/zh/docs/v3.4/cluster-administration/cluster-settings/log-collections/add-es-as-receiver.md @@ -0,0 +1,34 @@ +--- +title: "添加 Elasticsearch 作为接收器" +keywords: 'Kubernetes, 日志, Elasticsearch, Pod, 容器, Fluentbit, 输出' +description: '了解如何添加 Elasticsearch 来接收容器日志、资源事件或审计日志。' +linkTitle: "添加 Elasticsearch 作为接收器" +weight: 8622 +--- +您可以在 KubeSphere 中使用 Elasticsearch、Kafka 和 Fluentd 日志接收器。本教程演示如何添加 Elasticsearch 接收器。 + +## 准备工作 + +- 您需要一个被授予**集群管理**权限的用户。例如,您可以直接用 `admin` 用户登录控制台,或创建一个具有**集群管理**权限的角色然后将此角色授予一个用户。 +- 添加日志接收器前,您需要启用组件 `logging`、`events` 或 `auditing`。有关更多信息,请参见[启用可插拔组件](../../../../pluggable-components/)。本教程启用 `logging` 作为示例。 + +## 添加 Elasticsearch 作为接收器 + +1. 以 `admin` 身份登录 KubeSphere 的 Web 控制台。点击左上角的**平台管理**,然后选择**集群管理**。 + + {{< notice note >}} + +如果您启用了[多集群功能](../../../../multicluster-management/),您可以选择一个集群。 + +{{}} + +2. 在左侧导航栏,选择**集群设置**下的**日志接收器**。 + +3. 点击**添加日志接收器**并选择 **Elasticsearch**。 + +4. 提供 Elasticsearch 服务地址和端口信息。 + +5. Elasticsearch 会显示在**日志接收器**页面的接收器列表中,状态为**收集中**。 + +6. 
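+    除了在控制台验证,也可以直接调用 Elasticsearch 接口查看日志索引是否在持续生成(示意,服务地址与端口以实际添加的接收器配置为准):
+
+    ```bash
+    # 列出索引,检查是否出现按日期生成的日志索引
+    curl http://${ElasticsearchAddress}:9200/_cat/indices?v
+    ```
+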
若要验证 Elasticsearch 是否从 Fluent Bit 接收日志,从右下角的**工具箱**中点击**日志查询**,在控制台中搜索日志。有关更多信息,请参阅[日志查询](../../../../toolbox/log-query/)。 + diff --git a/content/zh/docs/v3.4/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver.md b/content/zh/docs/v3.4/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver.md new file mode 100644 index 000000000..dc90d4e52 --- /dev/null +++ b/content/zh/docs/v3.4/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver.md @@ -0,0 +1,154 @@ +--- +title: "添加 Fluentd 作为接收器" +keywords: 'Kubernetes, 日志, Fluentd, 容器组, 容器, Fluentbit, 输出' +description: '了解如何添加 Fluentd 来接收容器日志、资源事件或审计日志。' +linkTitle: "添加 Fluentd 作为接收器" +weight: 8624 +--- +您可以在 KubeSphere 中使用 Elasticsearch、Kafka 和 Fluentd 日志接收器。本教程演示: + +- 创建 Fluentd 部署以及对应的服务(Service)和配置字典(ConfigMap)。 +- 添加 Fluentd 作为日志接收器以接收来自 Fluent Bit 的日志,并输出为标准输出。 +- 验证 Fluentd 能否成功接收日志。 + +## 准备工作 + +- 您需要一个被授予**集群管理**权限的用户。例如,您可以直接用 `admin` 用户登录控制台,或创建一个具有**集群管理**权限的角色然后将此角色授予一个用户。 + +- 添加日志接收器前,您需要启用组件 `logging`、`events` 或 `auditing`。有关更多信息,请参见[启用可插拔组件](../../../../pluggable-components/)。本教程启用 `logging` 作为示例。 + +## 步骤 1:创建 Fluentd 部署 + +由于内存消耗低,KubeSphere 选择 Fluent Bit。Fluentd 一般在 Kubernetes 中以守护进程集的形式部署,在每个节点上收集容器日志。此外,Fluentd 支持多个插件。因此,Fluentd 会以部署的形式在 KubeSphere 中创建,将从 Fluent Bit 接收到的日志发送到多个目标,例如 S3、MongoDB、Cassandra、MySQL、syslog 和 Splunk 等。 + +执行以下命令: + +{{< notice note >}} + +- 以下命令将在默认命名空间 `default` 中创建 Fluentd 部署、服务和配置字典,并为该 Fluentd 配置字典添加 `filter` 以排除 `default` 命名空间中的日志,避免 Fluent Bit 和 Fluentd 重复日志收集。 +- 如果您想要将 Fluentd 部署至其他命名空间,请修改以下命令中的命名空间名称。 + +{{}} + +```yaml +cat < + @type forward + port 24224 + + + # Because this will send logs Fluentd received to stdout, + # to avoid Fluent Bit and Fluentd loop logs collection, + # add a filter here to avoid sending logs from the default namespace to stdout again + + @type grep + + key $.kubernetes.namespace_name + pattern /^default$/ + + + + # Send received logs to stdout for demo/test purpose only + # Various output plugins are supported to output logs to S3, MongoDB, Cassandra, MySQL, syslog, Splunk, etc. + + @type stdout + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: fluentd + name: fluentd + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + app: fluentd + template: + metadata: + labels: + app: fluentd + spec: + containers: + - image: fluentd:v1.9.1-1.0 + imagePullPolicy: IfNotPresent + name: fluentd + ports: + - containerPort: 24224 + name: forward + protocol: TCP + - containerPort: 5140 + name: syslog + protocol: TCP + volumeMounts: + - mountPath: /fluentd/etc + name: config + readOnly: true + volumes: + - configMap: + defaultMode: 420 + name: fluentd-config + name: config +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: fluentd + name: fluentd + namespace: default +spec: + ports: + - name: forward + port: 24224 + protocol: TCP + targetPort: forward + selector: + app: fluentd + sessionAffinity: None + type: ClusterIP +EOF +``` + +## 步骤 2:添加 Fluentd 作为日志接收器 + +1. 以 `admin` 身份登录 KubeSphere 的 Web 控制台。点击左上角的**平台管理**,然后选择**集群管理**。 + + {{< notice note >}} + + 如果您启用了[多集群功能](../../../../multicluster-management/),您可以选择一个集群。 + + {{}} + +2. 在**集群管理**页面,选择**集群设置**下的**日志接收器**。 + +3. 点击**添加日志接收器**并选择 **Fluentd**。 + +4. 输入 **Fluentd** 服务地址和端口信息。 + +5. Fluentd 会显示在**日志接收器**页面的接收器列表中,状态为**收集中**。 + + +## 步骤 3:验证 Fluentd 能否从 Fluent Bit 接收日志 + +1. 在**集群管理**页面点击**应用负载**。 + +2. 点击**工作负载**,并在**部署**选项卡中选择 `default` 项目。 + +3. 
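+    也可以直接用 kubectl 查看 Fluentd 的标准输出,确认日志已被转发(示意命令):
+
+    ```bash
+    # 查看 default 项目中 Fluentd 部署的最近日志
+    kubectl -n default logs deployment/fluentd --tail=20
+    ```
+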
diff --git a/content/zh/docs/v3.4/cluster-administration/cluster-settings/log-collections/add-kafka-as-receiver.md b/content/zh/docs/v3.4/cluster-administration/cluster-settings/log-collections/add-kafka-as-receiver.md
new file mode 100644
index 000000000..4e452637c
--- /dev/null
+++ b/content/zh/docs/v3.4/cluster-administration/cluster-settings/log-collections/add-kafka-as-receiver.md
@@ -0,0 +1,131 @@
+---
+title: "添加 Kafka 作为接收器"
+keywords: 'Kubernetes, 日志, Kafka, Pod, 容器, Fluentbit, 输出'
+description: '了解如何添加 Kafka 来接收容器日志、资源事件或审计日志。'
+linkTitle: "添加 Kafka 作为接收器"
+weight: 8623
+---
+您可以在 KubeSphere 中使用 Elasticsearch、Kafka 和 Fluentd 日志接收器。本教程演示:
+
+- 部署 [strimzi-kafka-operator](https://github.com/strimzi/strimzi-kafka-operator),然后通过创建 `Kafka` 和 `KafkaTopic` CRD 以创建 Kafka 集群和 Kafka 主题。
+- 添加 Kafka 作为日志接收器以从 Fluent Bit 接收日志。
+- 使用 [Kafkacat](https://github.com/edenhill/kafkacat) 验证 Kafka 集群是否能接收日志。
+
+## 准备工作
+
+- 您需要一个被授予**集群管理**权限的用户。例如,您可以直接用 `admin` 用户登录控制台,或创建一个具有**集群管理**权限的角色然后将此角色授予一个用户。
+- 添加日志接收器前,您需要启用组件 `logging`、`events` 或 `auditing`。有关更多信息,请参见[启用可插拔组件](../../../../pluggable-components/)。本教程启用 `logging` 作为示例。
+
+## 步骤 1:创建 Kafka 集群和 Kafka 主题
+
+您可以使用 [strimzi-kafka-operator](https://github.com/strimzi/strimzi-kafka-operator) 创建 Kafka 集群和 Kafka 主题。如果您已经有了一个 Kafka 集群,您可以直接从下一步开始。
+
+1. 在 `default` 命名空间中安装 [strimzi-kafka-operator](https://github.com/strimzi/strimzi-kafka-operator):
+
+   ```bash
+   helm repo add strimzi https://strimzi.io/charts/
+   ```
+
+   ```bash
+   helm install --name kafka-operator -n default strimzi/strimzi-kafka-operator
+   ```
+
+
+2. 运行以下命令在 `default` 命名空间中创建 Kafka 集群和 Kafka 主题,该命令所创建的 Kafka 和 Zookeeper 集群的存储类型为 `ephemeral`,使用 `emptyDir` 进行演示。若要在生产环境下配置储存类型,请参见 [kafka-persistent](https://github.com/strimzi/strimzi-kafka-operator/blob/0.19.0/examples/kafka/kafka-persistent.yaml)。
+
+   ```yaml
+   cat <<EOF | kubectl apply -f -
+   apiVersion: kafka.strimzi.io/v1beta1
+   kind: Kafka
+   metadata:
+     name: my-cluster
+     namespace: default
+   spec:
+     kafka:
+       version: 2.5.0
+       replicas: 3
+       listeners:
+         plain: {}
+         tls: {}
+       config:
+         offsets.topic.replication.factor: 3
+         transaction.state.log.replication.factor: 3
+         transaction.state.log.min.isr: 2
+         log.message.format.version: '2.5'
+       storage:
+         type: ephemeral
+     zookeeper:
+       replicas: 3
+       storage:
+         type: ephemeral
+     entityOperator:
+       topicOperator: {}
+       userOperator: {}
+   ---
+   apiVersion: kafka.strimzi.io/v1beta1
+   kind: KafkaTopic
+   metadata:
+     name: my-topic
+     namespace: default
+     labels:
+       strimzi.io/cluster: my-cluster
+   spec:
+     partitions: 3
+     replicas: 3
+   EOF
+   ```
+
+## 步骤 2:添加 Kafka 作为日志接收器
+
+1. 以 `admin` 身份登录 KubeSphere 的 Web 控制台。点击左上角的**平台管理**,然后选择**集群管理**。
+
+   {{< notice note >}}
+
+   如果您启用了[多集群功能](../../../../multicluster-management/),您可以选择一个集群。
+
+   {{</ notice >}}
+
+2. 在**集群管理**页面,选择**集群设置**下的**日志接收器**。
+
+3. 点击**添加日志接收器**并选择 **Kafka**。输入 Kafka 服务地址和端口信息,然后点击**确定**继续。
+
+   | 服务地址 | 端口号 |
+   | ------------------------------------------------------- | ---- |
+   | my-cluster-kafka-0.my-cluster-kafka-brokers.default.svc | 9092 |
+   | my-cluster-kafka-1.my-cluster-kafka-brokers.default.svc | 9092 |
+   | my-cluster-kafka-2.my-cluster-kafka-brokers.default.svc | 9092 |
+
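如果后续还计划为资源事件或审计日志添加 Kafka 接收器,可以参考下面的清单在同一个 strimzi 集群中预先创建额外的主题。该清单仅为示意,沿用上文 `my-cluster` 集群和 `default` 命名空间的假设,主题名称 `ks-events-demo` 为假设值,实际主题名称请以接收器的配置为准:

```yaml
# 仅为示意:在同一 strimzi 集群中再创建一个主题
apiVersion: kafka.strimzi.io/v1beta1
kind: KafkaTopic
metadata:
  name: ks-events-demo        # 假设的主题名称,可自行修改
  namespace: default
  labels:
    strimzi.io/cluster: my-cluster
spec:
  partitions: 3
  replicas: 3
```

验证消费时,可将下一步 Kafkacat 命令中 `-t` 参数指定的主题替换为相应的主题名称。

+4. 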
运行以下命令验证 Kafka 集群是否能从 Fluent Bit 接收日志: + + ```bash + # Start a util container + kubectl run --rm utils -it --generator=run-pod/v1 --image arunvelsriram/utils bash + # Install Kafkacat in the util container + apt-get install kafkacat + # Run the following command to consume log messages from kafka topic: my-topic + kafkacat -C -b my-cluster-kafka-0.my-cluster-kafka-brokers.default.svc:9092,my-cluster-kafka-1.my-cluster-kafka-brokers.default.svc:9092,my-cluster-kafka-2.my-cluster-kafka-brokers.default.svc:9092 -t my-topic + ``` \ No newline at end of file diff --git a/content/zh/docs/v3.4/cluster-administration/cluster-settings/log-collections/introduction.md b/content/zh/docs/v3.4/cluster-administration/cluster-settings/log-collections/introduction.md new file mode 100644 index 000000000..1d6aca019 --- /dev/null +++ b/content/zh/docs/v3.4/cluster-administration/cluster-settings/log-collections/introduction.md @@ -0,0 +1,87 @@ +--- +title: "介绍" +keywords: 'Kubernetes, 日志, Elasticsearch, Kafka, Fluentd, Pod, 容器, Fluentbit, 输出' +description: '了解集群日志接收器的基础知识,包括工具和一般步骤。' +linkTitle: "介绍" +weight: 8621 +--- + +KubeSphere 提供灵活的日志接收器配置方式。基于 [Fluent Operator](https://github.com/fluent/fluent-operator),用户可以轻松添加、修改、删除、启用或禁用 Elasticsearch、Kafka 和 Fluentd 接收器。接收器添加后,日志会发送至该接收器。 + +此教程简述在 KubeSphere 中添加日志接收器的一般性步骤。 + +## 准备工作 + +- 您需要一个被授予**集群管理**权限的用户。例如,您可以直接用 `admin` 用户登录控制台,或创建一个具有**集群管理**权限的角色然后将此角色授予一个用户。 + +- 添加日志接收器前,您需要启用组件 `logging`、`events` 或 `auditing`。有关更多信息,请参见[启用可插拔组件](../../../../pluggable-components/)。 + +## 为容器日志添加日志接收器 + +若要添加日志接收器: + +1. 以 `admin` 身份登录 KubeSphere 的 Web 控制台。 + +2. 点击左上角的**平台管理**,然后选择**集群管理**。 + + {{< notice note >}} + + 如果您启用了[多集群功能](../../../../multicluster-management/),您可以选择一个集群。 + + {{}} + +3. 选择**集群设置**下的**日志接收器**。 + +4. 在日志接收器列表页,点击**添加日志接收器**。 + + {{< notice note >}} + +- 每个接收器类型至多可以添加一个接收器。 +- 可以同时添加不同类型的接收器。 + +{{}} + +### 添加 Elasticsearch 作为日志接收器 + +如果 [ClusterConfiguration](https://github.com/kubesphere/kubekey/blob/release-2.2/docs/config-example.md) 中启用了 `logging`、`events` 或 `auditing`,则会添加默认的 Elasticsearch 接收器,服务地址会设为 Elasticsearch 集群。 + +当 `logging`、`events` 或 `auditing` 启用时,如果 [ClusterConfiguration](https://github.com/kubesphere/kubekey/blob/release-2.2/docs/config-example.md) 中未指定 `externalElasticsearchHost` 和 `externalElasticsearchPort`,则内置 Elasticsearch 集群会部署至 Kubernetes 集群。内置 Elasticsearch 集群仅用于测试和开发。生产环境下,建议您集成外置 Elasticsearch 集群。 + +日志查询需要依靠所配置的内置或外置 Elasticsearch 集群。 + +如果默认的 Elasticsearch 日志接收器被删除,请参考[添加 Elasticsearch 作为接收器](../add-es-as-receiver/)重新添加。 + +### 添加 Kafka 作为日志接收器 + +Kafka 往往用于接收日志,并作为 Spark 等处理系统的代理 (Broker)。[添加 Kafka 作为接收器](../add-kafka-as-receiver/)演示如何添加 Kafka 接收 Kubernetes 日志。 + +### 添加 Fluentd 作为日志接收器 + +如果您需要将日志输出到除 Elasticsearch 或 Kafka 以外的其他地方,您可以添加 Fluentd 作为日志接收器。Fluentd 支持多种输出插件,可以将日志发送至多个目标,例如 S3、MongoDB、Cassandra、MySQL、syslog 和 Splunk 等。[添加 Fluentd 作为接收器](../add-fluentd-as-receiver/)演示如何添加 Fluentd 接收 Kubernetes 日志。 + +## 为资源事件或审计日志添加日志接收器 + +自 KubeSphere v3.0.0 起,资源事件和审计日志可以通过和容器日志相同的方式进行存档。如果在 [ClusterConfiguration](https://github.com/kubesphere/kubekey/blob/release-2.2/docs/config-example.md) 中启用了 `events` 或 `auditing`,**日志接收器**页面会对应显示**资源事件**或**审计日志**选项卡。您可以前往对应选项卡为资源事件或审计日志配置日志接收器。 + +容器日志、资源事件和审计日志应存储在不同的 Elasticsearch 索引中以便在 KubeSphere 中进行搜索。系统以`<索引前缀>-<年-月-日>`格式自动生成索引。 + +## 启用或停用日志接收器 + +无需新增或删除日志接收器,您可以随时启用或停用日志接收器,具体步骤如下: + +1. 在**日志接收器**页面,点击一个日志接收器并进入其详情页面。 +2. 点击**更多操作**并选择**更改状态**。 + +3. 选择**收集中**或**关闭**以启用或停用该日志接收器。 + +4. 停用后,日志接收器的状态会变为**关闭**,激活时状态为**收集中**。 + + +## 编辑或删除日志接收器 + +您可以编辑或删除日志接收器: + +1. 
在**日志接收器**页面,点击一个日志接收器并进入其详情页面。 +2. 点击**编辑**或从下拉菜单中选择**编辑 YAML** 以编辑日志接收器。 + +3. 点击**删除**以删除日志接收器。 diff --git a/content/zh/docs/v3.4/cluster-administration/cluster-status-monitoring.md b/content/zh/docs/v3.4/cluster-administration/cluster-status-monitoring.md new file mode 100644 index 000000000..cbc554174 --- /dev/null +++ b/content/zh/docs/v3.4/cluster-administration/cluster-status-monitoring.md @@ -0,0 +1,137 @@ +--- +title: "集群状态监控" +keywords: "Kubernetes, KubeSphere, 状态, 监控" +description: "根据不同的指标(包括物理资源、etcd 和 API server)监控集群如何运行。" +linkTitle: "集群状态监控" +weight: 8200 +--- + +KubeSphere 支持对集群 CPU、内存、网络和磁盘等资源的相关指标进行监控。在**集群状态**页面,您可以查看历史监控数据并根据不同资源的使用率对节点进行排序。 + +## 准备工作 + +您需要一个被授予**集群管理**权限的用户。例如,您可以直接用 `admin` 用户登录控制台,或创建一个具有**集群管理**权限的角色然后将此角色授予一个用户。 + +## 集群状态监控 + +1. 点击左上角的**平台管理**,然后选择**集群管理**。 + +2. 如果您已启用了[多集群功能](../../multicluster-management/)并已导入了成员集群,您可以选择一个特定集群以查看其应用程序资源。如果尚未启用该功能,请直接进行下一步。 + +3. 在左侧导航栏选择**监控告警**下的**集群状态**以查看集群状态概览,包括**集群节点状态**、**组件状态**、**集群资源用量**、**etcd 监控**和**服务组件监控**。 + +### 集群节点状态 + +1. **集群节点状态**显示在线节点和所有节点的数量。您可以点击**节点在线状态**跳转到**集群节点**页面以查看所有节点的实时资源使用情况。 + +2. 在**集群节点**页面,点击节点名称可打开**运行状态**页面查看**资源用量**,**已分配资源**和**健康状态**。 + +3. 点击**监控**选项卡,可以查看节点在特定时间范围内的各种运行指标,包括 **CPU 用量**、**CPU 平均负载**、**内存用量**、**磁盘用量**、**Inode 用量**、**IOPS**、**磁盘吞吐**和**网络带宽**。 + + {{< notice tip >}} + +您可以在右上角的下拉列表中自定义时间范围查看历史数据。 + +{{}} + +### 组件状态 + +KubeSphere 监控集群中各种服务组件的健康状态。当关键组件发生故障时,系统可能会变得不可用。KubeSphere 的监控机制确保平台可以在组件出现故障时将所有问题通知租户,以便快速定位问题并采取相应的措施。 + +1. 在**集群状态**页面,点击**组件状态**区域的组件可查看其状态。 + +2. **系统组件**页面列出了所有的组件。标记为绿色的组件是正常运行的组件,标记为橙色的组件存在问题,需要特别关注。 + +{{< notice tip >}} +标记为橙色的组件可能会由于各种原因在一段时间后变为绿色,例如重试拉取镜像或重新创建实例。您可以点击一个组件查看其服务详情。 +{{}} + +### 集群资源用量 + +**集群资源用量**显示集群中所有节点的 **CPU 用量**、**内存用量**、**磁盘用量**和**容器组数量**。您可以点击左侧的饼图切换指标。右侧的曲线图显示一段时间内指示的变化趋势。 + +## 物理资源监控 + +您可以利用**物理资源监控**页面提供的数据更好地掌控物理资源状态,并建立正常资源和集群性能的标准。KubeSphere 允许用户查看最近 7 天的集群监控数据,包括 **CPU 用量**、**内存用量**、**CPU 平均负载(1 分钟/5 分钟/15 分钟)**、**磁盘用量**、**Inode 用量**、**磁盘吞吐(读写)**、**IOPS(读写)**、**网络带宽**和**容器组状态**。您可以在 KubeSphere 中自定义时间范围和时间间隔以查看物理资源的历史监控数据。以下简要介绍每个监控指标。 + +### CPU 用量 + +**CPU 用量**显示一段时间内 CPU 资源的用量。如果某一时间段的 CPU 用量急剧上升,您首先需要定位占用 CPU 资源最多的进程。例如,Java 应用程序代码中的内存泄漏或无限循环可能会导致 CPU 用量急剧上升。 + +### 内存用量 + +内存是机器上的重要组件之一,是与 CPU 通信的桥梁。因此,内存对机器的性能有很大影响。当程序运行时,数据加载、线程并发和 I/O 缓冲都依赖于内存。可用内存的大小决定了程序能否正常运行以及如何运行。**内存使用情况**反映了集群内存资源的整体用量,显示为特定时刻内存占用的百分比。 +### CPU 平均负载 + +CPU 平均负载是单位时间内系统中处于可运行状态和非中断状态的平均进程数(亦即活动进程的平均数量)。CPU 平均负载和 CPU 利用率之间没有直接关系。理想情况下,平均负载应该等于 CPU 的数量。因此,在查看平均负载时,需要考虑 CPU 的数量。只有当平均负载大于 CPU 数量时,系统才会超载。 + +KubeSphere 为用户提供了 1 分钟、5 分钟和 15 分钟三种不同的平均负载。通常情况下,建议您比较这三种数据以全面了解平均负载情况。 + +- 如果在一定时间范围内 1 分钟、5 分钟和 15 分钟的曲线相似,则表明集群的 CPU 负载相对稳定。 +- 如果某一时间范围或某一特定时间点 1 分钟的数值远大于 15 分钟的数值,则表明最近 1 分钟的负载在增加,需要继续观察。一旦 1 分钟的数值超过 CPU 数量,系统可能出现超载,您需要进一步分析问题的根源。 +- 如果某一时间范围或某一特定时间点 1 分钟的数值远小于 15 分钟的数值,则表明系统在最近 1 分钟内负载在降低,在前 15 分钟内出现了较高的负载。 + +### 磁盘用量 + +KubeSphere 的工作负载(例如`有状态副本集`和`守护进程集`)都依赖于持久卷。某些组件和服务也需要持久卷。这种后端存储依赖于磁盘,例如块存储或网络共享存储。因此,实时的磁盘用量监控环境对确保数据的高可靠性尤为重要。 + +在 Linux 系统的日常管理中,平台管理员可能会遇到磁盘空间不足导致数据丢失甚至系统崩溃的情况。作为集群管理的重要组成部分,平台管理员需要密切关注系统的磁盘使用情况,并确保文件系统不会被用尽或滥用。通过监控磁盘使用的历史数据,您可以评估特定时间范围内磁盘的使用情况。在磁盘用量较高的情况下,您可以通过清理不必要的镜像或容器来释放磁盘空间。 + +### Inode 用量 + +每个文件都有一个 inode,用于存储文件的创建者和创建日期等元信息。inode 也会占用磁盘空间,众多的小缓存文件很容易导致 inode 资源耗尽。此外,在 inode 已用完但磁盘未满的情况下,也无法在磁盘上创建新文件。 + +在 KubeSphere 中,对 inode 使用率的监控可以帮助您清楚地了解集群 inode 的使用率,从而提前检测到此类情况。该机制提示用户及时清理临时文件,防止集群因 inode 耗尽而无法工作。 + +### 磁盘吞吐 + +磁盘吞吐和 IOPS 监控是磁盘监控不可或缺的一部分,可帮助集群管理员调整数据布局和其他管理活动以优化集群整体性能。磁盘吞吐量是指磁盘传输数据流(包括读写数据)的速度,单位为 MB/s。当传输大块非连续数据时,该指标具有重要的参考意义。 + +### IOPS + +IOPS 
表示每秒读写操作数。具体来说,磁盘的 IOPS 是每秒连续读写的总和。当传输小块非连续数据时,该指标具有重要的参考意义。
+
+### 网络带宽
+
+网络带宽是网卡每秒接收或发送数据的能力,单位为 Mbps。
+
+### 容器组状态
+
+**容器组状态**显示不同状态的容器组的总数,包括**运行中**、**已完成**和**异常**状态。标记为**已完成**的容器组通常为任务(Job)或定时任务(CronJob)。标记为**异常**的容器组需要特别注意。
+
+## etcd 监控
+
+**etcd 监控**可以帮助您更好地利用 etcd,特别是定位性能问题。etcd 服务提供了原生的指标接口。KubeSphere 监控系统提供了高度图形化和响应性强的仪表板,用于显示原生数据。
+
+| 指标 | 描述 |
+| --- | --- |
+| 服务状态 | - **是否有 Leader** 表示成员是否有 Leader。如果成员没有 Leader,则成员完全不可用。如果集群中的所有成员都没有任何 Leader,则整个集群完全不可用。<br />- **1 小时内 Leader 变更次数**表示集群成员观察到的 1 小时内 Leader 变更总次数。频繁变更 Leader 将显著影响 etcd 性能,同时这还表明 Leader 可能由于网络连接问题或 etcd 集群负载过高而不稳定。 |
+| 库大小 | etcd 的底层数据库大小,单位为 MiB。图表中显示的是 etcd 的每个成员数据库的平均大小。 |
+| 客户端流量 | 包括发送到 gRPC 客户端的总流量和从 gRPC 客户端接收的总流量。有关该指标的更多信息,请参阅[ etcd Network](https://github.com/etcd-io/etcd/blob/v3.2.17/Documentation/metrics.md#network)。 |
+| gRPC 流式消息 | 服务器端的 gRPC 流消息接收速率和发送速率,反映集群内是否正在进行大规模的数据读写操作。有关该指标的更多信息,请参阅[ go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus#counters)。 |
+| WAL 日志同步时间 | WAL 调用 fsync 的延迟。在应用日志条目之前,etcd 会在持久化日志条目到磁盘时调用 `wal_fsync`。有关该指标的更多信息,请参阅[ etcd Disk](https://etcd.io/docs/v3.3.12/metrics/#disk)。 |
+| 库同步时间 | 后端调用提交延迟的分布。当 etcd 将其最新的增量快照提交到磁盘时,会调用 `backend_commit`。需要注意的是,磁盘操作延迟较大(WAL 日志同步时间或库同步时间较长)通常表示磁盘存在问题,这可能会导致请求延迟过高或集群不稳定。有关该指标的详细信息,请参阅[ etcd Disk](https://etcd.io/docs/v3.3.12/metrics/#disk)。 |
+| Raft 提议 | - **提议提交速率**记录提交的协商一致提议的速率。如果集群运行状况良好,则该指标应随着时间的推移而增加。etcd 集群的几个健康成员可以同时具有不同的一般提议。单个成员与其 Leader 之间的持续较大滞后表示该成员缓慢或不健康。<br />- **提议应用速率**记录协商一致提议的总应用率。etcd 服务器异步地应用每个提交的提议。**提议提交速率**和**提议应用速率**的差异应该很小(即使在高负载下也只有几千)。如果它们之间的差异持续增大,则表明 etcd 服务器过载。当使用大范围查询或大量 txn 操作等大规模查询时,可能会出现这种情况。<br />- **提议失败速率**记录提议失败的总速率。这通常与两个问题有关:与 Leader 选举相关的临时失败或由于集群成员数目达不到规定数目而导致的长时间停机。<br />- **排队提议数**记录当前待处理提议的数量。待处理提议的增加表明客户端负载较高或成员无法提交提议。<br />目前,仪表板上显示的数据是 etcd 成员的平均数值。有关这些指标的详细信息,请参阅[ etcd Server](https://etcd.io/docs/v3.3.12/metrics/#server)。 |
+
+## API Server 监控
+
+[API Server](https://kubernetes.io/docs/concepts/overview/kubernetes-api/) 是 Kubernetes 集群中所有组件交互的中枢。下表列出了 API Server 的主要监控指标。
+
+| 指标 | 描述 |
+| --- | --- |
+| 请求延迟 | 资源请求响应延迟,单位为毫秒。该指标按照 HTTP 请求方法进行分类。 |
+| 每秒请求次数 | kube-apiserver 每秒接受的请求数。 |
+
+## 调度器监控
+
+[调度器](https://kubernetes.io/zh/docs/reference/command-line-tools-reference/kube-scheduler/)监控新建容器组的 Kubernetes API,并决定这些新容器组运行在哪些节点上。调度器根据收集资源的可用性和容器组的资源需求等数据进行决策。监控调度延迟的数据可确保您及时了解调度器的任何延迟。
+
+| 指标 | 描述 |
+| --- | --- |
+| 调度次数 | 包括调度成功、错误和失败的次数。 |
+| 调度频率 | 包括调度成功、错误和失败的频率。 |
+| 调度延迟 | 端到端调度延迟,即调度算法延迟和绑定延迟之和。 |
+
+## 节点用量排行
+
+您可以按 **CPU 用量**、**CPU 平均负载**、**内存用量**、**本地存储用量**、**Inode 用量**和**容器组用量**等指标对节点进行升序和降序排序。您可以利用这一功能快速发现潜在问题和节点资源不足的情况。
\ No newline at end of file
diff --git a/content/zh/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/_index.md b/content/zh/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/_index.md
new file mode 100644
index 000000000..ade0b7a19
--- /dev/null
+++ b/content/zh/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/_index.md
@@ -0,0 +1,7 @@
+---
+linkTitle: "集群告警和通知"
+weight: 8500
+
+_build:
+  render: false
+---
diff --git a/content/zh/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/alerting-message.md b/content/zh/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/alerting-message.md
new file mode 100644
index 000000000..1b5c0c490
--- /dev/null
+++ b/content/zh/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/alerting-message.md
@@ -0,0 +1,28 @@
+---
+title: "告警消息(节点级别)"
+keywords: 'KubeSphere, Kubernetes, 节点, 告警, 消息'
+description: '了解如何查看节点的告警消息。'
+
+linkTitle: "告警消息(节点级别)"
+weight: 8540
+---
+
+告警消息会记录按照告警规则触发的告警的详细信息。本教程演示如何查看节点级别的告警消息。
+## 准备工作
+
+- 您需要启用 [KubeSphere 告警系统](../../../pluggable-components/alerting/)。
+- 您需要创建一个用户 (`cluster-admin`) 并授予其 `clusters-admin` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/#step-4-create-a-role)。
+- 您已经创建节点级别的告警策略并已触发该告警。有关更多信息,请参考[告警策略(节点级别)](../alerting-policy/)。
+
+## 查看告警消息
+
+1. 使用 `cluster-admin` 帐户登录 KubeSphere 控制台,导航到**监控告警**下的**告警消息**。
+
+2. 在**告警消息**页面,可以看到列表中的全部告警消息。第一列显示了为告警消息定义的概括和详情。如需查看告警消息的详细信息,点击告警策略的名称,然后点击告警策略详情页面上的**告警历史**选项卡。
+
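告警策略在底层会被转换为 Prometheus 风格的告警规则进行评估。下面以 Prometheus Operator 的 `PrometheusRule` 资源给出一个节点级告警规则的示意,仅用于说明一条告警规则通常包含的表达式、持续时间和级别等要素;对象名称、命名空间和表达式均为假设值,并非 KubeSphere 实际创建的资源:

```yaml
# 仅为示意:一条节点 CPU 用量过高的 Prometheus 告警规则
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: demo-node-cpu-alert          # 假设的名称
  namespace: kubesphere-monitoring-system
spec:
  groups:
  - name: node.demo.rules
    rules:
    - alert: NodeCPUUsageHigh
      # 表达式为假设值,依赖 node-exporter 的 recording rule,可替换为任意 PromQL
      expr: instance:node_cpu_utilisation:rate1m > 0.9
      for: 5m                        # 大致对应告警策略中的阈值时间
      labels:
        severity: warning            # 大致对应告警级别
      annotations:
        summary: 节点 CPU 用量过高
        description: 目标节点的 CPU 用量已持续 5 分钟超过 90%,请及时处理。
```

在控制台上创建告警策略时填写的监控目标、阈值时间和告警级别,大致对应示例中的表达式、`for` 和 `severity` 字段。

+3. 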
在**告警历史**选项卡,您可以看到告警级别、监控目标和告警激活时间。 + +## 查看通知 + +如果需要接收告警通知(例如,邮件和 Slack 消息),则须先配置[一个通知渠道](../../../cluster-administration/platform-settings/notification-management/configure-email/)。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy.md b/content/zh/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy.md new file mode 100644 index 000000000..84e44c2d3 --- /dev/null +++ b/content/zh/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy.md @@ -0,0 +1,70 @@ +--- +title: "告警策略(节点级别)" +keywords: 'KubeSphere, Kubernetes, 节点, 告警, 策略, 通知' +description: '了解如何为节点设置告警策略。' + +linkTitle: "告警策略(节点级别)" +weight: 8530 +--- + +KubeSphere 为节点和工作负载提供告警策略。本教程演示如何为集群中的节点创建告警策略。如需了解如何为工作负载配置告警策略,请参见[告警策略(工作负载级别)](../../../project-user-guide/alerting/alerting-policy/)。 + +KubeSphere 还具有内置策略,一旦满足为这些策略定义的条件,将会触发告警。 在**内置策略**选项卡,您可以点击任一策略查看其详情。请注意,这些策略不能直接在控制台上进行删除或编辑。 + +## 准备工作 + +- 您需要启用 [KubeSphere 告警系统](../../../pluggable-components/alerting)。 +- 如需接收告警通知,您需要预先配置[通知渠道](../../../cluster-administration/platform-settings/notification-management/configure-email/)。 +- 您需要创建一个用户 (`cluster-admin`) 并授予其 `clusters-admin` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/#step-4-create-a-role)。 +- 您需要确保集群中存在工作负载。如果尚未就绪,请参见[部署并访问 Bookinfo](../../../quick-start/deploy-bookinfo-to-k8s/) 创建一个示例应用。 + +## 创建告警策略 + +1. 使用 `cluster-admin` 用户登录控制台。点击左上角的**平台管理**,然后点击**集群管理**。 + +2. 前往**监控告警**下的**告警策略**,然后点击**创建**。 + +3. 在出现的对话框中,填写以下基本信息。点击**下一步**继续。 + + - **名称**:使用简明名称作为其唯一标识符,例如 `node-alert`。 + - **别名**:帮助您更好地识别告警策略。 + - **阈值时间(分钟)**:告警规则中设置的情形持续时间达到该阈值后,告警策略将变为触发中状态。 + - **告警级别**:提供的值包括**一般告警**、**重要告警**和**危险告警**,代表告警的严重程度。 + - **描述信息**:对告警策略的简要介绍。 + +4. 在**规则设置**选项卡,您可以使用规则模板或创建自定义规则。如需使用规则模板,请设置以下参数,然后点击**下一步**继续。 + + - **监控目标**:选择至少一个集群节点进行监控。 + - **告警规则**:为告警策略定义一个规则。下拉菜单中提供的规则基于 Prometheus 表达式,满足条件时将会触发告警。您可以对 CPU、内存等对象进行监控。 + + {{< notice note >}} + + 您可以在**监控指标**字段输入表达式(支持自动补全),以使用 PromQL 创建自定义规则。有关更多信息,请参见 [Querying Prometheus](https://prometheus.io/docs/prometheus/latest/querying/basics/)。 + + {{}} + +5. 在**消息设置**选项卡,输入告警消息的概括和详情,点击**创建**。 + +6. 告警策略刚创建后将显示为**未触发**状态;一旦满足规则表达式中的条件,则会首先达到**待触发**状态;满足告警条件的时间达到阈值时间后,将变为**触发中**状态。 + +## 编辑告警策略 + +如需在创建后编辑告警策略,在**告警策略**页面点击右侧的 。 + +1. 点击下拉菜单中的**编辑**,根据与创建时相同的步骤来编辑告警策略。点击**消息设置**页面的**确定**保存更改。 + +2. 
点击下拉菜单中的**删除**以删除告警策略。 + +## 查看告警策略 + +在**告警策略**页面,点击一个告警策略的名称查看其详情,包括告警规则和告警历史。您还可以看到创建告警策略时基于所使用模板的告警规则表达式。 + +在**监控**下,**告警监控**图显示一段时间内的实际资源使用情况或使用量。**告警消息**显示您在通知中设置的自定义消息。 + +{{< notice note >}} + +您可以点击右上角的 icon 选择告警监控的时间范围或者自定义时间范围。 + +您还可以点击右上角的 icon 来手动刷新告警监控图。 + +{{}} diff --git a/content/zh/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/alertmanager.md b/content/zh/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/alertmanager.md new file mode 100644 index 000000000..c56c69fe7 --- /dev/null +++ b/content/zh/docs/v3.4/cluster-administration/cluster-wide-alerting-and-notification/alertmanager.md @@ -0,0 +1,37 @@ +--- +title: "在 KubeSphere 中使用 Alertmanager 管理告警" +keywords: 'Kubernetes, Prometheus, Alertmanager, 告警' +description: '了解如何在 KubeSphere 中使用 Alertmanager 管理告警。' +linkTitle: "KubeSphere 中的 Alertmanager" +weight: 8510 +--- + +Alertmanager 处理由客户端应用程序(例如 Prometheus 服务器)发出的告警。它会将告警去重、分组 (Grouping) 并路由至正确的接收器,例如电子邮件、PagerDuty 或者 OpsGenie。它还负责告警沉默 (Silencing) 和抑制 (Inhibition)。有关更多详细信息,请参考 [Alertmanager 指南](https://prometheus.io/docs/alerting/latest/alertmanager/)。 + +从初次发布开始,KubeSphere 就一直使用 Prometheus 作为监控服务的后端。从 3.0 版本开始,KubeSphere 的监控栈新增了 Alertmanager 来管理从 Prometheus 和其他服务组件(例如 [kube-events](https://github.com/kubesphere/kube-events) 和 kube-auditing)发出的告警。 + +![alertmanager-kubesphere](/images/docs/v3.3/cluster-administration/cluster-wide-alerting-and-notification/alertmanager-in-kubesphere/alertmanager@kubesphere.png) + +## 使用 Alertmanager 管理 Prometheus 告警 + +Prometheus 的告警分为两部分。Prometheus 服务器根据告警规则向 Alertmanager 发送告警。随后,Alertmanager 管理这些告警,包括沉默、抑制、聚合等,并通过不同方式发送通知,例如电子邮件、应需 (on-call) 通知系统以及聊天平台。 + +从 3.0 版本开始,KubeSphere 向 Prometheus 添加了开源社区中流行的告警规则,用作内置告警规则。默认情况下,KubeSphere 3.3 中的 Prometheus 会持续评估这些内置告警规则,然后向 Alertmanager 发送告警。 + +## 使用 Alertmanager 管理 Kubernetes 事件告警 + +Alertmanager 可用于管理 Prometheus 以外来源发出的告警。在 3.0 版及更高版本的 KubeSphere 中,用户可以用它管理由 Kubernetes 事件触发的告警。有关更多详细信息,请参考 [kube-events](https://github.com/kubesphere/kube-events)。 + +## 使用 Alertmanager 管理 KubeSphere 审计告警 + +在 3.0 版及更高版本的 KubeSphere 中,用户还可以使用 Alertmanager 管理由 Kubernetes 或 KubeSphere 审计事件触发的告警。 + +## 接收 Alertmanager 告警的通知 + +一般来说,要接收 Alertmanager 告警的通知,用户需要手动编辑 Alertmanager 的配置文件,配置接收器(例如电子邮件和 Slack)的设置。 + +这对 Kubernetes 用户来说并不方便,并且违背了 KubeSphere 的多租户规则/架构。具体来说,由不同命名空间中的工作负载所触发的告警可能会发送至同一个租户,然而这些告警信息本应发给不同的租户。 + +为了使用 Alertmanager 管理平台上的告警,KubeSphere 提供了 [Notification Manager](https://github.com/kubesphere/notification-manager),它是一个 Kubernetes 原生通知管理工具,完全开源。它符合多租户规则,提供用户友好的 Kubernetes 通知体验,3.0 版及更高版本的 KubeSphere 均默认安装 Notification Manager。 + +有关使用 Notification Manager 接收 Alertmanager 通知的详细信息,请参考 [Notification Manager](https://github.com/kubesphere/notification-manager)。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/cluster-administration/nodes.md b/content/zh/docs/v3.4/cluster-administration/nodes.md new file mode 100644 index 000000000..e16b6bd56 --- /dev/null +++ b/content/zh/docs/v3.4/cluster-administration/nodes.md @@ -0,0 +1,64 @@ +--- +title: "节点管理" +keywords: "Kubernetes, KubeSphere, 污点, 节点, 标签, 请求, 限制" +description: "监控节点状态并了解如何添加节点标签和污点。" + +linkTitle: "节点管理" +weight: 8100 +--- + +Kubernetes 将容器放入容器组(Pod)中并在节点上运行,从而运行工作负载。取决于具体的集群环境,节点可以是虚拟机,也可以是物理机。每个节点都包含运行容器组所需的服务,这些服务由控制平面管理。有关节点的更多信息,请参阅[ Kubernetes 官方文档](https://kubernetes.io/zh/docs/concepts/architecture/nodes/)。 + +本教程介绍集群管理员可查看的集群节点信息和可执行的操作。 + +## 准备工作 + +您需要一个被授予**集群管理**权限的用户。例如,您可以直接用 `admin` 用户登录控制台,或创建一个具有**集群管理**权限的角色然后将此角色授予一个用户。 + +## 节点状态 + 
+只有集群管理员可以访问集群节点。由于一些节点指标对集群非常重要,集群管理员应监控这些指标并确保节点可用。请按照以下步骤查看节点状态。 + +1. 点击左上角的**平台管理**,然后选择**集群管理**。 + +2. 如果您已启用了[多集群功能](../../multicluster-management/)并已导入了成员集群,您可以选择一个特定集群以查看其节点信息。如果尚未启用该功能,请直接进行下一步。 + +3. 在左侧导航栏中选择**节点**下的**集群节点**,查看节点的状态详情。 + + - **名称**:节点的名称和子网 IP 地址。 + - **状态**:节点的当前状态,标识节点是否可用。 + - **角色**:节点的角色,标识节点是工作节点还是主节点。 + - **CPU 用量**:节点的实时 CPU 用量。 + - **内存用量**:节点的实时内存用量。 + - **容器组**:节点的实时容器组用量。 + - **已分配 CPU**:该指标根据节点上容器组的总 CPU 请求数计算得出。它表示节点上为工作负载预留的 CPU 资源。工作负载实际正在使用 CPU 资源可能低于该数值。该指标对于 Kubernetes 调度器 (kube-scheduler) 非常重要。在大多数情况下,调度器在调度容器组时会偏向配得 CPU 资源较少的节点。有关更多信息,请参阅[为容器管理资源](https://kubernetes.io/zh/docs/concepts/configuration/manage-resources-containers/)。 + - **已分配内存**:该指标根据节点上容器组的总内存请求计算得出。它表示节点上为工作负载预留的内存资源。工作负载实际正在使用内存资源可能低于该数值。 + + {{< notice note >}} + 在大多数情况下,**CPU** 和**已分配 CPU** 的数值不同,**内存**和**已分配内存**的数值也不同,这是正常现象。集群管理员需要同时关注一对指标。最佳实践是根据节点的实际使用情况为每个节点设置资源请求和限制。资源分配不足可能导致集群资源利用率过低,而过度分配资源可能导致集群压力过大从而处于不健康状态。 + {{}} + +## 节点管理 + +在**集群节点**页面,您可以执行以下操作: + +- **停止调度/启用调度**:点击集群节点右侧的 ,然后点击**停止调度**或**启用调度**停止或启用调度节点。您可以在节点重启或维护期间将节点标记为不可调度。Kubernetes 调度器不会将新容器组调度到标记为不可调度的节点。但这不会影响节点上现有工作负载。 + +- **打开终端**:点击集群节点右侧的 ,然后点击**打开终端**。该功能让您更加便捷地管理节点,如修改节点配置、下载镜像等。 + +- **编辑污点**:污点允许节点排斥一些容器组。勾选目标节点前的复选框,在上方弹出的按钮中点击**编辑污点**。在弹出的**编辑污点**对话框,您可以添加或删除污点。 + +同时,您也可以点击列表中的某个节点打开节点详情页面。除了**停止调度/启用调度**和**编辑污点**外,您还可以执行以下操作: + +- **编辑标签**:您可以利用节点标签将容器组分配给特定节点。首先标记节点(例如,用 `node-role.kubernetes.io/gpu-node` 标记 GPU 节点),然后在[创建工作负载](../../project-user-guide/application-workloads/deployments/#步骤-5配置高级设置)时在**高级设置**中添加此标签,从而使容器组在 GPU 节点上运行。要添加节点标签,请点击**更多操作** > **编辑标签**。 + +- 查看节点运行状态、容器组、元数据、监控和事件。 + + {{< notice note >}} +请谨慎添加污点,因为它们可能会导致意外行为从而导致服务不可用。有关更多信息,请参阅[污点和容忍度](https://kubernetes.io/zh/docs/concepts/scheduling-eviction/taint-and-toleration/)。 + {{}} + +## 添加和删除节点 + +当前版本不支持通过 KubeSphere 控制台添加或删除节点。您可以使用 [KubeKey](https://github.com/kubesphere/kubekey) 来进行此类操作。有关更多信息,请参阅[添加新节点](../../installing-on-linux/cluster-operation/add-new-nodes/)和[删除节点](../../installing-on-linux/cluster-operation/remove-nodes/)。 + diff --git a/content/zh/docs/v3.4/cluster-administration/platform-settings/notification-management/_index.md b/content/zh/docs/v3.4/cluster-administration/platform-settings/notification-management/_index.md new file mode 100644 index 000000000..97532a77f --- /dev/null +++ b/content/zh/docs/v3.4/cluster-administration/platform-settings/notification-management/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "通知管理" +weight: 8720 + +_build: + render: false +--- diff --git a/content/zh/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-dingtalk.md b/content/zh/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-dingtalk.md new file mode 100644 index 000000000..2eb1d0448 --- /dev/null +++ b/content/zh/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-dingtalk.md @@ -0,0 +1,127 @@ +--- +title: "配置钉钉通知" +keywords: 'KubeSphere, Kubernetes, 钉钉, 通知, 告警' +description: '配置钉钉通知并添加会话或群机器人来接收告警通知消息。' +linkTitle: "配置钉钉通知" +weight: 8723 +--- + +本教程演示如何配置钉钉通知并添加会话或群机器人来接收告警策略的通知。 + +## 准备工作 + +您需要准备一个[钉钉帐号](https://www.dingtalk.com/oasite/register_new.htm?spm=a213l2.13146415.4929779444.97.7f1521c9FNlFDT&lwfrom=2020052015221741000&source=1008#/)。 + +## 动手实验 + +### 步骤 1:创建应用 + +1. 登录[钉钉管理后台](https://oa.dingtalk.com/?spm=a213l2.13146415.4929779444.99.1c5521c9S8SsLf&lwfrom=2019051610283222000#/login),前往**工作台**页面,点击**自建应用**。 + +2. 在弹出页面中,将鼠标悬停至**应用开发**,选择**企业内部开发**。 + +3. 选择**小程序**,然后点击**创建应用**。 + +4. 
在弹出对话框中,填写**应用名称**和**应用描述**,本教程均输入`通知测试`作为示例,**开发方式**选择**企业自助开发**,然后点击**确定创建**。 + +5. 创建应用后,在基础信息页面可以查看此应用的 **AppKey** 和 **AppSecret**。 + +6. 在**开发管理**页面,点击**修改**。 + +7. 您需要在**服务器出口IP** 中输入所有节点的公网 IP,然后点击**保存**。 + +8. 前往**权限管理**页面,在搜索框中搜索`根据手机号姓名获取成员信息的接口访问权限`,然后点击**申请权限**。 + +9. 继续在搜索框中搜索`群会话`,勾选 **chat相关接口的管理权限**和 **chat相关接口的读取权限**,然后点击**批量申请(2)**。 + +10. 在弹出对话框中,填写**联系人**、**联系方式**和**申请原因**,然后点击**申请**。待审核通过后,您需要在**权限管理**页面的**全部状态**中筛选**已开通**,点击**确定**,然后手动点击 **chat相关接口的管理权限**和 **chat相关接口的读取权限**右侧的**申请权限**。 + +### 步骤 2:获取会话 ID + +目前钉钉官方仅提供一种途径来获取会话 ID,即通过创建会话时的返回值来获取。如果您已知会话 ID,或者不需要通过会话接收通知消息,可跳过此步骤。 + +1. 登录[钉钉 API Explorer](https://open-dev.dingtalk.com/apiExplorer#/?devType=org&api=dingtalk.oapi.gettoken),在**获取凭证**下的**获取企业凭证**页面,填写 **appkey** 和 **appsecret**,点击**发起调用**,即可在右侧获取 `access_token`。 + +2. 在**通讯录管理**下的**用户管理**页面,选择**根据手机号获取userid**,**access_token** 已自动预先填写,在 **mobile** 中填写用户手机号,然后点击**发起调用**,即可在右侧获取 `userid`。 + + {{< notice note>}} + + 您只需获取群主的 userid,待创建会话后再在客户端添加群成员。 + + {{}} + +3. 在**消息通知**下的**群消息**页面,选择**创建群会话**,**access_token** 已自动预先填写,在 **name**、**owner** 和 **useridlist** 中分别填写群名称(本教程使用 `test` 作为示例,您可以按需自行设置)、群主的 userid 和群成员的 userid,然后点击**发起调用**,即可在右侧获取 `chatid`。 + +### 步骤 3:创建群机器人(可选) + +如果您不需要通过群机器人接收通知消息,可跳过此步骤。 + +1. 登录钉钉电脑客户端,点击用户头像,选择**机器人管理**。 + +2. 在弹出对话框中,选择**自定义**,然后点击**添加**。 + +3. 在弹出对话框的**机器人名字**中输入名字(例如`告警通知`),在**添加到群组**中选择群组,在**安全设置**中设置**自定义关键词**和**加签**,勾选**我已阅读并同意《自定义机器人服务及免责条款》**,然后点击**完成**。 + + {{< notice note >}} + + 机器人创建完成后不可修改群组。 + + {{}} + +4. 您可以在**机器人管理**页面点击已创建机器人右侧的 icon,查看机器人的具体设置信息,例如 **Webhook**、**自定义关键词**和**加签**。 + +### 步骤 4:在 KubeSphere 控制台配置钉钉通知 + +您必须在 KubeSphere 控制台提供钉钉的通知设置,以便 KubeSphere 将通知发送至您的钉钉。 + +1. 使用具有 `platform-admin` 角色的用户(例如,`admin`)登录 KubeSphere Web 控制台。 + +2. 点击左上角的**平台管理**,选择**平台设置**。 + +3. 前往**通知管理**下的**通知配置**,选择**钉钉**。 + +4. 您可以在**会话设置**下的 **AppKey**、**AppSecret** 和**会话 ID** 中分别输入您的钉钉应用 AppKey、AppSecret、会话 ID,然后点击**添加**以添加会话 ID,您可以添加多个会话 ID。此外,您也可以在**群机器人设置**下的 **Webhook URL**、**关键词**和**密钥**中分别输入您的钉钉机器人 Webhook URL、关键词(输入关键词后请点击**添加**以添加关键词)、加签。操作完成后,点击**确定**。 + +5. 勾选**通知条件**左侧的复选框即可设置通知条件。 + + - **标签**:告警策略的名称、级别或监控目标。您可以选择一个标签或者自定义标签。 + - **操作符**:标签与值的匹配关系,包括**包含值**,**不包含值**,**存在**和**不存在**。 + - **值**:标签对应的值。 + {{< notice note >}} + - 操作符**包含值**和**不包含值**需要添加一个或多个标签值。使用回车分隔多个值。 + - 操作符**存在**和**不存在**判断某个标签是否存在,无需设置标签值。 + {{}} + + 您可以点击**添加**来添加多个通知条件,或点击通知条件右侧的 icon 来删除通知条件。 + +6. 配置完成后,您可以点击右下角的**发送测试信息**进行验证。 + +7. 
在右上角,打开**未启用**开关来接收钉钉通知,或者关闭**已启用**开关来停用钉钉通知。 + + {{< notice note >}} + + - 通知条件设置后,接收人只会接受符合条件的通知。 + - 如果您更改了现有配置,则必须点击**确定**以应用更改。 + + {{}} + +### 步骤 5:接收钉钉通知 + +配置钉钉通知并添加会话或群机器人后,您需要启用 [KubeSphere 告警系统](../../../../pluggable-components/alerting/),并为[工作负载](../../../../project-user-guide/alerting/alerting-policy/)或[节点](../../../cluster-wide-alerting-and-notification/alerting-policy/)创建告警策略。告警触发后,您的钉钉将收到通知消息。 + +请参考下方截图中的钉钉通知消息示例。 + +![chat-notification](/images/docs/v3.3/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/chat-notification.png) + +![robot-notification](/images/docs/v3.3/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/robot_notification.png) + +{{< notice note >}} + +- 如果您更新了钉钉通知配置,KubeSphere 将根据最新配置发送通知。 + +- 默认情况下,KubeSphere 大约每 12 小时针对同一告警发送通知。告警重复间隔主要由 `kubesphere-monitoring-system` 项目中 `alertmanager-main` 密钥的 `repeat_interval` 所控制。您可以按需自定义重复间隔。 + +- KubeSphere 设有内置告警策略,在不设置任何自定义告警策略的情况下,只要内置告警策略被触发,您的钉钉仍能接收通知消息。 + +{{}} + diff --git a/content/zh/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-email.md b/content/zh/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-email.md new file mode 100644 index 000000000..232790990 --- /dev/null +++ b/content/zh/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-email.md @@ -0,0 +1,75 @@ +--- +title: "配置邮件通知" +keywords: 'KubeSphere, Kubernetes, 自定义, 平台' +description: '配置邮件服务器并添加接收人以接收邮件通知。' +linkTitle: "配置邮件通知" +weight: 8722 +--- + +本教程演示如何配置邮件通知及添加接收人,以便接收告警策略的邮件通知。 + +## 配置邮件服务器 + +1. 使用具有 `platform-admin` 角色的用户登录 Web 控制台。 + +2. 点击左上角的**平台管理**,选择**平台设置**。 + +3. 导航至**通知管理**下的**通知配置**,选择**邮件**。 + +4. 在**服务器设置**下,填写以下字段配置邮件服务器。 + + - **SMTP 服务器地址**:能够提供邮件服务的 SMTP 服务器地址。端口通常是 `25`。 + - **使用 SSL 安全连接**:SSL 可以用于加密邮件,从而提高通过邮件传输的信息的安全性。通常来说,您必须为邮件服务器配置证书。 + - **SMTP 用户名**:SMTP 用户的名称。 + - **SMTP 密码**:SMTP 帐户的密码。 + - **发件人邮箱**:发件人的邮箱地址。 + +5. 点击**确定**。 + +## 接收设置 + +### 添加接收人 + +1. 在**接收设置**下,输入接收人的邮箱地址,点击**添加**。 + +2. 添加完成后,接收人的邮箱地址将在**接收设置**下列出。您最多可以添加 50 位接收人,所有接收人都将能收到通知。 + +3. 若想移除接收人,请将鼠标悬停在想要移除的邮箱地址上,然后点击右侧的 icon。 + +### 设置通知条件 + +1. 勾选**通知条件**左侧的复选框即可设置通知条件。 + + - **标签**:告警策略的名称、级别或监控目标。您可以选择一个标签或者自定义标签。 + - **操作符**:标签与值的匹配关系,包括**包含值**,**不包含值**,**存在**和**不存在**。 + - **值**:标签对应的值。 + {{< notice note >}} + - 操作符**包含值**和**不包含值**需要添加一个或多个标签值。使用回车分隔多个值。 + - 操作符**存在**和**不存在**判断某个标签是否存在,无需设置标签值。 + {{}} + +2. 您可以点击**添加**来添加多个通知条件。 + +3. 您可以点击通知条件右侧的 icon 来删除通知条件。 + +4. 配置完成后,您可以点击右下角的**发送测试信息**进行验证。 + +5. 
在右上角,打开**未启用**开关来接收邮件通知,或者关闭**已启用**开关来停用邮件通知。 + + {{< notice note >}} + - 通知条件设置后,接收人只会接受符合条件的通知。 + - 如果您更改了现有配置,则必须点击**确定**以应用更改。 + + {{}} + +## 接收邮件通知 + +配置邮件通知并添加接收人后,您需要启用 [KubeSphere 告警](../../../../pluggable-components/alerting/),并为工作负载或节点创建告警策略。告警触发后,所有接收人都将能收到邮件通知。 + +{{< notice note >}} + +- 如果您更新了邮件服务器配置,KubeSphere 将根据最新配置发送邮件通知。 +- 默认情况下,KubeSphere 大约每 12 小时针对同一告警发送通知。告警重复间隔期主要由 `kubesphere-monitoring-system` 项目中 `alertmanager-main` 密钥的 `repeat_interval` 所控制。您可以按需自定义间隔期。 +- KubeSphere 拥有内置告警策略,在不设置任何自定义告警策略的情况下,只要内置告警策略被触发,您的接收人仍能收到邮件通知。 + +{{}} diff --git a/content/zh/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-slack.md b/content/zh/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-slack.md new file mode 100644 index 000000000..46b53cb6a --- /dev/null +++ b/content/zh/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-slack.md @@ -0,0 +1,90 @@ +--- +title: "配置 Slack 通知" +keywords: 'KubeSphere, Kubernetes, Slack, 通知' +description: '配置 Slack 通知及添加频道来接收告警策略、事件、审计等通知。' +linkTitle: "配置 Slack 通知" +weight: 8725 +--- + +本教程演示如何配置 Slack 通知及添加频道,以便接收告警策略的通知。 + +## 准备工作 + +您需要准备一个可用的 [Slack](https://slack.com/) 工作区。 + +## 获取 Slack OAuth 令牌 (Token) + +首先,您需要创建一个 Slack 应用,以便发送通知到 Slack 频道。若想认证您的应用,则必须创建一个 OAuth 令牌。 + +1. 登录 Slack 以[创建应用](https://api.slack.com/apps)。 + +2. 在 **Your Apps** 页面,点击 **Create New App**。 + +3. 在出现的对话框中,输入应用名称并为其选择一个 Slack 工作区。点击 **Create App** 继续。 + +4. 在左侧导航栏中,选择 **Features** 下的 **OAuth & Permissions**。在 **Auth & Permissions** 页面,下滑到 **Scopes**,分别点击 **Bot Token Scopes** 和 **User Token Scopes** 下的 **Add an OAuth Scope**,两者都选择 **chart:write** 权限。 + +5. 上滑到 **OAuth Tokens & Redirect URLs**,点击 **Install to Workspace**。授予该应用访问您工作区的权限,您可以在 **OAuth Tokens for Your Team** 下看到已创建的令牌。 + +## 在 KubeSphere 控制台上配置 Slack 通知 + +您必须在 KubeSphere 控制台提供 Slack 令牌用于认证,以便 KubeSphere 将通知发送至您的频道。 + +1. 使用具有 `platform-admin` 角色的用户登录 Web 控制台。 + +2. 点击左上角的**平台管理**,选择**平台设置**。 + +3. 导航到**通知管理**下的**通知配置**,选择 **Slack**。 + +4. 对于**服务器设置**下的 **Slack 令牌**,您可以选择使用 User OAuth Token 或者 Bot User OAuth Token 进行认证。如果使用 User OAuth Token,将由应用所有者往您的 Slack 频道发送通知;如果使用 Bot User OAuth Token,将由应用发送通知。 + +5. 在**接收频道设置**下,输入您想要收取通知的频道,点击**添加**。 + +6. 添加完成后,该频道将在**已添加的频道**下列出。您最多可以添加 20 个频道,所有已添加的频道都将能够收到告警通知。 + + {{< notice note >}} + + 若想从列表中移除频道,请点击频道右侧的 **×** 图标。 + + {{}} + +7. 点击**确定**。 + +8. 勾选**通知条件**左侧的复选框即可设置通知条件。 + + - **标签**:告警策略的名称、级别或监控目标。您可以选择一个标签或者自定义标签。 + - **操作符**:标签与值的匹配关系,包括**包含值**,**不包含值**,**存在**和**不存在**。 + - **值**:标签对应的值。 + {{< notice note >}} + - 操作符**包含值**和**不包含值**需要添加一个或多个标签值。使用回车分隔多个值。 + - 操作符**存在**和**不存在**判断某个标签是否存在,无需设置标签值。 + {{}} + + 您可以点击**添加**来添加多个通知条件,或点击通知条件右侧的 icon 来删除通知条件。 + +9. 配置完成后,您可以点击右下角的**发送测试信息**进行验证。 + +10. 在右上角,打开**未启用**开关来接收 Slack 通知,或者关闭**已启用**开关来停用 Slack 通知。 + + {{< notice note >}} + + - 通知条件设置后,接收人只会接受符合条件的通知。 + - 如果您更改了现有配置,则必须点击**确定**以应用更改。 + + {{}} + +11. 
若想由应用发送通知,请确保将其加入频道。请在 Slack 频道中输入 `/invite @` 将应用加入至该频道。 + +## 接收 Slack 通知 + +配置 Slack 通知并添加频道后,您需要启用 [KubeSphere 告警](../../../../pluggable-components/alerting/),并为工作负载或节点创建告警策略。告警触发后,列表中的全部频道都将能接收通知。 + +{{< notice note >}} + +- 如果您更新了 Slack 通知配置,KubeSphere 将根据最新配置发送通知。 + +- 默认情况下,KubeSphere 大约每 12 小时针对同一告警发送通知。告警重复间隔期主要由 `kubesphere-monitoring-system` 项目中 `alertmanager-main` 密钥的 `repeat_interval` 所控制。您可以按需自定义间隔期。 + +- KubeSphere 拥有内置告警策略,在不设置任何自定义告警策略的情况下,只要内置告警策略被触发,您的 Slack 频道仍能接收通知。 + +{{}} \ No newline at end of file diff --git a/content/zh/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-webhook.md b/content/zh/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-webhook.md new file mode 100644 index 000000000..896f4c8d9 --- /dev/null +++ b/content/zh/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-webhook.md @@ -0,0 +1,63 @@ +--- +title: "配置 Webhook 通知" +keywords: 'KubeSphere, Kubernetes, 自定义, 平台, Webhook' +description: '配置 webhook 服务器以通过 webhook 接收平台通知。' +linkTitle: "配置 Webhook 通知" +weight: 8726 +--- + +Webhook 是应用程序发送由特定事件触发的通知的一种方式,可以实时向其他应用程序发送信息,使用户可以立即接收通知。 + +本教程介绍如何配置 Webhook 服务器以接收平台通知。 + +## 准备工作 + +您需要准备一个被授予 `platform-admin` 角色的用户。有关详细信息,请参阅[创建企业空间、项目、用户和角色](../../../../quick-start/create-workspace-and-project/)。 + +## 配置 Webhook 服务器 + +1. 以 `platform-admin` 用户身份登录 KubeSphere Web 控制台。 + +2. 点击左上角的**平台管理**,选择**平台设置**。 + +3. 在左侧导航栏中,点击**通知管理**下的**通知配置**,选择 **Webhook**。 + +4. 在 **Webhook** 标签页,设置如下参数: + + - **Webhook URL**:Webhook 服务器的 URL。 + + - **认证类型**:Webhook 身份认证方法。 + - **无需认证**:无身份认证,所有通知都可以发送到该 URL。 + - **Bearer 令牌**:使用令牌进行身份认证。 + - **基础认证**:使用用户名和密码进行身份认证。 + + {{< notice note>}}目前,KubeSphere 不支持 TLS 连接(HTTPS)。如果您使用 HTTPS URL,则需要选择**跳过 TLS 认证(不安全)**。 + + {{}} + +5. 勾选**通知条件**左侧的复选框,设置通知条件。 + + - **标签**:告警策略的名称、级别或监控目标。可以选择标签,也可以自定义标签。 + - **操作符**:标签和值之间的映射。操作符包括**包含值**、**不包含值**、**存在**和**不存在**。 + - **值**:与标签关联的值。 + {{< notice note >}} + + - 操作符**包含值**和**不包含值**需要一个或多个标签值。使用回车符来分隔标签值。 + - 操作符**存在**和**不存在**判断标签是否存在,不需要标签值。 + + {{}} + +6. 点击**添加**来添加通知条件,也可以点击通知条件右侧的 icon 来删除条件。 + +7. 配置完成后,可以点击**发送测试信息**进行验证。 + +8. 在右上角,可以打开**未开启**开关以启用通知,或关闭**已开启**开关以禁用通知。 + +9. 完成后点击**确定**。 + + {{< notice note >}} + + - 设置通知条件后,接收方只会收到满足条件的通知。 + - 如果更改现有配置,则必须点击**确定**才能应用修改后的配置。 + + {{}} diff --git a/content/zh/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-wecom.md b/content/zh/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-wecom.md new file mode 100644 index 000000000..6638b97d0 --- /dev/null +++ b/content/zh/docs/v3.4/cluster-administration/platform-settings/notification-management/configure-wecom.md @@ -0,0 +1,104 @@ +--- +title: "配置企业微信通知" +keywords: 'KubeSphere, Kubernetes, 企业微信, 通知, 告警' +description: '配置企业微信通知并添加相应 ID 来接收告警通知消息。' +linkTitle: "配置企业微信通知" +weight: 8724 +--- + +本教程演示如何配置企业微信通知并添加相应 ID 来接收告警策略的通知。 + +## 准备工作 + +您需要准备一个[企业微信帐号](https://work.weixin.qq.com/wework_admin/register_wx?from=myhome)。 + +## 动手实验 + +### 步骤 1:创建应用 + +1. 登录[企业微信管理后台](https://work.weixin.qq.com/wework_admin/loginpage_wx),点击**应用管理**。 + +2. 在**应用管理**页面,点击**自建**下的**创建应用**。 + +3. 在**创建应用**页面,上传应用 Logo、输入应用名称(例如,`通知测试`),点击**选择部门 / 成员**编辑**可见范围**,然后点击**创建应用**。 + + {{< notice note >}} + + 请确保将需要接收通知的用户、部门或标签加入可见范围中。 + + {{}} + +4. 应用创建完成后即可查看其详情页面,**AgentId** 右侧显示该应用的 ID。点击 **Secret** 右侧的**查看**,然后在弹出对话框中点击**发送**,便可以在企业微信客户端查看 Secret。此外,您还可以点击**编辑**来编辑可见范围。 + +### 步骤 2:创建部门或标签 + +1. 
在**通讯录**页面的**组织架构**选项卡下,点击**测试**(本教程使用`测试`部门作为示例)右侧的 icon,然后选择**添加子部门**。 + +2. 在弹出对话框中,输入部门名称(例如`测试二组`),然后点击**确定**。 + +3. 创建部门后,您可以点击右侧的**添加成员**、**批量导入**或**从其他部门移入**来添加成员。添加成员后,点击该成员进入详情页面,查看其帐号。 + +4. 您可以点击`测试二组`右侧的 icon 来查看其部门 ID。 + +5. 点击**标签**选项卡,然后点击**添加标签**来创建标签。若管理界面无**标签**选项卡,请点击加号图标来创建标签。 + +6. 在弹出对话框中,输入标签名称,例如`组长`。您可以按需指定**可使用人**,点击**确定**完成操作。 + +7. 创建标签后,您可以点击右侧的**添加部门/成员**或**批量导入**来添加部门或成员。点击**标签详情**进入详情页面,可以查看此标签的 ID。 + +8. 要查看企业 ID,请点击**我的企业**,在**企业信息**页面查看 ID。 + +### 步骤 3:在 KubeSphere 控制台配置企业微信通知 + +您必须在 KubeSphere 控制台提供企业微信的相关 ID 和凭证,以便 KubeSphere 将通知发送至您的企业微信。 + +1. 使用具有 `platform-admin` 角色的用户(例如,`admin`)登录 KubeSphere Web 控制台。 + +2. 点击左上角的**平台管理**,选择**平台设置**。 + +3. 前往**通知管理**下的**通知配置**,选择**企业微信**。 + +4. 在**服务器设置**下的**企业 ID**、**应用 AgentId** 以及**应用 Secret** 中分别输入您的企业 ID、应用 AgentId 以及应用 Secret。 + +5. 在**接收设置**中,从下拉列表中选择**用户 ID**、**部门 ID** 或者**标签 ID**,输入对应 ID 后点击**添加**。您可以添加多个 ID。 + +6. 勾选**通知条件**左侧的复选框即可设置通知条件。 + + - **标签**:告警策略的名称、级别或监控目标。您可以选择一个标签或者自定义标签。 + - **操作符**:标签与值的匹配关系,包括**包含值**,**不包含值**,**存在**和**不存在**。 + - **值**:标签对应的值。 + {{< notice note >}} + - 操作符**包含值**和**不包含值**需要添加一个或多个标签值。使用回车分隔多个值。 + - 操作符**存在**和**不存在**判断某个标签是否存在,无需设置标签值。 + {{}} + + 您可以点击**添加**来添加多个通知条件,或点击通知条件右侧的 icon 来删除通知条件。 + +7. 配置完成后,您可以点击右下角的**发送测试信息**进行验证。 + +8. 在右上角,打开**未启用**开关来接收企业微信通知,或者关闭**已启用**开关来停用企业微信通知。 + + {{< notice note >}} + + - 通知条件设置后,接收人只会接受符合条件的通知。 + - 如果您更改了现有配置,则必须点击**确定**以应用更改。 + + {{}} + +### 步骤 4:接收企业微信通知 + +配置企业微信通知并添加 ID 后,您需要启用 [KubeSphere 告警系统](../../../../pluggable-components/alerting/),并为[工作负载](../../../../project-user-guide/alerting/alerting-policy/)或[节点](../../../cluster-wide-alerting-and-notification/alerting-policy/)创建告警策略。告警触发后,接收设置中添加的用户或部门将收到通知消息。 + +请参考下方截图中的企业微信通知消息示例。 + +![notification-message](/images/docs/v3.3/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/notification_message.png) + +{{< notice note >}} + +- 如果您更新了企业微信服务器配置,KubeSphere 将根据最新配置发送通知。 + +- 默认情况下,KubeSphere 大约每 12 小时针对同一告警发送通知。告警重复间隔主要由 `kubesphere-monitoring-system` 项目中 `alertmanager-main` 密钥的 `repeat_interval` 所控制。您可以按需自定义重复间隔。 + +- KubeSphere 设有内置告警策略,在不设置任何自定义告警策略的情况下,只要内置告警策略被触发,您的企业微信仍能接收通知消息。 + +{{}} diff --git a/content/zh/docs/v3.4/cluster-administration/platform-settings/notification-management/customize-cluster-name.md b/content/zh/docs/v3.4/cluster-administration/platform-settings/notification-management/customize-cluster-name.md new file mode 100644 index 000000000..f02d264c8 --- /dev/null +++ b/content/zh/docs/v3.4/cluster-administration/platform-settings/notification-management/customize-cluster-name.md @@ -0,0 +1,40 @@ +--- +title: "自定义通知消息中的集群名称" +keywords: 'KubeSphere, Kubernetes, 平台, 通知' +description: '了解如何自定义 KubeSphere 发送的通知消息中的集群名称。' +linkTitle: "自定义通知消息中的集群名称" +weight: 8721 +--- + +本文档说明如何自定义 KubeSphere 发送的通知消息中的集群名称。 + +## 准备工作 + +您需要有一个具有 `platform-admin` 角色的用户,例如 `admin` 用户。有关更多信息,请参阅[创建企业空间、项目、用户和角色](../../../../quick-start/create-workspace-and-project/)。 + +## 自定义通知消息中的集群名称 + +1. 以 `admin` 用户登录 KubeSphere 控制台。 + +2. 点击右下角的 icon 并选择 **Kubectl**。 + +3. 在弹出的对话框中,执行以下命令: + + ```bash + kubectl edit nm notification-manager + ``` + +4. 在 `.spec.receiver.options.global` 下方添加 `cluster` 字段以自定义您的集群名称: + + ```yaml + spec: + receivers: + options: + global: + cluster: <集群名称> + ``` + +5. 
完成操作后,请保存更改。 + + + diff --git a/content/zh/docs/v3.4/cluster-administration/shut-down-and-restart-cluster-gracefully.md b/content/zh/docs/v3.4/cluster-administration/shut-down-and-restart-cluster-gracefully.md new file mode 100644 index 000000000..9f09ff515 --- /dev/null +++ b/content/zh/docs/v3.4/cluster-administration/shut-down-and-restart-cluster-gracefully.md @@ -0,0 +1,89 @@ +--- +title: "关闭和重启集群" +description: "了解如何平稳地关闭和重启集群。" +layout: "single" + +linkTitle: "关闭和重启集群" +weight: 89000 + +icon: "/images/docs/v3.3/docs.svg" +--- + +您可能需要临时关闭集群进行维护。本文介绍平稳关闭集群的流程以及如何重新启动集群。 + +{{< notice warning >}} +关闭集群是非常危险的操作。您必须完全了解该操作及其后果。请先进行 etcd 备份,然后再继续。通常情况下,建议您逐个维护节点,而不是重新启动整个集群。 +{{}} + +## 准备工作 + +- 请先进行 [etcd 备份](https://etcd.io/docs/current/op-guide/recovery/#snapshotting-the-keyspace),再关闭集群。 +- 主机之间已设置 SSH [免密登录](https://man.openbsd.org/ssh.1#AUTHENTICATION)。 + +## 关闭集群 + +{{< notice tip >}} + +- 关闭集群前,请您务必备份 etcd 数据,以便在重新启动集群时如果遇到任何问题,可以通过 etcd 还原集群。 +- 使用本教程中的方法可以平稳关闭集群,但数据损坏的可能性仍然存在。 + +{{}} + +### 步骤 1:获取节点列表 + +```bash +nodes=$(kubectl get nodes -o name) +``` + +### 步骤 2:关闭所有节点 + +```bash +for node in ${nodes[@]} +do + echo "==== Shut down $node ====" + ssh $node sudo shutdown -h 1 +done +``` + +然后,您可以关闭其他的集群依赖项,例如外部存储。 + +## 平稳重启集群 + +平稳关闭集群后,您可以平稳重启集群。 + +### 准备工作 + +您已平稳关闭集群。 + +{{< notice tip >}} +通常情况下,重新启动集群后可以继续正常使用,但是由于意外情况,该集群可能不可用。例如: + +- 关闭集群过程中 etcd 数据损坏。 +- 节点故障。 +- 不可预期的网络错误。 + +{{}} + +### 步骤 1:检查所有集群依赖项的状态 + +确保所有集群依赖项均已就绪,例如外部存储。 + +### 步骤 2:打开集群主机电源 + +等待集群启动并运行,这可能需要大约 10 分钟。 + +### 步骤 3:检查所有主节点的状态 + +检查核心组件(例如 etcd 服务)的状态,并确保一切就绪。 + +```bash +kubectl get nodes -l node-role.kubernetes.io/master +``` + +### 步骤 4:检查所有工作节点的状态 + +```bash +kubectl get nodes -l node-role.kubernetes.io/worker +``` + +如果您的集群重启失败,请尝试[恢复 etcd 集群](https://etcd.io/docs/current/op-guide/recovery/#restoring-a-cluster)。 diff --git a/content/zh/docs/v3.4/cluster-administration/snapshotclass.md b/content/zh/docs/v3.4/cluster-administration/snapshotclass.md new file mode 100644 index 000000000..1fd4b9811 --- /dev/null +++ b/content/zh/docs/v3.4/cluster-administration/snapshotclass.md @@ -0,0 +1,34 @@ +--- +title: "卷快照类" +keywords: 'KubeSphere, Kubernetes, 持久卷声明, 快照' +description: '了解如何在 KubeSphere 中管理卷快照类。' +linkTitle: "卷快照类" +weight: 8900 +--- + +卷快照类(Volume Snapshot Class)用于定义卷快照的存储种类。本教程演示如何创建和使用卷快照类。 + +## 准备工作 + +- 您需要创建一个企业空间、一个项目和一个用户(例如 `project-regular`)。该用户必须已邀请至该项目,并具有 `operator` 角色。有关更多信息,请参阅[创建企业空间、项目、用户和角色](../../quick-start/create-workspace-and-project/)。 + +- 您需要确保 Kubernetes 版本为 1.17 或更新版本。 + +- 您需要确保底层存储插件支持快照。 + +## 操作步骤 + +1. 以 `project-regular` 用户登录 KubeSphere Web 控制台并进入项目。在左侧导航栏选择**存储**下的**卷快照类**。 + +2. 在右侧的**卷快照类**页面,点击**创建**。 + +3. 在弹出的对话框中,设置卷快照类的名称,点击**下一步**。您也可以设置别名和添加描述信息。 + + +4. 
在**卷快照类设置**页签,选择供应者和删除策略。 + + 删除策略目前支持以下两种: + + - Delete:底层的存储快照会和 VolumeSnapshotContent 对象一起删除。 + - Retain:底层快照和 VolumeSnapshotContent 对象都会被保留。 + diff --git a/content/zh/docs/v3.4/cluster-administration/storageclass.md b/content/zh/docs/v3.4/cluster-administration/storageclass.md new file mode 100644 index 000000000..27b975334 --- /dev/null +++ b/content/zh/docs/v3.4/cluster-administration/storageclass.md @@ -0,0 +1,195 @@ +--- +title: "存储类" +keywords: "存储, 持久卷声明, PV, PVC, 存储类, CSI, Ceph RBD, GlusterFS, 青云QingCloud, " +description: "了解 PV、PVC 和存储类的基本概念,并演示如何在 KubeSphere 中管理存储类和 PVC。" +linkTitle: "存储类" +weight: 8800 +--- + +本教程演示集群管理员如何管理 KubeSphere 中的存储类。 + +## 介绍 + +PV 是集群中的一块存储,可以由管理员事先供应,或者使用存储类来动态供应。和卷 (Volume) 一样,PV 通过卷插件实现,但是它的生命周期独立于任何使用该 PV 的容器组。PV 可以[静态](https://kubernetes.io/zh/docs/concepts/storage/persistent-volumes/#static)供应或[动态](https://kubernetes.io/zh/docs/concepts/storage/persistent-volumes/#dynamic)供应。 + +PVC 是用户对存储的请求。它与容器组类似,容器组会消耗节点资源,而 PVC 消耗 PV 资源。 + +KubeSphere 支持基于存储类的[动态卷供应](https://kubernetes.io/zh/docs/concepts/storage/dynamic-provisioning/),以创建 PV。 + +[存储类](https://kubernetes.io/zh/docs/concepts/storage/storage-classes/)是管理员描述其提供的存储类型的一种方式。不同的类型可能会映射到不同的服务质量等级或备份策略,或由集群管理员制定的任意策略。每个存储类都有一个 Provisioner,用于决定使用哪个卷插件来供应 PV。该字段必须指定。有关使用哪一个值,请参阅 [Kubernetes 官方文档](https://kubernetes.io/zh/docs/concepts/storage/storage-classes/#provisioner)或与您的存储管理员确认。 + +下表总结了各种 Provisioner(存储系统)常用的卷插件。 + +| 类型 | 描述信息 | +| -------------------- | ------------------------------------------------------------ | +| In-tree | 内置并作为 Kubernetes 的一部分运行,例如 [RBD](https://kubernetes.io/zh/docs/concepts/storage/storage-classes/#ceph-rbd) 和 [GlusterFS](https://kubernetes.io/zh/docs/concepts/storage/storage-classes/#glusterfs)。有关此类插件的更多信息,请参见 [Provisioner](https://kubernetes.io/zh/docs/concepts/storage/storage-classes/#provisioner)。 | +| External-provisioner | 独立于 Kubernetes 部署,但运行上类似于树内 (in-tree) 插件,例如 [NFS 客户端](https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/)。有关此类插件的更多信息,请参见 [External Storage](https://github.com/kubernetes-retired/external-storage)。 | +| CSI | 容器存储接口,一种将存储资源暴露给 CO(例如 Kubernetes)上的工作负载的标准,例如 [QingCloud-CSI](https://github.com/yunify/qingcloud-csi) 和 [Ceph-CSI](https://github.com/ceph/ceph-csi)。有关此类插件的更多信息,请参见 [Drivers](https://kubernetes-csi.github.io/docs/drivers.html)。 | + +## 准备工作 + +您需要一个拥有**集群管理**权限的帐户。例如,您可以直接以 `admin` 身份登录控制台,或者创建一个拥有该权限的新角色并将它分配至一个用户。 + +## 创建存储类 + +1. 点击左上角的**平台管理**,然后选择**集群管理**。 + +2. 如果您启用了[多集群功能](../../multicluster-management/)并导入了成员集群,可以选择一个特定集群。如果您未启用该功能,请直接参考下一步。 + +3. 在**集群管理**页面,点击**存储 > 存储类**。 + +4. 在右侧的**存储类**页面,点击**创建**。 + +5. 在弹出**创建存储类**对话框,输入存储类名称,点击**下一步**。您也可以设置别名和添加描述信息。 + +6. 在**存储系统**页签,选择一个存储系统,点击**下一步**。 + + 在 KubeSphere 中,您可以直接为 `QingCloud-CSI`、`GlusterFS` 和 `Ceph RBD` 创建存储类。您也可以为其他存储系统创建自定义存储类。 + +7. 
在**存储类设置**页签,设置相关参数,点击**创建**以创建存储类。参数设置项随您选择的存储系统而异。 + +## 设置存储类 + +下表列举了几个通用存储类设置项。 + + +| 参数 | 描述信息 | +| :---- | :---- | +| 卷扩展 | 在 YAML 文件中由 `allowVolumeExpansion` 指定。 | +| 回收机制 | 在 YAML 文件中由 `reclaimPolicy` 指定。 | +| 访问模式 | 在 YAML 文件中由 `.metadata.annotations.storageclass.kubesphere.io/supported-access-modes` 指定。默认 `ReadWriteOnce`、`ReadOnlyMany` 和 `ReadWriteMany` 全选。 | +| 供应者 | 在 YAML 文件中由 `provisioner` 指定。如果您使用 [NFS-Subdir 的 Chart](https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/) 来安装存储类型,可以设为 `cluster.local/nfs-subdir-external-provisioner/`。 | +| 卷绑定模式 | 在 YAML 文件中由 `volumeBindingMode` 指定。它决定使用何种绑定模式。**延迟绑定**即持久性声明创建后,当使用此持久性声明的容器组被创建时,此持久性声明才绑定到一个持久卷。**立即绑定**即持久卷声明创建后,立即绑定到一个持久卷。 | +### QingCloud CSI + +QingCloud CSI 是 Kubernetes 上的 CSI 插件,供青云QingCloud 存储服务使用。KubeSphere 控制台上可以创建 QingCloud CSI 的存储类。 + +#### 准备工作 + +- QingCloud CSI 在青云QingCloud 的公有云和私有云上均可使用。因此,请确保将 KubeSphere 安装至二者之一,以便可以使用云存储服务。 +- KubeSphere 集群上已经安装 QingCloud CSI 插件。有关更多信息,请参见[安装 QingCloud CSI](https://github.com/yunify/qingcloud-csi#installation)。 + +#### 参数设置项 + + +| 参数 | 描述信息 | +| :---- | :---- | +| 类型 | 在青云云平台中,0 代表性能型硬盘;2 代表容量型硬盘;3 代表超高性能型硬盘;5 代表企业级分布式 SAN(NeonSAN)型硬盘;100 代表基础型硬盘;200 代表 SSD 企业型硬盘。 | +| 容量上限 | 卷容量上限。 | +| 步长 | 卷的增量值。 | +| 容量下限 | 卷容量下限。 | +| 文件系统类型 | 支持 ext3、ext4 和 XFS。默认类型为 ext4。 | +| 标签 | 为卷添加标签。使用半角逗号(,)分隔多个标签。 | + +有关存储类参数的更多信息,请参见 [QingCloud CSI 用户指南](https://github.com/yunify/qingcloud-csi/blob/master/docs/user-guide.md#set-storage-class)。 + +### GlusterFS + +GlusterFS 是 Kubernetes 上的一种树内存储插件,即您不需要额外安装卷插件。 + +#### 准备工作 + +已经安装 GlusterFS 存储系统。有关更多信息,请参见 [GlusterFS 安装文档](https://www.gluster.org/install/)。 + +#### 参数设置项 + + +| 参数 | 描述 | +| :---- | :---- | +| REST URL | 供应卷的 Heketi REST URL,例如,<Heketi 服务集群 IP 地址>:<Heketi 服务端口号>。 | +| 集群 ID | Gluster 集群 ID。 | +| 开启 REST 认证 | Gluster 启用对 REST 服务器的认证。 | +| REST 用户 | Gluster REST 服务或 Heketi 服务的用户名。 | +| 密钥所属项目 | Heketi 用户密钥的所属项目。 | +| 密钥名称 | Heketi 用户密钥的名称。 | +| GID 最小值 | 卷的 GID 最小值。 | +| GID 最大值 | 卷的 GID 最大值。 | +| 卷类型 | 卷的类型。该值可为 none,replicate:<副本数>,或 disperse:<数据>:<冗余数>。如果未设置该值,则默认卷类型为 replicate:3。 | + +有关存储类参数的更多信息,请参见 [Kubernetes 文档中的 GlusterFS](https://kubernetes.io/zh/docs/concepts/storage/storage-classes/#glusterfs)。 + +### Ceph RBD + +Ceph RBD 也是 Kubernetes 上的一种树内存储插件,即 Kubernetes 中已经安装该卷插件,但您必须在创建 Ceph RBD 的存储类之前安装其存储服务器。 + +由于 **hyperkube** 镜像[自 1.17 版本开始已被弃用](https://github.com/kubernetes/kubernetes/pull/85094),树内 Ceph RBD 可能无法在不使用 **hyperkube** 的 Kubernetes 上运行。不过,您可以使用 [RBD Provisioner](https://github.com/kubernetes-retired/external-storage/tree/master/ceph/rbd) 作为替代,它的格式与树内 Ceph RBD 相同。唯一不同的参数是 `provisioner`(即 KubeSphere 控制台上的**存储系统**)。如果您想使用 RBD Provisioner,`provisioner` 的值必须为 `ceph.com/rbd`(在**存储系统**中输入该值,如下图所示)。如果您使用树内 Ceph RBD,该值必须为 `kubernetes.io/rbd`。 + +#### 准备工作 + +- 已经安装 Ceph 服务器。有关更多信息,请参见 [Ceph 安装文档](https://docs.ceph.com/en/latest/install/)。 +- 如果您选择使用 RBD Provisioner,请安装插件。社区开发者提供了 [RBD Provisioner 的 Chart](https://github.com/kubesphere/helm-charts/tree/master/src/test/rbd-provisioner),您可以通过 Helm 用这些 Chart 安装 RBD Provisioner。 + +#### 参数设置项 + +| 参数 | 描述 | +| :---- | :---- | +| MONITORS| Ceph 集群 Monitors 的 IP 地址。 | +| ADMINID| Ceph 集群能够创建卷的用户 ID。 | +| ADMINSECRETNAME| `adminId` 的密钥名称。 | +| ADMINSECRETNAMESPACE| `adminSecret` 所在的项目。 | +| POOL | Ceph RBD 的 Pool 名称。 | +| USERID | Ceph 集群能够挂载卷的用户 ID。 | +| USERSECRETNAME | `userId` 的密钥名称。 | +| USERSECRETNAMESPACE | `userSecret` 所在的项目。 | +| 文件系统类型 | 卷的文件系统类型。 | +| IMAGEFORMAT | Ceph 卷的选项。该值可为 `1` 或 `2`,选择 `2` 后需要填写 `imageFeatures`。 | +| IMAGEFEATURES| Ceph 
集群的额外功能。仅当设置 `imageFormat` 为 `2` 时,才需要填写该值。 | + +有关存储类参数的更多信息,请参见 [Kubernetes 文档中的 Ceph RBD](https://kubernetes.io/zh/docs/concepts/storage/storage-classes/#ceph-rbd)。 + +### 自定义存储类 + +如果 KubeSphere 不直接支持您的存储系统,您可以为存储系统创建自定义存储类型。下面的示例向您演示了如何在 KubeSphere 控制台上为 NFS 创建存储类。 + +#### NFS 介绍 + +NFS(网络文件系统)广泛用于带有 [nfs-subdir-external-provisioner](https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/)(External-Provisioner 卷插件)的 Kubernetes。您可以点击**自定义**来创建 NFS-Subdir 的存储类型。 + +{{< notice note >}} + +NFS 与部分应用不兼容(例如 Prometheus),可能会导致容器组创建失败。如果确实需要在生产环境中使用 NFS,请确保您了解相关风险或咨询 KubeSphere 技术支持 support@kubesphere.cloud。 + +{{}} + +#### 准备工作 + +- 有一个可用的 NFS 服务器。 +- 已经安装卷插件 NFS-Subdir。社区开发者提供了 [NFS-SUBDIR 的 Chart](https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/),您可以通过 Helm 用这些 Chart 安装 NFS-SUBDIR。 +#### 参数设置项 + +| 键 | 描述信息 | 值 | +| :---- | :---- | :----| +| archiveOnDelete | 删除时存档 PVC | `true` | + +## 管理存储类 + +创建存储类型后,点击此存储类型的名称前往其详情页。在详情页点击**编辑 YAML** 来编辑此存储类型的清单文件。您也可以点击**更多操作**并在下拉菜单中选择一项操作: + +- **设为默认存储类**:将此存储类设为集群的默认存储类。一个 KubeSphere 集群中仅允许设置一个默认存储类。 +- **设置授权规则**:只允许特定项目和企业空间使用该存储类。 +- **设置卷操作**:管理持久卷声明,包括**卷克隆**、**卷快照创建**、**卷扩容**。开启任意功能前,请联系系统管理员确认存储系统是否支持这些功能。 +- **设置自动扩展**:设置系统在卷剩余空间低于阈值时自动扩容卷。您也可以开启是否自动重启工作负载。 +- **删除**:删除此存储类。 + +在**持久卷声明**页签上,查看与此存储类相关联的持久卷声明。 diff --git a/content/zh/docs/v3.4/devops-user-guide/_index.md b/content/zh/docs/v3.4/devops-user-guide/_index.md new file mode 100644 index 000000000..a2e254051 --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/_index.md @@ -0,0 +1,14 @@ +--- +title: "DevOps 用户指南" +description: "开始使用 KubeSphere DevOps 项目" +layout: "second" + +linkTitle: "DevOps 用户指南" +weight: 11000 + +icon: "/images/docs/v3.3/docs.svg" +--- + +您可以使用 KubeSphere DevOps 系统在 Kubernetes 集群上部署和管理 CI/CD 任务以及相关的工作负载。本章演示如何在 DevOps 项目中进行管理和操作,包括运行流水线、创建凭证和集成工具等等。 + +您安装 DevOps 组件时,会自动部署 Jenkins。您可以在 KubeSphere 中像以前一样通过 Jenkinsfile 构建流水线,保持一致的用户体验。此外,KubeSphere 还提供图形编辑面板,可以将整个流程可视化,为您直观地呈现流水线在每个阶段的运行状态。 diff --git a/content/zh/docs/v3.4/devops-user-guide/devops-overview/_index.md b/content/zh/docs/v3.4/devops-user-guide/devops-overview/_index.md new file mode 100644 index 000000000..a7a86a172 --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/devops-overview/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "DevOps 项目" +weight: 11100 + +_build: + render: false +--- diff --git a/content/zh/docs/v3.4/devops-user-guide/devops-overview/devops-project-management.md b/content/zh/docs/v3.4/devops-user-guide/devops-overview/devops-project-management.md new file mode 100644 index 000000000..c1c719faa --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/devops-overview/devops-project-management.md @@ -0,0 +1,49 @@ +--- +title: "DevOps 项目管理" +keywords: 'Kubernetes, KubeSphere, DevOps, Jenkins' +description: '创建并管理 DevOps 项目,了解 DevOps 项目中的各项基本元素。' +linkTitle: "DevOps 项目管理" +weight: 11120 +--- + +本教程演示如何创建和管理 DevOps 项目。 + +## 准备工作 + +- 您需要创建一个企业空间和一个用户 (`project-admin`),必须邀请该用户至该企业空间并赋予 `workspace-self-provisioner` 角色。有关更多信息,请参考[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要启用 [KubeSphere DevOps 系统](../../../pluggable-components/devops/)。 + +## 创建 DevOps 项目 + +1. 以 `project-admin` 身份登录 KubeSphere 控制台,转到 **DevOps 项目**,然后点击**创建**。 + +2. 输入 DevOps 项目的基本信息,然后点击**确定**。 + + - **名称**:此 DevOps 项目的简明名称,便于用户识别,例如 `demo-devops`。 + - **别名**:此 DevOps 项目的别名。 + - **描述信息**:此 DevOps 项目的简要介绍。 + - **集群设置**:在当前版本中,DevOps 项目无法同时跨多个集群运行。如果您已启用[多集群功能](../../../multicluster-management/),则必须选择一个集群来运行 DevOps 项目。 + +3. 
DevOps 项目创建后,会显示在下图所示的列表中。 + +## 查看 DevOps 项目 + +点击刚刚创建的 DevOps 项目,转到其详情页面。具有不同权限的租户可以在 DevOps 项目中执行各种任务,包括创建 CI/CD 流水线和凭证以及管理帐户和角色。 + +### 流水线 + +流水线是一系列插件的集合,使您可以持续地测试和构建代码。流水线将持续集成 (CI) 和持续交付 (CD) 进行结合,提供精简的工作流,使您的代码可以自动交付给任何目标。 + +### 凭证 + +具有所需权限的 DevOps 项目用户可以为流水线配置凭证,以便与外部环境进行交互。用户在 DevOps 项目中添加凭证后,DevOps 项目就可以使用这些凭证与第三方应用程序(例如 GitHub、GitLab 和 Docker Hub)进行交互。有关更多信息,请参见[凭证管理](../../how-to-use/devops-settings/credential-management/)。 + +### 成员和角色 + +与项目相似,DevOps 项目也需要为用户授予不同的角色,然后用户才能在 DevOps 项目中工作。项目管理员(例如 `project-admin`)负责邀请租户并授予他们不同的角色。有关更多信息,请参见[角色和成员管理](../../how-to-use/devops-settings/role-and-member-management/)。 + +## 编辑或删除 DevOps 项目 + +1. 点击 **DevOps 项目设置**下的**基本信息**,您可以查看当前 DevOps 项目的概述,包括项目角色和项目成员的数量、项目名称和项目创建者。 + +2. 点击右侧的 **DevOps 管理**,您可以编辑此 DevOps 项目的基本信息或删除 DevOps 项目。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/devops-user-guide/devops-overview/overview.md b/content/zh/docs/v3.4/devops-user-guide/devops-overview/overview.md new file mode 100644 index 000000000..e5e5224b0 --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/devops-overview/overview.md @@ -0,0 +1,46 @@ +--- +title: "概述" +keywords: 'Kubernetes, KubeSphere, DevOps, 概述' +description: '了解 DevOps 的基本知识。' +linkTitle: "概述" +weight: 11110 +--- + +DevOps 是一系列做法和工具,可以使 IT 和软件开发团队之间的流程实现自动化。其中,随着敏捷软件开发日趋流行,持续集成 (CI) 和持续交付 (CD) 已经成为该领域一个理想的解决方案。在 CI/CD 工作流中,每次集成都通过自动化构建来验证,包括编码、发布和测试,从而帮助开发者提前发现集成错误,团队也可以快速、安全、可靠地将内部软件交付到生产环境。 + +不过,传统的 Jenkins Controller-Agent 架构(即多个 Agent 为一个 Controller 工作)有以下不足。 + +- 如果 Controller 宕机,整个 CI/CD 流水线会崩溃。 +- 资源分配不均衡,一些 Agent 的流水线任务 (Job) 出现排队等待,而其他 Agent 处于空闲状态。 +- 不同的 Agent 可能配置环境不同,并需要使用不同的编码语言。这种差异会给管理和维护带来不便。 + +## 了解 KubeSphere DevOps + +KubeSphere DevOps 项目支持源代码管理工具,例如 GitHub、Git 和 SVN。用户可以通过图形编辑面板 (Jenkinsfile out of SCM) 构建 CI/CD 流水线,或者从代码仓库 (Jenkinsfile in SCM) 创建基于 Jenkinsfile 的流水线。 + +### 功能 + +KubeSphere DevOps 系统为您提供以下功能: + +- 独立的 DevOps 项目,提供访问可控的 CI/CD 流水线。 +- 开箱即用的 DevOps 功能,无需复杂的 Jenkins 配置。 +- 支持 [Source-to-image (S2I)](../../../project-user-guide/image-builder/source-to-image/) 和 [Binary-to-image (B2I)](../../../project-user-guide/image-builder/binary-to-image/),快速交付镜像。 +- [基于 Jenkinsfile 的流水线](../../../devops-user-guide//how-to-use/pipelines/create-a-pipeline-using-jenkinsfile),提供一致的用户体验,支持多个代码仓库。 +- [图形编辑面板](../../../devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-graphical-editing-panel/),用于创建流水线,学习成本低。 +- 强大的工具集成机制,例如 [SonarQube](../../../devops-user-guide/how-to-integrate/sonarqube/),用于代码质量检查。 + +### KubeSphere CI/CD 流水线工作流 + +KubeSphere CI/CD 流水线基于底层 Kubernetes Jenkins Agent 而运行。这些 Jenkins Agent 可以动态扩缩,即根据任务状态进行动态供应或释放。Jenkins Controller 和 Agent 以 Pod 的形式运行在 KubeSphere 节点上。Controller 运行在其中一个节点上,其配置数据存储在一个持久卷声明中。Agent 运行在各个节点上,但可能不会一直处于运行状态,而是根据需求动态创建并自动删除。 + +当 Jenkins Controller 收到构建请求,会根据标签动态创建运行在 Pod 中的 Jenkins Agent 并注册到 Controller 上。当 Agent 运行完任务后,将会被释放,相关的 Pod 也会被删除。 + +### 动态供应 Jenkins Agent + +动态供应 Jenkins Agent 有以下优势: + +**资源分配合理**:KubeSphere 动态分配已创建的 Agent 至空闲节点,避免因单个节点资源利用率高而导致任务排队等待。 + +**高可扩缩性**:当 KubeSphere 集群因资源不足而导致任务长时间排队等待时,您可以向集群新增节点。 + +**高可用性**:当 Jenkins Controller 故障时,KubeSphere 会自动创建一个新的 Jenkins Controller 容器,并将持久卷挂载至新创建的容器,保证数据不会丢失,从而实现集群高可用。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/devops-user-guide/examples/_index.md b/content/zh/docs/v3.4/devops-user-guide/examples/_index.md new file mode 100644 index 000000000..aa3584278 --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/examples/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "示例" +weight: 11400 + +_build: + render: 
false +--- diff --git a/content/zh/docs/v3.4/devops-user-guide/examples/a-maven-project.md b/content/zh/docs/v3.4/devops-user-guide/examples/a-maven-project.md new file mode 100644 index 000000000..e8697c978 --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/examples/a-maven-project.md @@ -0,0 +1,162 @@ +--- +title: "构建和部署 Maven 项目" +keywords: 'Kubernetes, Docker, DevOps, Jenkins, Maven' +description: '学习如何使用 KubeSphere 流水线构建并部署 Maven 项目。' +linkTitle: "构建和部署 Maven 项目" +weight: 11430 +--- + +## 准备工作 + +- 您需要[启用 KubeSphere DevOps 系统](../../../pluggable-components/devops/)。 +- 您需要有一个 [Docker Hub](http://www.dockerhub.com/) 帐户。 +- 您需要创建一个企业空间、一个 DevOps 项目和一个用户,并需要邀请该用户至 DevOps 项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## Maven 项目的工作流 + +KubeSphere DevOps 中有针对 Maven 项目的工作流,如下图所示,它使用 Jenkins 流水线来构建和部署 Maven 项目。所有步骤均在流水线中进行定义。 + +![maven-project-jenkins](/images/docs/v3.3/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/maven-project-jenkins.png) + +首先,Jenkins Master 创建一个 Pod 来运行流水线。Kubernetes 创建 Pod 作为 Jenkins Master 的 Agent,该 Pod 会在流水线完成之后销毁。主要流程包括克隆代码、构建和推送镜像以及部署工作负载。 + +## Jenkins 中的默认配置 + +### Maven 版本 + +在 Maven 构建器 (Builder) 容器中执行以下命令获取版本信息。 + +```bash +mvn --version + +Apache Maven 3.5.3 (3383c37e1f9e9b3bc3df5050c29c8aff9f295297; 2018-02-24T19:49:05Z) +Maven home: /opt/apache-maven-3.5.3 +Java version: 1.8.0_232, vendor: Oracle Corporation +Java home: /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.232.b09-0.el7_7.i386/jre +Default locale: en_US, platform encoding: UTF-8 +``` + +### Maven 缓存 + +Jenkins Agent 通过节点上的 Docker 存储卷 (Volume) 挂载目录。流水线可以缓存一些特殊目录,例如 `/root/.m2`,这些特殊目录用于 Maven 构建并在 KubeSphere DevOps 中用作 Maven 工具的默认缓存目录,以便依赖项包下载和缓存到节点上。 + +### Jenkins Agent 中的全局 Maven 设置 + +Maven 设置的默认文件路径是 `maven`,配置文件路径是 `/opt/apache-maven-3.5.3/conf/settings.xml`。执行以下命令获取 Maven 的设置内容。 + +```bash +kubectl get cm -n kubesphere-devops-worker ks-devops-agent -o yaml +``` + +### Maven Pod 的网络 + +具有 `maven` 标签的 Pod 使用 docker-in-docker 网络来运行流水线,即节点中的 `/var/run/docker.sock` 被挂载至该 Maven 容器。 + +## Maven 流水线示例 + +### Maven 项目准备工作 + +- 确保您在开发设备上成功构建 Maven 项目。 +- 添加 Dockerfile 至项目仓库以构建镜像。有关更多信息,请参考 。 +- 添加 YAML 文件至项目仓库以部署工作负载。有关更多信息,请参考 。如果有多个不同环境,您需要准备多个部署文件。 + +### 创建凭证 + +| 凭证 ID | 类型 | 用途 | +| --------------- | ---------- | --------------------- | +| dockerhub-id | 用户名和密码 | 仓库,例如 Docker Hub | +| demo-kubeconfig | kubeconfig | 部署工作负载 | + +有关详细信息,请参考[凭证管理](../../how-to-use/devops-settings/credential-management/)。 + +### 为工作负载创建一个项目 + +在本示例中,所有工作负载都部署在 `kubesphere-sample-dev` 项目中。您必须事先创建 `kubesphere-sample-dev` 项目。 + +### 为 Maven 项目创建一个流水线 + +1. 在您的 DevOps 项目中,转到**流水线**页面并点击**创建**,创建一个名为 `maven` 的流水线。有关更多信息,请参见[使用图形编辑面板创建流水线](../../how-to-use/pipelines/create-a-pipeline-using-graphical-editing-panel/)。 + +2. 转到该流水线的详情页面,点击**编辑 Jenkinsfile**。 + +3. 
复制粘贴以下内容至弹出窗口。您必须将 `DOCKERHUB_NAMESPACE` 的值替换为您自己的值,编辑完成后点击**确定**保存 Jenkinsfile。 + + ```groovy + pipeline { + agent { + label 'maven' + } + + parameters { + string(name:'TAG_NAME',defaultValue: '',description:'') + } + + environment { + DOCKER_CREDENTIAL_ID = 'dockerhub-id' + KUBECONFIG_CREDENTIAL_ID = 'demo-kubeconfig' + REGISTRY = 'docker.io' + // 需要更改为您自己的 Docker Hub Namespace + DOCKERHUB_NAMESPACE = 'Docker Hub Namespace' + APP_NAME = 'devops-maven-sample' + BRANCH_NAME = 'dev' + PROJECT_NAME = 'kubesphere-sample-dev' + } + + stages { + stage ('checkout scm') { + steps { + // 下方所用的 GitHub 仓库仅用作体验功能的示例,请避免向该仓库提交包含测试性改动的 PR + git branch: 'master', url: "https://github.com/kubesphere/devops-maven-sample.git" + } + } + + stage ('unit test') { + steps { + container ('maven') { + sh 'mvn clean test' + } + } + } + + stage ('build & push') { + steps { + container ('maven') { + sh 'mvn -Dmaven.test.skip=true clean package' + sh 'docker build -f Dockerfile-online -t $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER .' + withCredentials([usernamePassword(passwordVariable : 'DOCKER_PASSWORD' ,usernameVariable : 'DOCKER_USERNAME' ,credentialsId : "$DOCKER_CREDENTIAL_ID" ,)]) { + sh 'echo "$DOCKER_PASSWORD" | docker login $REGISTRY -u "$DOCKER_USERNAME" --password-stdin' + sh 'docker push $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER' + } + } + } + } + + stage('deploy to dev') { + steps { + container ('maven') { + withCredentials([ + kubeconfigFile( + credentialsId: env.KUBECONFIG_CREDENTIAL_ID, + variable: 'KUBECONFIG') + ]) { + sh 'envsubst < deploy/all-in-one/devops-sample.yaml | kubectl apply -f -' + } + } + } + } + } + } + ``` + +4. 您可以看到图形编辑面板上已自动创建阶段和步骤。 + +### 运行和测试 + +1. 点击**运行**并在弹出对话框的 **TAG_NAME** 中输入 `v1`,然后点击**确定**运行流水线。 + +2. 待流水线成功运行,您可以前往**运行记录**选项卡查看其详情。 + +3. 在 `kubesphere-sample-dev` 项目中,已创建新的工作负载。 + +4. 
在**服务**页面,查看服务 (Service) 的外部访问信息。 + diff --git a/content/zh/docs/v3.4/devops-user-guide/examples/create-multi-cluster-pipeline.md b/content/zh/docs/v3.4/devops-user-guide/examples/create-multi-cluster-pipeline.md new file mode 100644 index 000000000..1c2b83adc --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/examples/create-multi-cluster-pipeline.md @@ -0,0 +1,246 @@ +--- +title: "创建多集群流水线" +keywords: 'KubeSphere, Kubernetes, 多集群, 流水线, DevOps' +description: '学习如何在 Kubesphere 上创建多集群流水线。' +linkTitle: "创建多集群流水线" +weight: 11440 +--- + +由于云场上提供不同的托管 Kubernetes 服务,DevOps 流水线必须处理涉及多个 Kubernetes 集群的使用场景。 + +本教程将演示如何在 KubeSphere 创建一个多集群流水线。 + +## 准备工作 + +- 准备三个已安装 KubeSphere 的 Kubernetes 集群,选择一个集群作为主集群,其余两个作为成员集群。更多关于集群角色与如何在 KubeSphere 上启用多集群环境,请参见[多集群管理](../../../multicluster-management/)。 +- 将成员集群设置为[公开集群](../../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/#将集群设置为公开集群)。或者,您可以[在创建企业空间之后设置集群可见性](../../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/#在创建企业空间后设置集群可见性)。 +- 在主集群上[启用 KubeSphere DevOps 系统](../../../pluggable-components/devops/)。 +- 整合 SonarQube 进入流水线。有关更多信息,请参见[将 SonarQube 集成到流水线](../../how-to-integrate/sonarqube/)。 +- 在主集群创建四个帐户: `ws-manager`、`ws-admin`、`project-admin` 和 `project-regular`,然后授予他们不同的角色。有关详细信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/#step-1-create-an-account)。 + +## 工作流程概述 + +本教程使用三个集群作为工作流中三个独立的环境。如下图所示: + +![use-case-for-multi-cluster](/images/docs/v3.3/devops-user-guide/examples/create-multi-cluster-pipeline/use-case-for-multi-cluster.png) + +三个集群分别用于开发,测试和生产。当代码被提交至 Git 仓库,就会触发流水线并执行以下几个阶段 — `单元测试`,`SonarQube 分析`,`构建 & 推送` 和 `部署到开发集群`。开发者使用开发集群进行自我测试和验证。当开发者批准后,流水线就会进入到下一个阶段 `部署到测试集群` 进行更严格的验证。最后,流水线在获得必要的批准之后,将会进入下一个阶段 `部署到生产集群`,并向外提供服务。 + +## 动手实验 + +### 步骤 1:准备集群 + +下图展示每个集群对应的角色。 + +| 集群名称 | 集群角色 | 用途 | +| -------- | ----------- | ---- | +| host | 主集群 | 测试 | +| shire | 成员集群 | 生产 | +| rohan | 成员集群 | 开发 | + +{{< notice note >}} + +这些 Kubernetes 集群可以被托管至不同的云厂商,也可以使用不同的 Kubernetes 版本。针对 KubeSphere 3.3 推荐的 Kubernetes 版本:v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 v1.21.x。 + +{{}} + +### 步骤 2:创建企业空间 + +1. 使用 `ws-manager` 帐户登录主集群的 Web 控制台。在**企业空间**页面中,点击**创建**。 + +2. 在**基本信息**页面中,将企业空间命名为 `devops-multicluster`,选择 `ws-admin` 为**管理员**,然后点击**下一步**。 + +3. 在**集群设置**页面,选择所有集群(总共三个集群),然后点击**创建**。 + +4. 创建的企业空间会显示在列表。您需要登出控制台并以 `ws-admin` 身份重新登录,以邀请 `project-admin` 与 `project-regular` 至企业空间,然后分别授予他们 `work-space-self-provisioner` 和 `workspace-viwer` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/#step-2-create-a-workspace)。 + +### 步骤 3:创建 DevOps 项目 + +1. 您需要登出控制台,并以 `project-admin` 身份重新登录。转到 **DevOps 项目**页面并点击**创建**。 + +2. 在出现的对话框中,输入 `mulicluster-demo` 作为**名称**,在**集群设置**中选择 **host**,然后点击**确定**。 + + {{< notice note >}} + + 下拉列表中只有启用 DevOps 组件的集群可用。 + + {{}} + +3. 创建的 DevOps 项目将显示在列表中。请确保邀请用户 `project-regular` 至这个项目,并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/#step-1-create-an-account)。 + +### 步骤 4:在集群上创建项目 + +提前创建如下表所示的项目。请确保邀请 `project-regular` 用户到这些项目中,并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/#step-1-create-an-account)。 + +| 集群名 | 用途 | 项目名 | +| ------ | ---- | ---------------------- | +| host | 测试 | kubesphere-sample-prod | +| shire | 生产 | kubesphere-sample-prod | +| rohan | 开发 | kubesphere-sample-dev | + +### 步骤 5:创建凭证 + +1. 
登出控制台,以 `project-regular` 身份重新登录。在 **DevOps 项目**页面,点击 DevOps 项目 `multicluster-demo`。 + +2. 在**凭证**页面,您需要创建如下表所示的凭证。有关如何创建凭证的更多信息,请参见[凭证管理](../../how-to-use/devops-settings/credential-management/#create-credentials)和[使用 Jenkinsfile 创建流水线](../../how-to-use/pipelines/create-a-pipeline-using-jenkinsfile/#step-1-create-credentials)。 + +| 凭证 ID | 类型 | 应用场所 | +| ------------ | ---------- | -------------------- | +| host | kubeconfig | 用于主集群测试 | +| shire | kubeconfig | 用于成员集群生产 | +| rohan | kubeconfig | 用于成员集群开发 | +| dockerhub-id | 用户名和密码 | Docker Hub | +| sonar-token | 访问令牌 | SonarQube | + +{{< notice note >}} + +在创建 kubeconfig 凭证 `shire` 和 `rohan` 时,必须手动输入成员集群的 kubeconfig。确保主集群可以访问成员集群的 API Server 地址。 + +{{}} + +3. 共创建五个凭证。 + +### 步骤 6:创建流水线 + +1. 在**流水线**页面点击**创建**。在显示的对话框中,输入 `build-and-deploy-application` 作为**名称**然后点击**下一步**。 + +2. 在**高级设置中**选项卡中,点击**创建**即使用默认配置。 + +3. 列表会展示被创建的流水线,点击流水线名称进入详情页面。 + +4. 点击**编辑 Jenkinsfile**,复制和粘贴以下内容。请确保将 DOCKERHUB_NAMESPACE 的值替换为您自己的值,然后点击**确定**。 + + ```groovy + pipeline { + agent { + node { + label 'maven' + } + + } + parameters { + string(name:'BRANCH_NAME',defaultValue: 'master',description:'') + } + environment { + DOCKER_CREDENTIAL_ID = 'dockerhub-id' + PROD_KUBECONFIG_CREDENTIAL_ID = 'shire' + TEST_KUBECONFIG_CREDENTIAL_ID = 'host' + DEV_KUBECONFIG_CREDENTIAL_ID = 'rohan' + + REGISTRY = 'docker.io' + DOCKERHUB_NAMESPACE = 'your Docker Hub account ID' + APP_NAME = 'devops-maven-sample' + SONAR_CREDENTIAL_ID = 'sonar-token' + TAG_NAME = "SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER" + } + stages { + stage('checkout') { + steps { + container('maven') { + git branch: 'master', url: 'https://github.com/kubesphere/devops-maven-sample.git' + } + } + } + stage('unit test') { + steps { + container('maven') { + sh 'mvn clean test' + } + } + } + stage('sonarqube analysis') { + steps { + container('maven') { + withCredentials([string(credentialsId: "$SONAR_CREDENTIAL_ID", variable: 'SONAR_TOKEN')]) { + withSonarQubeEnv('sonar') { + sh "mvn sonar:sonar -Dsonar.login=$SONAR_TOKEN" + } + } + } + } + } + stage('build & push') { + steps { + container('maven') { + sh 'mvn -Dmaven.test.skip=true clean package' + sh 'docker build -f Dockerfile-online -t $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER .' 
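+                // 注:下面的 withCredentials 步骤会将凭证 dockerhub-id 以环境变量 DOCKER_USERNAME / DOCKER_PASSWORD 注入,
+                // 并通过 --password-stdin 登录镜像仓库,避免密码明文出现在 Jenkinsfile 和构建日志中。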
+ withCredentials([usernamePassword(passwordVariable : 'DOCKER_PASSWORD' ,usernameVariable : 'DOCKER_USERNAME' ,credentialsId : "$DOCKER_CREDENTIAL_ID" ,)]) { + sh 'echo "$DOCKER_PASSWORD" | docker login $REGISTRY -u "$DOCKER_USERNAME" --password-stdin' + sh 'docker push $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER' + } + } + } + } + stage('push latest') { + steps { + container('maven') { + sh 'docker tag $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:latest ' + sh 'docker push $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:latest ' + } + } + } + stage('deploy to dev') { + steps { + container('maven') { + withCredentials([ + kubeconfigFile( + credentialsId: env.DEV_KUBECONFIG_CREDENTIAL_ID, + variable: 'KUBECONFIG') + ]) { + sh 'envsubst < deploy/dev-all-in-one/devops-sample.yaml | kubectl apply -f -' + } + } + } + } + stage('deploy to staging') { + steps { + container('maven') { + input(id: 'deploy-to-staging', message: 'deploy to staging?') + withCredentials([ + kubeconfigFile( + credentialsId: env.TEST_KUBECONFIG_CREDENTIAL_ID, + variable: 'KUBECONFIG') + ]) { + sh 'envsubst < deploy/prod-all-in-one/devops-sample.yaml | kubectl apply -f -' + } + } + } + } + stage('deploy to production') { + steps { + container('maven') { + input(id: 'deploy-to-production', message: 'deploy to production?') + withCredentials([ + kubeconfigFile( + credentialsId: env.PROD_KUBECONFIG_CREDENTIAL_ID, + variable: 'KUBECONFIG') + ]) { + sh 'envsubst < deploy/prod-all-in-one/devops-sample.yaml | kubectl apply -f -' + } + } + } + } + } + } + ``` + + {{< notice note >}} + + `mvn` 命令中的标志 `-o` 表示启用脱机模式。如果您在本地准备好了相关的 maven 依赖和缓存,可以保持脱机模式以节约时间。 + + {{}} + +5. 流水线创建之后,可以在图形编辑面板上查看流水线的阶段和步骤。 + +### 步骤7:运行流水线并查看结果 + +1. 点击**运行**按钮运行流水线。当流水线运行达到**部署到暂存**的阶段,将会暂停,因为资源已经被部署到集群进行开发。您需要手动点击**继续**两次,以将资源部署到测试集群 `host` 和生产集群 `shire`。 + +2. 一段时间过后,您可以看见流水线的状态展示为**成功**。 + +3. 在右上角点击**查看日志**,查看流水线运行日志。对于每个阶段,您可以点击显示日志以检查日志,同时日志可以被下载到本地进行进一步的分析。 + +4. 当流水线运行成功时,点击**代码检查**,通过 SonarQube 检查结果。 + +5. 转到**项目**页面,您可以通过从下拉列表中选择特定集群,来查看部署在各集群不同项目中的资源。 + diff --git a/content/zh/docs/v3.4/devops-user-guide/examples/go-project-pipeline.md b/content/zh/docs/v3.4/devops-user-guide/examples/go-project-pipeline.md new file mode 100644 index 000000000..65e4e0246 --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/examples/go-project-pipeline.md @@ -0,0 +1,134 @@ +--- +title: "构建和部署 Go 项目" +keywords: 'Kubernetes, docker, devops, jenkins, go, KubeSphere' +description: '学习如何使用 KubeSphere 流水线构建并部署 Go 项目。' +linkTitle: "构建和部署 Go 项目" +weight: 11410 +--- + +## 准备工作 + +- 您需要[启用 KubeSphere DevOps 系统](../../../pluggable-components/devops/)。 +- 您需要有一个 [Docker Hub](https://hub.docker.com/) 帐户。 +- 您需要创建一个企业空间、一个 DevOps 项目、一个项目和一个用户 (`project-regular`),需要邀请该用户至 DevOps 项目和项目中并赋予 `operator` 角色,以部署工作负载。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 创建 Docker Hub 访问令牌 (Token) + +1. 登录 [Docker Hub](https://hub.docker.com/),点击右上角的帐户,并从菜单中选择 **Account Settings**。 + +2. 在左侧导航栏点击 **Security**,然后点击 **New Access Token**。 + +3. 在弹出的对话框中,输入令牌名称(`go-project-token`),点击 **Create**。 + +4. 点击 **Copy and Close** 并务必保存该访问令牌。 + +## 创建凭证 + +您需要在 KubeSphere 中为已创建的访问令牌创建凭证,以便流水线能够向 Docker Hub 推送镜像。此外,您还需要创建 kubeconfig 凭证,用于访问 Kubernetes 集群。 + +1. 以 `project-regular` 身份登录 KubeSphere Web 控制台。在您的 DevOps 项目中点击 **DevOps 项目设置**下的**凭证**,然后在**凭证**页面点击**创建**。 + +2. 
在弹出的对话框中,设置**名称**,稍后会用于 Jenkinsfile 中,**类型**选择**用户名和密码**。**用户名**输入您的 Docker Hub 帐户名称,**密码/令牌**中输入刚刚创建的访问令牌。操作完成后,点击**确定**。 + + {{< notice tip >}} + +有关如何创建凭证的更多信息,请参见[凭证管理](../../../devops-user-guide/how-to-use/devops-settings/credential-management/)。 + + {{}} + +3. 再次点击**创建**,**类型**选择 **kubeconfig**。KubeSphere 会自动填充**内容**字段,即当前用户帐户的 kubeconfig。设置**名称**,然后点击**确定**。 + +## 创建流水线 + +创建完上述凭证后,您可以按照以下步骤使用示例 Jenkinsfile 创建流水线。 + +1. 要创建流水线,请在**流水线**页面点击**创建**。 + +2. 在弹出窗口中设置名称,然后点击**下一步**。 + +3. 在本教程中,您可以为所有字段使用默认值。在**高级设置**页面,点击**创建**。 + +## 编辑 Jenkinsfile + +1. 在流水线列表中,点击该流水线名称进入其详情页面。点击**编辑 Jenkinsfile** 定义一个 Jenkinsfile,流水线会基于它来运行。 + +2. 将以下所有内容复制并粘贴到弹出的对话框中,用作流水线的示例 Jenkinsfile。您必须将 `DOCKERHUB_USERNAME`、`DOCKERHUB_CREDENTIAL`、`KUBECONFIG_CREDENTIAL_ID` 和 `PROJECT_NAME` 的值替换成您自己的值。操作完成后,点击**确定**。 + + ```groovy + pipeline { + agent { + label 'go' + } + + environment { + // 您 Docker Hub 仓库的地址 + REGISTRY = 'docker.io' + // 您的 Docker Hub 用户名 + DOCKERHUB_USERNAME = 'Docker Hub Username' + // Docker 镜像名称 + APP_NAME = 'devops-go-sample' + // 'dockerhubid' 是您在 KubeSphere 用 Docker Hub 访问令牌创建的凭证 ID + DOCKERHUB_CREDENTIAL = credentials('dockerhubid') + // 您在 KubeSphere 创建的 kubeconfig 凭证 ID + KUBECONFIG_CREDENTIAL_ID = 'go' + // 您在 KubeSphere 创建的项目名称,不是 DevOps 项目名称 + PROJECT_NAME = 'devops-go' + } + + stages { + stage('docker login') { + steps{ + container ('go') { + sh 'echo $DOCKERHUB_CREDENTIAL_PSW | docker login -u $DOCKERHUB_CREDENTIAL_USR --password-stdin' + } + } + } + + stage('build & push') { + steps { + container ('go') { + sh 'git clone https://github.com/yuswift/devops-go-sample.git' + sh 'cd devops-go-sample && docker build -t $REGISTRY/$DOCKERHUB_USERNAME/$APP_NAME .' + sh 'docker push $REGISTRY/$DOCKERHUB_USERNAME/$APP_NAME' + } + } + } + + stage ('deploy app') { + steps { + container ('go') { + withCredentials([ + kubeconfigFile( + credentialsId: env.KUBECONFIG_CREDENTIAL_ID, + variable: 'KUBECONFIG') + ]) { + sh 'envsubst < devops-go-sample/manifest/deploy.yaml | kubectl apply -f -' + } + } + } + } + } + } + ``` + + {{< notice note >}} + +如果您的流水线成功运行,将会推送镜像至 Docker Hub。如果您使用 Harbor,则无法通过 Jenkins 凭证使用环境变量将参数传送到 `docker login -u`。这是因为每个 Harbor Robot 帐户的用户名都包含一个 `$` 字符,当用于环境变量时,Jenkins 会将其转换为 `$$`。[了解更多信息](https://number1.co.za/rancher-cannot-use-harbor-robot-account-imagepullbackoff-pull-access-denied/)。 + + {{}} + +## 运行流水线 + +1. Jenkinsfile 设置完成后,您可以在仪表板上查看图形面板。点击**运行**来运行流水线。 + +2. 在**运行记录**选项卡中,您可以查看流水线的状态。稍等片刻,流水线便会成功运行。 + + +## 验证结果 + +1. 如果流水线成功运行,则会在 Jenkinsfile 中指定的项目中创建一个**部署 (Deployment)**。 + +2. 
查看已推送至 Docker Hub 的镜像。 + + + \ No newline at end of file diff --git a/content/zh/docs/v3.4/devops-user-guide/examples/multi-cluster-project-example.md b/content/zh/docs/v3.4/devops-user-guide/examples/multi-cluster-project-example.md new file mode 100644 index 000000000..202461475 --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/examples/multi-cluster-project-example.md @@ -0,0 +1,132 @@ +--- +title: "使用 Jenkinsfile 在多集群项目中部署应用" +keywords: 'Kubernetes, KubeSphere, Docker, DevOps, Jenkins, 多集群' +description: '学习如何使用基于 Jenkinsfile 的流水线在多集群项目中部署应用。' +linkTitle: "使用 Jenkinsfile 在多集群项目中部署应用" +weight: 11420 +--- + +## 准备工作 + +- 您需要[启用多集群功能](../../../multicluster-management/)并创建一个多集群企业空间。 +- 您需要有一个 [Docker Hub](https://hub.docker.com/) 帐户。 +- 您需要在主集群上[启用 KubeSphere DevOps 系统](../../../pluggable-components/devops/)。 +- 您需要使用具有 `workspace-self-provisioner` 角色的用户(例如 `project-admin`)创建一个多集群项目,并在主集群上创建一个 DevOps 项目。本教程中的多集群项目创建于主集群和一个成员集群上。 +- 您需要邀请一个用户(例如 `project-regular`)至 DevOps 项目中,赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)、[多集群管理](../../../multicluster-management/)和[多集群项目](../../../project-administration/project-and-multicluster-project/#多集群项目)。 + +## 创建 Docker Hub 访问令牌 (Token) + +1. 登录 [Docker Hub](https://hub.docker.com/),点击右上角的帐户,并从菜单中选择 **Account Settings**。 + +2. 在左侧导航栏点击 **Security**,然后点击 **New Access Token**。 + +3. 在弹出的对话框中,输入令牌名称(`go-project-token`),点击 **Create**。 + +4. 点击 **Copy and Close** 并务必保存该访问令牌。 + +## 创建凭证 + +您需要在 KubeSphere 中为已创建的访问令牌创建凭证,以便流水线能够向 Docker Hub 推送镜像。此外,您还需要创建 kubeconfig 凭证,用于访问 Kubernetes 集群。 + +1. 以 `project-regular` 身份登录 KubeSphere Web 控制台。在您的 DevOps 项目中点击 **DevOps 项目设置**下的**凭证**,然后在**凭证**页面点击**创建**。 + +2. 在弹出的对话框中,设置**名称**,稍后会用于 Jenkinsfile 中,**类型**选择**用户名和密码**。**用户名**输入您的 Docker Hub 帐户名称,**密码/令牌**中输入刚刚创建的访问令牌。操作完成后,点击**确定**。 + + {{< notice tip >}} + + 有关如何创建凭证的更多信息,请参见[凭证管理](../../../devops-user-guide/how-to-use/devops-settings/credential-management/)。 + + {{}} + +3. 登出 KubeSphere Web 控制台,再以 `project-admin` 身份登录。前往您的 DevOps 项目,在**凭证**页面点击**创建**。**类型**选择 **kubeconfig**,KubeSphere 会自动填充**内容**字段,即当前帐户的 kubeconfig。设置**名称**,然后点击**确定**。 + + {{< notice note >}} + + 在未来版本中,您可以邀请 `project-regular` 帐户至您的多集群项目中,并赋予必要角色,以使用此帐户创建 kubeconfig 凭证。 + + {{}} + +## 创建流水线 + +创建完上述凭证后,您可以使用 `project-regular` 帐户按照以下步骤使用示例 Jenkinsfile 创建流水线。 + +1. 要创建流水线,请在**流水线**页面点击**创建**。 + +2. 在弹出窗口中设置名称,然后点击**下一步**。 + +3. 在本教程中,您可以为所有字段使用默认值。在**高级设置**页面,点击**创建**。 + +## 编辑 Jenkinsfile + +1. 在流水线列表中,点击该流水线进入其详情页面。点击**编辑 Jenkinsfile** 定义一个 Jenkinsfile,流水线会基于它来运行。 + +2. 
将以下所有内容复制并粘贴到弹出窗口中,用作流水线的示例 Jenkinsfile。您必须将 `DOCKERHUB_USERNAME`、`DOCKERHUB_CREDENTIAL`、`KUBECONFIG_CREDENTIAL_ID`、`MULTI_CLUSTER_PROJECT_NAME` 和 `MEMBER_CLUSTER_NAME` 的值替换成您自己的值。操作完成后,点击**确定**。 + + ```groovy + pipeline { + agent { + label 'go' + } + + environment { + REGISTRY = 'docker.io' + // Docker Hub 用户名 + DOCKERHUB_USERNAME = 'Your Docker Hub username' + APP_NAME = 'devops-go-sample' + // ‘dockerhub’ 即您在 KubeSphere 控制台上创建的 Docker Hub 凭证 ID + DOCKERHUB_CREDENTIAL = credentials('dockerhub') + // 您在 KubeSphere 控制台上创建的 kubeconfig 凭证 ID + KUBECONFIG_CREDENTIAL_ID = 'kubeconfig' + // 您企业空间中的多集群项目名称 + MULTI_CLUSTER_PROJECT_NAME = 'demo-multi-cluster' + // 您用来部署应用的成员集群名称 + // 本教程中,应用部署在主集群和一个成员集群上 + // 若需要部署在多个成员集群上, 请编辑 manifest/multi-cluster-deploy.yaml + MEMBER_CLUSTER_NAME = 'Your Member Cluster name' + } + + stages { + stage('docker login') { + steps { + container('go') { + sh 'echo $DOCKERHUB_CREDENTIAL_PSW | docker login -u $DOCKERHUB_CREDENTIAL_USR --password-stdin' + } + } + } + + stage('build & push') { + steps { + container('go') { + sh 'git clone https://github.com/yuswift/devops-go-sample.git' + sh 'cd devops-go-sample && docker build -t $REGISTRY/$DOCKERHUB_USERNAME/$APP_NAME .' + sh 'docker push $REGISTRY/$DOCKERHUB_USERNAME/$APP_NAME' + } + } + } + + stage('deploy app to multi cluster') { + steps { + container('go') { + withCredentials([ + kubeconfigFile( + credentialsId: env.KUBECONFIG_CREDENTIAL_ID, + variable: 'KUBECONFIG') + ]) { + sh 'envsubst < devops-go-sample/manifest/multi-cluster-deploy.yaml | kubectl apply -f -' + } + } + } + } + } + } + ``` + + {{< notice note >}} + + 如果您的流水线成功运行,将会推送镜像至 Docker Hub。如果您使用 Harbor,则无法通过 Jenkins 凭证使用环境变量将参数传送到 `docker login -u`。这是因为每个 Harbor Robot 帐户的用户名都包含一个 `$` 字符,当用于环境变量时,Jenkins 会将其转换为 `$$`。[了解更多信息](https://number1.co.za/rancher-cannot-use-harbor-robot-account-imagepullbackoff-pull-access-denied/)。 + + {{}} + +## 运行流水线 + +保存 Jenkinsfile 后,点击**运行**。如果一切顺利,您会在您的多集群项目中看到部署 (Deployment) 工作负载。 diff --git a/content/zh/docs/v3.4/devops-user-guide/examples/use-nexus-in-pipelines.md b/content/zh/docs/v3.4/devops-user-guide/examples/use-nexus-in-pipelines.md new file mode 100644 index 000000000..c8003efb8 --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/examples/use-nexus-in-pipelines.md @@ -0,0 +1,174 @@ +--- +title: "在流水线中使用 Nexus" +keywords: 'KubeSphere, Kubernetes, 流水线, Nexus, Jenkins' +description: '学习如何在 KubeSphere 流水线中使用 Nexus。' +linkTitle: "在流水线中使用 Nexus" +weight: 11450 + + +--- + +[Nexus](https://www.sonatype.com/products/repository-oss) 是存储、组织和分发制品的存储管理器。使用 Nexus 的开发者可以更好的控制开发过程中所需的工件。 + +本教程演示如何在 KubeSphere 流水线中使用 Nexus。 + +## 准备工作 + +- 准备一个[启用 KuberSphere DevOps 系统](../../../pluggable-components/devops/)。 + +- 准备一个 [Nexus 实例](https://help.sonatype.com/repomanager3/installation)。 + +- 准备一个[GitHub](https://github.com/) 帐户。 + +- 创建一个企业空间、一个 DevOps 项目(例如,`demo-devops`)和一个用户(例如,`project-regular`)。`project-regular` 需要被邀请至 DevOps 项目中并赋予 `operator` 角色。有关更多信息,请参考[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 动手实验 + +### 步骤 1:获得 Nexus 上的仓库 URL + +1. 用 `admin` 帐户登录 Nexus 控制台,然后在顶部导航栏点击 。 + +2. 转到**仓库**页面,您可以看到 Nexus 提供了三种仓库类型。 + + - `proxy`:远程仓库代理,用于下载资源并将其作为缓存存储在 Nexus 上。 + + - `hosted`:在 Nexus 上存储制品的仓库。 + + - `group`:一组已配置好的 Nexus 仓库。 + +3. 点击仓库查看它的详细信息。例如:点击 **maven-public** 进去详情页面,并且查看它的 **URL**。 + +### 步骤 2:在 GitHub 仓库修改 `pom.xml` + +1. 登录 GitHub,Fork [示例仓库](https://github.com/devops-ws/learn-pipeline-java)到您的 GitHub 帐户。 + +2. 在您的 **learn-pipline-java** GitHub 仓库中,点击根目录下的文件 `pom.xml`。 + +3. 
在文件中点击 icon 以修改 `` 代码片段。设置 `` 并使用您的 Nexus 仓库的 URL。 + + ![modify-pom](/images/docs/v3.3/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/modify-pom.png) + +4. 当您完成以上步骤,点击页面下方的 **Commit changes**。 + +### 步骤 3:修改 ConfigMap + +1. 使用 `admin` 帐户登录 KubeSphere Web 控制台,点击左上角的**平台管理**,选择**集群管理**。 + +2. 在**配置**下面选择 **配置**。在 **配置** 页面上的下拉列表中选择 `kubesphere-devops-worker` ,然后点击 `ks-devops-agent`。 + +3. 在详情页面,点击下拉菜单**更多操作**中的**编辑 YAML**。 + +4. 在弹出的对话框中,向下滚动,找到 `` 代码片段,输入下列代码: + + ```yaml + + + nexus + admin + admin + + + ``` + + ![enter-server-code](/images/docs/v3.3/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/enter-server-code.png) + + {{< notice note >}} + + `` 是您在步骤 2 设置给 Nexus 的唯一标识符。 `` 是您的 Nexus 用户名。 `` 是您的 Nexus 的密码。您也可以在 Nexus 上面配置 `NuGet API Key`,以获得更高的安全性。 + + {{}} + +5. 继续找到 `` 代码片段,然后输入一下代码: + + ```yaml + + + nexus + maven-public + http://135.68.37.85:8081/repository/maven-public/ + * + + + ``` + + ![enter-mirror-code](/images/docs/v3.3/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/enter-mirror-code.png) + + {{< notice note >}} + + `` 是您在步骤 2 设置给 Nexus 唯一标识符。 `` 是 Nexus 仓库的名称。 `` 是您 Nexus 仓库的 URL。 `` 是要镜像的 Maven 仓库。在本教程,输入 `*` 镜像所有 Maven 仓库。有关更多信息请参考[为仓库使用镜像](http://maven.apache.org/guides/mini/guide-mirror-settings.html)。 + + {{}} + +6. 当您完成,点击**确定**。 + +### 步骤 4:创建流水线 + +1. 登出 KubeSphere Web 控制台,使用帐户 `project-regular` 登录。转到 DevOps 项目,然后在**流水线**页面点击**创建**。 + +2. 在**基础信息**选项卡中,为流水线设置名称(例如,`nexus-pipeline`),然后点击**下一步**。 + +3. 在**高级设置**选项卡中,点击**创建**以使用默认配置。 + +4. 点击流水线名称进入它的详情页面,然后点击**编辑 Jenkinsfile**。 + +5. 在弹出的对话框中,输入以下 Jenkinsfile。完成后,点击**确定**。 + + ```groovy + pipeline { + agent { + label 'maven' + } + stages { + stage ('clone') { + steps { + git 'https://github.com/YANGMAO-ZHANG/learn-pipeline-java.git' + } + } + + stage ('build') { + steps { + container ('maven') { + sh 'mvn clean package' + } + } + } + + stage ('deploy to Nexus') { + steps { + container ('maven') { + sh 'mvn deploy -DaltDeploymentRepository=nexus::default::http://135.68.37.85:8081/repository/maven-snapshots/' + } + } + } + stage ('upload') { + steps { + archiveArtifacts artifacts: 'target/*.jar', followSymlinks: false + } + } + } + } + ``` + {{< notice note >}} + + 您需要用您自己的 GitHub 仓库地址替换原有的仓库地址。在 `deploy to Nexus` 阶段的步骤中的命令中,`nexus` 是您在 ConfigMap 上设置在 `` 上的名称,同时 `http://135.68.37.85:8081/repository/maven-snapshots/` 是您 Nexus 仓库的 URL。 + + {{}} + +### 步骤 5:运行流水线查看结果 + +1. 您可以在图形编辑面板中看到所有的阶段和步骤,点击**运行**去运行流水线。 + +2. 一段时间过后,你可以看到流水线的状态显示**成功**。点击**成功**的记录查看细节。 + +3. 您可以点击**查看日志**查看更详细的日志。 + +4. 登录 Nexus 点击**浏览**。点击 **maven-public**,可以看到已经下载所有依赖。 + + ![maven-public](/images/docs/v3.3/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/maven-public.png) + +5. 
回到 **Browse** 页面,点击 **maven-sanpshots**。可以看到所有 JAR 包已经上传至仓库。 + + ![maven-snapshots](/images/docs/v3.3/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/maven-snapshots.png) + + + diff --git a/content/zh/docs/v3.4/devops-user-guide/how-to-integrate/_index.md b/content/zh/docs/v3.4/devops-user-guide/how-to-integrate/_index.md new file mode 100644 index 000000000..88f1f7031 --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/how-to-integrate/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "工具集成" +weight: 11300 + +_build: + render: false +--- diff --git a/content/zh/docs/v3.4/devops-user-guide/how-to-integrate/harbor.md b/content/zh/docs/v3.4/devops-user-guide/how-to-integrate/harbor.md new file mode 100644 index 000000000..1c8f96ffe --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/how-to-integrate/harbor.md @@ -0,0 +1,143 @@ +--- +title: "将 Harbor 集成到流水线" +keywords: 'Kubernetes, Docker, DevOps, Jenkins, Harbor' +description: '将 Harbor 集成到流水线中并向您的 Harbor 仓库推送镜像。' +linkTitle: "将 Harbor 集成到流水线" +weight: 11320 +--- + +本教程演示如何将 Harbor 集成到 KubeSphere 流水线。 + +## 准备工作 + +- 您需要启用 [KubeSphere DevOps 系统](../../../pluggable-components/devops/)。 +- 您需要创建一个企业空间、一个 DevOps 项目和一个用户 (`project-regular`)。需要邀请该用户至 DevOps 项目并赋予 `operator` 角色。如果尚未创建,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 安装 Harbor + +强烈建议您通过 [KubeSphere 应用商店](../../../application-store/built-in-apps/harbor-app/)安装 Harbor。或者,您可以使用 Helm3 手动安装 Harbor。 + +```bash +helm repo add harbor https://helm.goharbor.io +# 如需快速安装,您可以通过 NodePort 暴露 Harbor 并禁用 tls。 +# 请将 externalURL 设置为您的一个节点 IP,并确保 Jenkins 能够访问它。 +helm install harbor-release harbor/harbor --set expose.type=nodePort,externalURL=http://$ip:30002,expose.tls.enabled=false +``` + +## 获取 Harbor 凭证 + +1. 安装 Harbor 后,请访问 `:30002` 并使用默认帐户和密码 (`admin/Harbor12345`) 登录控制台。在左侧导航栏中点击**项目**并在**项目**页面点击**新建项目**。 + +2. 在弹出的对话框中,设置项目名称 (`ks-devops-harbor`) 并点击**确定**。 + +3. 点击刚刚创建的项目,在**机器人帐户**选项卡下点击**添加机器人帐户**。 + +4. 在弹出的对话框中,为机器人帐户设置名称 (`robot-test`) 并点击**添加**。请确保在**权限**中勾选推送制品的权限选框。 + +5. 在弹出的对话框中,点击**导出到文件中**,保存该令牌。 + +## 启用 Insecure Registry + +您需要配置 Docker,使其忽略您 Harbor 仓库的安全性。 + +1. 在您的主机上运行 `vim /etc/docker/daemon.json` 命令以编辑 `daemon.json` 文件,输入以下内容并保存更改。 + + ```json + { + "insecure-registries" : ["103.61.38.55:30002"] + } + ``` + + {{< notice note >}} + + 请确保将 `103.61.38.55:30002` 替换为您自己的 Harbor 仓库地址。对于 Linux,`daemon.json` 文件的路径为 `/etc/docker/daemon.json`;对于 Windows,该文件的路径为 `C:\ProgramData\docker\config\daemon.json`。 + + {{}} + +2. 运行以下命令重启 Docker,使更改生效。 + + ```bash + sudo systemctl daemon-reload + sudo systemctl restart docker + ``` + + {{< notice note >}} + + 建议您在隔离的测试环境或者严格控制的离线环境中使用该方案。有关更多信息,请参考 [Deploy a plain HTTP registry](https://docs.docker.com/registry/insecure/#deploy-a-plain-http-registry)。完成上述操作后,即可在项目中部署工作负载时使用您 Harbor 仓库中的镜像。您需要为自己的 Harbor 仓库创建一个镜像密钥,然后在**容器镜像**选项卡下的**容器设置**中,选择您的 Harbor 仓库并输入镜像的绝对路径以搜索您的镜像。 + + {{}} + +## 创建凭证 + +1. 以 `project-regular` 身份登录 KubeSphere 控制台,转到您的 DevOps 项目,在 **DevOps 项目设置**下的**凭证**页面为 Harbor 创建凭证。 + +2. 在**创建凭证**页面,设置凭证 ID (`robot-test`),**类型**选择**用户名和密码**。**用户名**字段必须和您刚刚下载的 JSON 文件中 `name` 的值相同,并在**密码/令牌**中输入该文件中 `token` 的值。 + +3. 点击**确定**以保存。 + +## 创建流水线 + +1. 转到**流水线**页面,点击**创建**。在**基本信息**选项卡,输入名称 (`demo-pipeline`),然后点击**下一步**。 + +2. **高级设置**中使用默认值,点击**创建**。 + +## 编辑 Jenkinsfile + +1. 点击该流水线进入其详情页面,然后点击**编辑 Jenkinsfile**。 + +2. 
将以下内容复制粘贴至 Jenkinsfile。请注意,您必须将 `REGISTRY`、`HARBOR_NAMESPACE`、`APP_NAME` 和 `HARBOR_CREDENTIAL` 替换为您自己的值。 + + ```groovy + pipeline { + agent { + node { + label 'maven' + } + } + + environment { + // 您 Harbor 仓库的地址。 + REGISTRY = '103.61.38.55:30002' + // 项目名称。 + // 请确保您的机器人帐户具有足够的项目访问权限。 + HARBOR_NAMESPACE = 'ks-devops-harbor' + // Docker 镜像名称。 + APP_NAME = 'docker-example' + // ‘robot-test’是您在 KubeSphere 控制台上创建的凭证 ID。 + HARBOR_CREDENTIAL = credentials('robot-test') + } + + stages { + stage('docker login') { + steps{ + container ('maven') { + // 请替换 -u 后面的 Docker Hub 用户名,不要忘记加上 ''。您也可以使用 Docker Hub 令牌。 + sh '''echo $HARBOR_CREDENTIAL_PSW | docker login $REGISTRY -u 'robot$robot-test' --password-stdin''' + } + } + } + + stage('build & push') { + steps { + container ('maven') { + sh 'git clone https://github.com/kstaken/dockerfile-examples.git' + sh 'cd dockerfile-examples/rethinkdb && docker build -t $REGISTRY/$HARBOR_NAMESPACE/$APP_NAME:devops-test .' + sh 'docker push $REGISTRY/$HARBOR_NAMESPACE/$APP_NAME:devops-test' + } + } + } + } + } + + + ``` + + {{< notice note >}} + + 您可以通过带有环境变量的 Jenkins 凭证来传送参数至 `docker login -u`。但是,每个 Harbor 机器人帐户的用户名都包含一个 `$` 字符,当用于环境变量时,Jenkins 会将其转换为 `$$`。(Harbor v2.2以后可以自定义机器人后缀,避免此类问题)[了解更多](https://number1.co.za/rancher-cannot-use-harbor-robot-account-imagepullbackoff-pull-access-denied/)。 + + {{}} + +## 运行流水线 + +保存该 Jenkinsfile,KubeSphere 会自动在图形编辑面板上创建所有阶段和步骤。点击**运行**来运行该流水线。如果一切运行正常,Jenkins 将推送镜像至您的 Harbor 仓库。 diff --git a/content/zh/docs/v3.4/devops-user-guide/how-to-integrate/sonarqube.md b/content/zh/docs/v3.4/devops-user-guide/how-to-integrate/sonarqube.md new file mode 100644 index 000000000..a70e9e2c2 --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/how-to-integrate/sonarqube.md @@ -0,0 +1,273 @@ +--- +title: "将 SonarQube 集成到流水线" +keywords: 'Kubernetes, KubeSphere, devops, jenkins, sonarqube, 流水线' +description: '将 SonarQube 集成到流水线中进行代码质量分析。' +linkTitle: "将 SonarQube 集成到流水线" +weight: 11310 +--- + +[SonarQube](https://www.sonarqube.org/) 是一种主流的代码质量持续检测工具。您可以将其用于代码库的静态和动态分析。SonarQube 集成到 KubeSphere 流水线后,如果在运行的流水线中检测到问题,您可以直接在仪表板上查看常见代码问题,例如 Bug 和漏洞。 + +本教程演示如何将 SonarQube 集成到流水线中。在[使用 Jenkinsfile 创建流水线](../../../devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-jenkinsfile/)之前,请先参考以下步骤。 + +## 准备工作 + +您需要[启用 KubeSphere DevOps 系统](../../../pluggable-components/devops/)。 + +## 安装 SonarQube 服务器 + +要将 SonarQube 集成到您的流水线,必须先安装 SonarQube 服务器。 + +1. 请先安装 Helm,以便后续使用该工具安装 SonarQube。例如,运行以下命令安装 Helm 3: + + ```bash + curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash + ``` + + 查看 Helm 版本。 + + ```bash + helm version + + version.BuildInfo{Version:"v3.4.1", GitCommit:"c4e74854886b2efe3321e185578e6db9be0a6e29", GitTreeState:"clean", GoVersion:"go1.14.11"} + ``` + + {{< notice note >}} + + 有关更多信息,请参见 [Helm 文档](https://helm.sh/zh/docs/intro/install/)。 + + {{}} + +2. 执行以下命令安装 SonarQube 服务器。 + + ```bash + helm upgrade --install sonarqube sonarqube --repo https://charts.kubesphere.io/main -n kubesphere-devops-system --create-namespace --set service.type=NodePort + ``` + + {{< notice note >}} + + 请您确保使用 Helm 3 安装 SonarQube Server。 + + {{}} + +3. 您会获取以下提示内容: + + ![安装 SonarQube](/images/docs/v3.3/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/sonarqube-install.png) + +## 获取 SonarQube 控制台地址 + +1. 
执行以下命令以获取 SonarQube NodePort。 + + ```bash + export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services sonarqube-sonarqube) + export NODE_IP=$(kubectl get nodes --namespace kubesphere-devops-system -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT + ``` + +2. 您可以获得如下输出(本示例中端口号为 `31377`,可能与您的端口号不同): + + ```bash + http://10.77.1.201:31377 + ``` + +## 配置 SonarQube 服务器 + +### 步骤 1:访问 SonarQube 控制台 + +1. 执行以下命令查看 SonarQube 的状态。请注意,只有在 SonarQube 启动并运行后才能访问 SonarQube 控制台。 + + ```bash + $ kubectl get pod -n kubesphere-devops-system + NAME READY STATUS RESTARTS AGE + devops-jenkins-68b8949bb-7zwg4 1/1 Running 0 84m + s2ioperator-0 1/1 Running 1 84m + sonarqube-postgresql-0 1/1 Running 0 5m31s + sonarqube-sonarqube-bb595d88b-97594 1/1 Running 2 5m31s + ``` + +2. 在浏览器中访问 SonarQube 控制台 `http://:`。 + +3. 点击右上角的 **Log in**,然后使用默认帐户 `admin/admin` 登录。 + + {{< notice note >}} + + 取决于您的实例的部署位置,您可能需要设置必要的端口转发规则,并在您的安全组中放行该端口,以便访问 SonarQube。 + + {{}} + +### 步骤 2:创建 SonarQube 管理员令牌 (Token) + +1. 点击右上角字母 **A**,然后从菜单中选择 **My Account** 以转到 **Profile** 页面。 + + ![SonarQube 配置-1](/images/docs/v3.3/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/sonarqube-config-1.png) + +2. 点击 **Security** 并输入令牌名称,例如 `kubesphere`。 + + ![SonarQube 配置-2](/images/docs/v3.3/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/sonarqube-config-2.png) + +3. 点击 **Generate** 并复制此令牌。 + + ![SonarQube 配置-3](/images/docs/v3.3/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/sonarqube-config-3.png) + + {{< notice warning >}} + + 如提示所示,您无法再次查看此令牌,因此请确保复制成功。 + + {{}} + +### 步骤 3:创建 Webhook 服务器 + +1. 执行以下命令获取 SonarQube Webhook 的地址。 + + ```bash + export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services devops-jenkins) + export NODE_IP=$(kubectl get nodes --namespace kubesphere-devops-system -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT/sonarqube-webhook/ + ``` + +2. 预期输出结果: + + ```bash + http://10.77.1.201:30180/sonarqube-webhook/ + ``` + +3. 依次点击 **Administration**、**Configuration** 和 **Webhooks** 创建一个 Webhook。 + + ![SonarQube Webhook-1](/images/docs/v3.3/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/sonarqube-webhook-1.png) + +4. 点击 **Create**。 + + ![SonarQube Webhook-2](/images/docs/v3.3/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/sonarqube-webhook-2.png) + +5. 在弹出的对话框中输入 **Name** 和 **Jenkins Console URL**(即 SonarQube Webhook 地址)。点击 **Create** 完成操作。 + + ![Webhook 页面信息](/images/docs/v3.3/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/webhook-page-info.png) + +### 步骤 4:将 SonarQube 配置添加到 ks-installer + +1. 执行以下命令编辑 `ks-installer`。 + + ```bash + kubectl edit cc -n kubesphere-system ks-installer + ``` + +2. 搜寻至 `devops`。添加字段 `sonarqube` 并在其下方指定 `externalSonarUrl` 和 `externalSonarToken`。 + + ```yaml + devops: + enabled: true + jenkinsJavaOpts_MaxRAM: 2g + jenkinsJavaOpts_Xms: 512m + jenkinsJavaOpts_Xmx: 512m + jenkinsMemoryLim: 2Gi + jenkinsMemoryReq: 1500Mi + jenkinsVolumeSize: 8Gi + sonarqube: # Add this field manually. + externalSonarUrl: http://10.77.1.201:31377 # The SonarQube IP address. + externalSonarToken: 00ee4c512fc987d3ec3251fdd7493193cdd3b91d # The SonarQube admin token created above. + ``` + +3. 完成操作后保存此文件。 + +### 步骤 5:将 SonarQube 服务器添加至 Jenkins + +1. 
执行以下命令获取 Jenkins 的地址。 + + ```bash + export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services devops-jenkins) + export NODE_IP=$(kubectl get nodes --namespace kubesphere-devops-system -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT + ``` + +2. 您可以获得以下输出,获取 Jenkins 的端口号。 + + ```bash + http://10.77.1.201:30180 + ``` + +3. 请使用地址 `http://:30180` 访问 Jenkins。安装 KubeSphere 时,默认情况下也会安装 Jenkins 仪表板。此外,Jenkins 还配置有 KubeSphere LDAP,这意味着您可以直接使用 KubeSphere 帐户(例如 `admin/P@88w0rd`)登录 Jenkins。有关配置 Jenkins 的更多信息,请参见 [Jenkins 系统设置](../../../devops-user-guide/how-to-use/pipelines/jenkins-setting/)。 + + {{< notice note >}} + + 取决于您的实例的部署位置,您可能需要设置必要的端口转发规则,并在您的安全组中放行端口 `30180`,以便访问 Jenkins。 + + {{}} + +4. 点击左侧导航栏中的**系统管理**。 + +5. 向下翻页找到并点击**系统配置**。 + +6. 搜寻到 **SonarQube servers**,然后点击 **Add SonarQube**。 + +7. 输入 **Name** 和 **Server URL** (`http://:`)。点击**添加**,选择 **Jenkins**,然后在弹出的对话框中用 SonarQube 管理员令牌创建凭证(如下方第二张截图所示)。创建凭证后,从 **Server authentication token** 旁边的下拉列表中选择该凭证。点击**应用**完成操作。 + + ![sonarqube-jenkins-settings](/images/docs/v3.3/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/sonarqube-jenkins-settings.png) + + ![add-credentials](/images/docs/v3.3/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/add-credentials.png) + + {{< notice note >}} + + 如果点击**添加**按钮无效(Jenkins 已知问题),您可以前往**系统管理**下的 **Manage Credentials** 并点击 **Stores scoped to Jenkins** 下的 **Jenkins**,再点击**全局凭据 (unrestricted)**,然后点击左侧导航栏的**添加凭据**,参考上方第二张截图用 SonarQube 管理员令牌添加凭证。添加凭证后,从 **Server authentication token** 旁边的下拉列表中选择该凭证。 + + {{}} + +### 步骤 6:将 sonarqubeURL 添加到 KubeSphere 控制台 + +您需要指定 `sonarqubeURL`,以便可以直接从 KubeSphere 控制台访问 SonarQube。 + +1. 执行以下命令: + + ```bash + kubectl edit cm -n kubesphere-system ks-console-config + ``` + +2. 搜寻到 `data.client.enableKubeConfig`,在下方添加 `devops` 字段并指定 `sonarqubeURL`。 + + ```bash + client: + enableKubeConfig: true + devops: # 手动添加该字段。 + sonarqubeURL: http://10.77.1.201:31377 # SonarQube IP 地址。 + ``` + +3. 保存该文件。 + +### 步骤 7:重启服务 + +执行以下命令。 + +```bash +kubectl -n kubesphere-devops-system rollout restart deploy devops-apiserver +``` + +```bash +kubectl -n kubesphere-system rollout restart deploy ks-console +``` + +## 为新项目创建 SonarQube Token + +您需要一个 SonarQube 令牌,以便您的流水线可以在运行时与 SonarQube 通信。 + +1. 在 SonarQube 控制台上,点击 **Create new project**。 + + ![SonarQube 创建项目](/images/docs/v3.3/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/sonarqube-create-project.png) + +2. 输入项目密钥,例如 `java-demo`,然后点击 **Set Up**。 + + ![Jenkins 项目密钥](/images/docs/v3.3/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/jenkins-projet-key.png) + +3. 输入项目名称,例如 `java-sample`,然后点击 **Generate**。 + + ![创建令牌](/images/docs/v3.3/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/generate-a-token.png) + +4. 创建令牌后,点击 **Continue**。 + + ![令牌已创建](/images/docs/v3.3/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/token-created.png) + +5. 
分别选择 **Java** 和 **Maven**。复制下图所示绿色框中的序列号,如果要在流水线中使用,则需要在[凭证](../../../devops-user-guide/how-to-use/devops-settings/credential-management/#创建凭证)中添加此序列号。 + + ![sonarqube-example](/images/docs/v3.3/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/sonarqube-example.png) + +## 在 KubeSphere 控制台查看结果 + +您[使用 Jenkinsfile 创建流水线](../../../devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-jenkinsfile/)或[使用图形编辑面板创建流水线](../../../devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-graphical-editing-panel/)之后,可以查看代码质量分析的结果。 diff --git a/content/zh/docs/v3.4/devops-user-guide/how-to-use/_index.md b/content/zh/docs/v3.4/devops-user-guide/how-to-use/_index.md new file mode 100644 index 000000000..a0f69c8f5 --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/how-to-use/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "使用 DevOps" +weight: 11200 + +_build: + render: false +--- diff --git a/content/zh/docs/v3.4/devops-user-guide/how-to-use/code-repositories/_index.md b/content/zh/docs/v3.4/devops-user-guide/how-to-use/code-repositories/_index.md new file mode 100644 index 000000000..a9d9540f0 --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/how-to-use/code-repositories/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "代码仓库" +weight: 11230 + +_build: + render: false +--- \ No newline at end of file diff --git a/content/zh/docs/v3.4/devops-user-guide/how-to-use/code-repositories/import-code-repositories.md b/content/zh/docs/v3.4/devops-user-guide/how-to-use/code-repositories/import-code-repositories.md new file mode 100755 index 000000000..a30e1f655 --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/how-to-use/code-repositories/import-code-repositories.md @@ -0,0 +1,98 @@ +--- +title: "导入代码仓库" +keywords: 'Kubernetes, GitOps, KubeSphere, 代码仓库' +description: '介绍如何在 KubeSphere 中导入代码仓库。' +linkTitle: "导入代码仓库" +weight: 11231 + +--- + +KubeSphere 3.3 支持您导入 GitHub、GitLab、Bitbucket 或其它基于 Git 的代码仓库,如 Gitee。下面以 Github 仓库为例,展示如何导入代码仓库。 + +## 准备工作 + +- 您需要有一个企业空间、一个 DevOps 项目和一个用户 (`project-regular`),并已邀请此帐户至 DevOps 项目中且授予 `operator` 角色。如果尚未准备好,请参考[创建企业空间、项目、用户和角色](../../../../quick-start/create-workspace-and-project/)。 + +- 您需要启用 [KubeSphere DevOps 系统](../../../../devops-user-guide/devops-overview/devops-project-management/)。 + +## 操作步骤 + +1. 以 `project-regular` 用户登录 KubeSphere 控制台,在左侧导航树,点击 **DevOps 项目**。 + +2. 在右侧的 **DevOps 项目**页面,点击您创建的 DevOps 项目。 + +3. 在左侧的导航树,点击**代码仓库**。 + +4. 在右侧的**代码仓库**页面,点击**导入**。 + +5. 在**导入代码仓库**对话框,输入代码仓库名称,并选择代码仓库,此处以 GitHub 为例。您也可以为代码仓库设置别名和添加描述信息。 + + 下表列举了支持导入的代码仓库和参数设置项。 + + + + + + + + + + + + + + + + + + + + + + +
    | 代码仓库 | 参数 |
    | --- | --- |
    | GitHub | **凭证**:选择访问代码仓库的凭证。 |
    | GitLab | **GitLab 服务器地址**:选择 GitLab 服务器地址,默认值为 `https://gitlab.com`。<br>**项目组/所有者**:输入 GitLab 账号。<br>**凭证**:选择访问代码仓库的凭证。<br>**代码仓库**:选择代码仓库。 |
    | Bitbucket | **Bitbucket 服务器地址**:设置 Bitbucket 服务器地址。<br>**凭证**:选择访问代码仓库的凭证。 |
    | Git | **代码仓库地址**:输入代码仓库地址,如 `https://gitee.com`。<br>**凭证**:选择访问代码仓库的凭证。 |
+ + {{< notice note >}} + + 如需使用 GitLab 私有仓库,请参阅[使用 GitLab 创建多分支流水线-步骤4](../../../../devops-user-guide/how-to-use/pipelines/gitlab-multibranch-pipeline/)。 + + {{}} + +6. 在**凭证**区域,点击**创建凭证**。在弹出的**创建凭证**对话框,设置以下参数: + + - **名称**:输入凭证名称,如 `github-id`。 + - **类型**:取值包括**用户名和密码**、**SSH 密钥**、**访问令牌**和 **kubeconfig**。在 DevOps 项目中,建议使用**用户名和密码**。 + - **用户名**:此处默认用户名为 `admin`。 + - **密码/令牌**:输入您的 GitHub 令牌。 + - **描述**:添加描述信息。 + + {{< notice note >}} + + 更多关于如何添加凭证的信息,请参阅[凭证管理](../../../../devops-user-guide/how-to-use/devops-settings/credential-management/)。 + + {{}} + +7. 在弹出的 GitHub 仓库中,选择代码仓库,点击**确定**。 + +8. 点击代码仓库右侧的 icon,您可以执行以下操作: + + - 编辑:修改代码仓库别名和描述信息以及重新选择代码仓库。 + - 编辑 YAML:编辑代码仓库 YAML 文件。 + - 删除:删除代码仓库。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/devops-user-guide/how-to-use/continuous-deployments/_index.md b/content/zh/docs/v3.4/devops-user-guide/how-to-use/continuous-deployments/_index.md new file mode 100644 index 000000000..a025fafa8 --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/how-to-use/continuous-deployments/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "持续部署" +weight: 11220 + +_build: + render: false +--- \ No newline at end of file diff --git a/content/zh/docs/v3.4/devops-user-guide/how-to-use/continuous-deployments/use-gitops-for-continous-deployment.md b/content/zh/docs/v3.4/devops-user-guide/how-to-use/continuous-deployments/use-gitops-for-continous-deployment.md new file mode 100755 index 000000000..80cc93016 --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/how-to-use/continuous-deployments/use-gitops-for-continous-deployment.md @@ -0,0 +1,404 @@ +--- +title: "使用 GitOps 实现应用持续部署" +keywords: 'Kubernetes, GitOps, KubeSphere, CI,CD, 持续集成,持续部署' +description: '介绍如何在 KubeSphere 中使用 GitOps 实现持续部署。' +linkTitle: "使用 GitOps 实现应用持续部署" +weight: 11221 +--- + +KubeSphere 3.3 引入了一种为云原生应用实现持续部署的理念 – GitOps。GitOps 的核心思想是拥有一个 Git 仓库,并将应用系统的申明式基础架构和应用程序存放在 Git 仓库中进行版本控制。GitOps 结合 Kubernetes 能够利用自动交付流水线将更改应用到指定的任意多个集群中,从而解决跨云部署的一致性问题。 + +本示例演示如何创建持续部署实现应用的部署。 + +## 准备工作 + +- 您需要有一个企业空间、一个 DevOps 项目和一个用户 (**project-regular**),并已邀请此帐户至 DevOps 项目中且授予 **operator** 角色。如果尚未准备好,请参考[创建企业空间、项目、用户和角色](../../../../quick-start/create-workspace-and-project/)。 + +- 您需要启用 [KubeSphere DevOps 系统](../../../../pluggable-components/devops/)。 + +## 导入代码仓库 + +1. 以 **project-regular** 用户登录 KubeSphere 控制台,在左侧导航树,点击 **DevOps 项目**。 + +2. 在右侧的 **DevOps 项目**页面,点击您创建的 DevOps 项目。 + +3. 在左侧的导航树,点击**代码仓库**。 + +4. 在右侧的**代码仓库**页面,点击**导入**。 + +5. 在**导入代码仓库**对话框,输入代码仓库名称,如 **open-podcasts**,并选择代码仓库。您也可以为代码仓库设置别名和添加描述信息。 + + +6. 在**选择代码仓库**对话框,点击 **Git**,在**代码仓库地址**区域,输入代码仓库地址,如 **https://github.com/kubesphere-sigs/open-podcasts**,点击**确定**。 + + {{< notice note >}} + + 此处导入的是公共仓库,因此不需要创建凭证。如果您添加的是私有仓库,则需要创建凭证。更多关于如何添加凭证的信息,请参阅[凭证管理](../../../../devops-user-guide/how-to-use/devops-settings/credential-management/)。 + + {{}} + +## 创建持续部署 + +1. 在左侧的导航树,点击**持续部署**。 + +2. 在右侧的**持续部署**页面,点击**创建**。 + +3. 在**基本信息**页签,输入持续部署名称,如 **open-podcasts**,并选择上一步创建的代码仓库,您也可以设置别名和添加描述信息,点击**下一步**。 + +4. 在**部署设置**页签,选择持续部署的部署集群和项目。 + +5. 在**代码仓库设置**区域,设置代码仓库的分支或标签以及 Kustomization 清单文件路径。 + + + + + + + + + + + + + + + + + + + + + + +
    | 参数 | 描述 |
    | --- | --- |
    | 修订版本 | Git 仓库中的 commit ID、分支或标签,例如 `master`、`v1.2.0`、`0a1b2c3` 或 `HEAD`。 |
    | 清单文件路径 | 设置清单文件路径,例如 `config/default`。 |
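    上表中的**修订版本**和**清单文件路径**最终会写入持续部署对应的应用声明。下面是一个示意性的清单片段(假设后端为 Argo CD 风格的 `Application` 资源,其中项目名等均为示例值,实际字段请以集群中生成的资源为准),仅用于说明这两个参数与 Git 仓库的对应关系:

    ```yaml
    apiVersion: argoproj.io/v1alpha1
    kind: Application
    metadata:
      name: open-podcasts
    spec:
      source:
        repoURL: https://github.com/kubesphere-sigs/open-podcasts  # 已导入的代码仓库
        targetRevision: master       # 对应“修订版本”:commit ID、分支或标签
        path: config/default         # 对应“清单文件路径”
      destination:
        server: https://kubernetes.default.svc  # 部署集群(示例值)
        namespace: demo-project                 # 部署项目(示例值)
    ```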

+
+
+ +6. 在**同步策略**区域,根据需要选择**自动同步**或**手动同步**。 + + - **自动同步**:在检测到 Git 仓库中的清单与部署资源的实时状态之间存在差异时,根据设置的同步选项,自动触发应用程序同步。具体参数如下表所示。 + + + + + + + + + + + + + + + + + + + + + + +
     | 参数 | 描述 |
     | --- | --- |
     | 清理资源 | 如果勾选,自动同步时会删除 Git 仓库中不存在的资源;不勾选时,自动同步触发时不会删除集群中的资源。 |
     | 自纠正 | 如果勾选,当检测到 Git 仓库中定义的状态与部署资源有偏差时,将强制应用 Git 仓库中的定义;不勾选时,对部署资源做更改时不会触发自动同步。 |
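     以 Argo CD 风格的声明为例(仅作示意,假设持续部署后端采用该类资源,字段以实际环境为准),**清理资源**与**自纠正**大致对应以下配置:

     ```yaml
     spec:
       syncPolicy:
         automated:
           prune: true      # 对应“清理资源”:自动删除 Git 仓库中已不存在的资源
           selfHeal: true   # 对应“自纠正”:集群状态偏离 Git 定义时自动拉回
     ```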

+
+
+ + - **手动同步**:根据设置的同步选项,手动触发应用程序同步。具体参数如下表所示。 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
     | 参数 | 描述 |
     | --- | --- |
     | 清理资源 | 如果勾选,同步时会删除 Git 仓库中不存在的资源;不勾选时,同步不会删除集群中的资源,而是将其显示为 out-of-sync。 |
     | 模拟运行 | 模拟同步,不影响最终部署资源。 |
     | 仅执行 Apply | 如果勾选,同步应用资源时会跳过 pre/post 钩子,仅执行 `kubectl apply`。 |
     | 强制 Apply | 如果勾选,同步时会执行 `kubectl apply --force`。 |
+ + {{< notice note >}} + + 如需定义以上参数,持续部署创建完成后您需要在持续部署列表或详情页面手动点击**同步**,然后在弹出的**同步资源**对话框中设置各参数。 + + {{}} + + +7. 在**同步设置**区域,根据需要设置同步相关参数。 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    | 参数 | 描述 |
    | --- | --- |
    | 跳过规范校验 | 跳过 kubectl 验证,即执行 `kubectl apply` 时增加 `--validate=false` 标识。 |
    | 自动创建项目 | 在项目不存在的情况下,自动为应用程序资源创建项目。 |
    | 最后清理 | 同步操作时,其他资源都完成部署且处于健康状态后,再清理资源。 |
    | 选择性同步 | 仅同步处于 out-of-sync 状态的资源。 |
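    这些同步设置通常以同步选项 (sync options) 的形式记录在应用声明中。下面是一个示意片段(假设后端为 Argo CD 风格的资源,选项名称以实际版本为准):

    ```yaml
    spec:
      syncPolicy:
        syncOptions:
          - Validate=false           # 跳过规范校验
          - CreateNamespace=true     # 自动创建项目(命名空间)
          - PruneLast=true           # 最后清理
          - ApplyOutOfSyncOnly=true  # 选择性同步
    ```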

+
+
+ +8. 在**依赖清理策略**区域,根据需要选择依赖清理策略。 + + + + + + + + + + + + + + + + + + + + + + + + + + +
    | 参数 | 描述 |
    | --- | --- |
    | foreground | 先删除依赖资源,再删除主资源。 |
    | background | 先删除主资源,再删除依赖资源。 |
    | orphan | 删除主资源,留下依赖资源成为孤儿。 |
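    依赖清理策略对应 Kubernetes 级联删除的传播策略 (propagation policy)。以 Argo CD 风格的声明为例(仅作示意,假设后端采用该类资源,实际写法以生成的资源为准),通常可以通过如下同步选项指定:

    ```yaml
    spec:
      syncPolicy:
        syncOptions:
          - PrunePropagationPolicy=foreground  # 可选值:foreground、background、orphan
    ```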

+
+
+ +9. 在**替换资源**区域,选择是否需要替换已存在的资源。 + + {{< notice note >}} + + 如果勾选,将执行 **kubectl replace/create** 命令同步资源。不勾选时,使用 **kubectl apply** 命令同步资源。 + + {{}} + +10. 点击**创建**。资源创建完成后将显示在持续部署列表中。 + +## 查看已创建的持续部署信息 + +1. 在**持续部署**页面上查看到已创建的持续部署信息。具体参数如下表所示。 + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    | 参数 | 描述信息 |
    | --- | --- |
    | 名称 | 持续部署的名称。 |
    | 健康状态 | 持续部署的健康状态,主要包含以下几种:<br>• **健康**:资源健康。<br>• **已降级**:资源已经被降级。<br>• **进行中**:资源正在同步,默认返回该状态。<br>• **暂停**:资源已经被暂停并等待恢复。<br>• **未知**:资源健康状态未知。<br>• **丢失**:资源已缺失。 |
    | 同步状态 | 持续部署的同步状态,主要包含以下几种:<br>• **已同步**:资源同步已完成。<br>• **未同步**:资源的实际运行状态和期望状态不一致。<br>• **未知**:资源同步状态未知。 |
    | 部署位置 | 资源部署的集群和项目。 |
    | 更新时间 | 资源更新的时间。 |
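    上述健康状态与同步状态一般来自持续部署背后应用资源的 `status` 字段。下面是一个示意片段(假设后端为 Argo CD 风格的资源,字段以实际环境为准),用于说明这两类状态的来源:

    ```yaml
    status:
      health:
        status: Healthy      # 对应“健康状态”,例如 Healthy(健康)、Degraded(已降级)
      sync:
        status: OutOfSync    # 对应“同步状态”,例如 Synced(已同步)、OutOfSync(未同步)
        revision: 0a1b2c3    # 当前同步到的 Git 修订版本(示例值)
    ```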
+ +2. 点击持续部署右侧的 icon,您可以执行以下操作: + - **编辑信息**:编辑别名和描述信息。 + - **编辑 YAML**:编辑持续部署的 YAML 文件。 + - **同步**:触发资源同步。 + - **删除**:删除持续部署。 + + {{< notice warning >}} + + 删除持续部署的同时会删掉和该持续部署关联的资源。请谨慎操作。 + + {{}} + +3. 点击已创建的持续部署进入详情页面,可以查看同步状态和同步结果。 + +## 访问已创建的应用 + +1. 进入持续部署所在的项目,在左侧导航栏,点击**服务**。 + +2. 在右侧的**服务**区域,找到已部署的应用,并点击右侧 icon,选择**编辑外部访问**。 + +3. 在**访问模式**中选择 **NodePort**,点击**确定**。 + +4. 在服务列表页面的**外部访问**列,查看暴露的端口,通过 {Node IP}:{NodePort} 访问此应用。 + + {{< notice note >}} + 在访问服务之前,请确保安全组中的端口已打开。 + {{}} \ No newline at end of file diff --git a/content/zh/docs/v3.4/devops-user-guide/how-to-use/devops-settings/_index.md b/content/zh/docs/v3.4/devops-user-guide/how-to-use/devops-settings/_index.md new file mode 100644 index 000000000..b51076766 --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/how-to-use/devops-settings/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "DevOps 项目设置" +weight: 11240 + +_build: + render: false +--- \ No newline at end of file diff --git a/content/zh/docs/v3.4/devops-user-guide/how-to-use/devops-settings/add-cd-allowlist.md b/content/zh/docs/v3.4/devops-user-guide/how-to-use/devops-settings/add-cd-allowlist.md new file mode 100644 index 000000000..e768f8258 --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/how-to-use/devops-settings/add-cd-allowlist.md @@ -0,0 +1,28 @@ +--- +title: "添加持续部署白名单" +keywords: 'Kubernetes, GitOps, KubeSphere, 持续部署,白名单' +description: '介绍如何在 KubeSphere 中添加持续部署白名单。' +linkTitle: "添加持续部署白名单" +weight: 11243 +--- +在 KubeSphere 3.3 中,您可以通过设置白名单限制资源持续部署的目标位置。 + +## 准备工作 + +- 您需要有一个企业空间、一个 DevOps 项目和一个用户 (`project-regular`),并已邀请此帐户至 DevOps 项目中且授予 `operator` 角色。如果尚未准备好,请参考[创建企业空间、项目、用户和角色](../../../../quick-start/create-workspace-and-project/)。 + +- 您需要启用 [KubeSphere DevOps 系统](../../../../pluggable-components/devops/)。 + +- 您需要[导入代码仓库](../../../../devops-user-guide/how-to-use/code-repositories/import-code-repositories/)。 + +## 操作步骤 + +1. 以 `project-regular` 用户登录 KubeSphere 控制台,在左侧导航树,点击 **DevOps 项目**。 + +2. 在右侧的 **DevOps 项目**页面,点击您创建的 DevOps 项目。 + +3. 在左侧的导航树,选择 **DevOps 项目设置 > 基本信息**。 + +4. 在右侧**基本信息**下的**持续部署白名单**区域,点击**编辑白名单**。 + +5. 
在弹出的**编辑白名单**对话框,选择代码仓库和部署集群和项目,点击**确定**。您可以继续点击**添加**以添加多个代码仓库和部署位置。 diff --git a/content/zh/docs/v3.4/devops-user-guide/how-to-use/devops-settings/credential-management.md b/content/zh/docs/v3.4/devops-user-guide/how-to-use/devops-settings/credential-management.md new file mode 100644 index 000000000..3214a8f89 --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/how-to-use/devops-settings/credential-management.md @@ -0,0 +1,93 @@ +--- +title: "凭证管理" +keywords: 'Kubernetes, Docker, 凭证, KubeSphere, DevOps' +description: '创建凭证以便您的流水线可以与第三方应用程序或网站进行交互。' +linkTitle: "凭证管理" +weight: 11241 +--- + +凭证是包含敏感信息的对象,例如用户名和密码、SSH 密钥和令牌 (Token)。当 KubeSphere DevOps 流水线运行时,会与外部环境中的对象进行交互,以执行一系列任务,包括拉取代码、推送和拉取镜像以及运行脚本等。此过程中需要提供相应的凭证,而这些凭证不会明文出现在流水线中。 + +具有必要权限的 DevOps 项目用户可以为 Jenkins 流水线配置凭证。用户在 DevOps 项目中添加或配置这些凭证后,便可以在 DevOps 项目中使用这些凭证与第三方应用程序进行交互。 + +目前,您可以在 DevOps 项目中创建以下 4 种类型的凭证: + +- **用户名和密码**:用户名和密码,可以作为单独的组件处理,或者作为用冒号分隔的字符串(格式为 `username:password`)处理,例如 GitHub 和 GitLab帐户。 +- **SSH 密钥**:带有私钥的用户名,SSH 公钥/私钥对。 +- **访问令牌**:具有访问权限的令牌。 +- **kubeconfig**:用于配置跨集群认证。 + +本教程演示如何在 DevOps 项目中创建和管理凭证。有关如何使用凭证的更多信息,请参见[使用 Jenkinsfile 创建流水线](../../../../devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-jenkinsfile/)和[使用图形编辑面板创建流水线](../../../../devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-graphical-editing-panel/)。 + +## 准备工作 + +- 您已启用 [KubeSphere DevOps 系统](../../../../pluggable-components/devops/)。 +- 您需要有一个企业空间、一个 DevOps 项目和一个用户 (`project-regular`),并已邀请此帐户至 DevOps 项目中且授予 `operator` 角色。如果尚未准备好,请参见[创建企业空间、项目、用户和角色](../../../../quick-start/create-workspace-and-project/)。 + +## 创建凭证 + +1. 以 `project-regular` 身份登录 KubeSphere 控制台。 + +2. 进入您的 DevOps 项目,在左侧导航栏,选择**DevOps 项目设置 > 凭证**。 + +3. 在右侧的**凭证**区域,点击**创建**。 + +4. 在弹出的**创建凭证**对话框,输入凭证名称,并选择凭证类型。不同的凭证类型需要设置的参数不同,具体请参考以下内容。 +### 创建用户名和密码凭证 + +以创建 GitHub 用户凭证为例,您需要设置以下参数: + +- 名称:设置凭证名称,如 `github-id`。 +- 类型:选择**用户名和密码**。 +- 用户名:输入您的 GitHub 用户名。 +- 密码/令牌:输入您的 GitHub 令牌。 +- 描述:凭证的简介。 + +{{< notice note >}} + +- 自 2021 年 8 月起,GitHub 要求使用基于令牌的身份验证,此处需要输入令牌,而非 GitHub 密码。关于如如何生成令牌,请参阅[创建个人访问令牌](https://docs.github.com/cn/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token)。 + +- 如果您的帐户或密码中包含任何特殊字符,例如 `@` 和 `$`,可能会因为无法识别而在流水线运行时导致错误。在这种情况下,您需要先在一些第三方网站(例如 [urlencoder](https://www.urlencoder.org/))上对帐户或密码进行编码,然后将输出结果复制粘贴作为您的凭证信息。 + +{{}} + +### 创建 SSH 密钥凭证 + +您需要设置以下参数: + +- 名称:设置凭证名称。 +- 类型:选择**SSH 密钥**。 +- 用户名:输入您的用户名。 +- 私钥:输入您的 SSH 密钥。 +- 密码短语:输入密码短语。为了更好保护您的账户安全,建议设置该参数。 +- 描述:凭证的简介。 + +### 创建访问令牌凭证 + +您需要设置以下参数: + +- 名称:设置凭证名称。 +- 类型:选择**访问令牌**。 +- 令牌:输入您的令牌。 +- 描述:凭证的简介。 + +### 创建 kubeconfig 凭证 + +您需要设置以下参数: + +- 名称:设置凭证名称,例如 `demo-kubeconfig`。 +- 类型:选择**kubeconfig**。 +- 内容:系统自动获取当前 Kubernetes 集群的 kubeconfig 文件内容,并自动填充该字段,您无须做任何更改。但是访问其他集群时,您可能需要更改 kubeconfig。 +- 描述:凭证的简介。 + +{{< notice info >}} + +用于配置集群访问的文件称为 kubeconfig 文件。这是引用配置文件的通用方法。有关更多信息,请参见 [Kubernetes 官方文档](https://kubernetes.io/zh/docs/concepts/configuration/organize-cluster-access-kubeconfig/)。 + +{{}} + +## 查看和管理凭证 + +1. 点击已创建的凭证,进入其详情页面,您可以查看帐户详情和与此凭证相关的所有事件。 + +2. 
您也可以在此页面上编辑或删除凭证。请注意,编辑凭证时,KubeSphere 不会显示现有用户名或密码信息。如果输入新的用户名和密码,则前一个将被覆盖。 diff --git a/content/zh/docs/v3.4/devops-user-guide/how-to-use/devops-settings/role-and-member-management.md b/content/zh/docs/v3.4/devops-user-guide/how-to-use/devops-settings/role-and-member-management.md new file mode 100644 index 000000000..82fbcb4ec --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/how-to-use/devops-settings/role-and-member-management.md @@ -0,0 +1,76 @@ +--- +title: "角色和成员管理" +keywords: 'Kubernetes, KubeSphere, DevOps, 角色, 成员' +description: '在 DevOps 项目中创建并管理各种角色和成员。' +linkTitle: "角色和成员管理" +weight: 11242 +--- + +本教程演示如何在 DevOps 项目中管理角色和成员。 + +在 DevOps 项目范围内,您可以向角色授予以下资源的权限: + +- 流水线 +- 凭证 +- DevOps 项目设置 +- 访问控制 + +## 准备工作 + +至少已创建一个 DevOps 项目,例如 `demo-devops`。此外,您需要一个在 DevOps 项目级别具有 `admin` 角色的用户(例如 `devops-admin`)。 + +## 内置角色 + +在 **DevOps 项目角色**中,有三个可用的内置角色,如下所示。创建 DevOps 项目时,KubeSphere 会自动创建内置角色,并且无法编辑或删除这些角色。 + +| 内置角色 | 描述信息 | +| ------------------ | ------------------------------------------------------------ | +| viewer | DevOps 项目观察者,可以查看 DevOps 项目下所有的资源。 | +| operator | DevOps 项目普通成员,可以在 DevOps 项目下创建流水线凭证等。 | +| admin | DevOps 项目管理员,可以管理 DevOps 项目下所有的资源。 | + +## 创建 DevOps 项目角色 + +1. 以 `devops-admin` 身份登录控制台,然后前往 **DevOps 项目**页面选择一个 DevOps 项目(例如 `demo-devops`)。 + + {{< notice note >}} + + 本教程使用 `devops-admin` 帐户作为示例。只要您使用的帐户被授予的角色包含 DevOps 项目级别**访问控制**中的**成员查看**、**角色管理**和**角色查看**的权限,此帐户便可以创建 DevOps 项目角色。 + + {{}} + +2. 转到 **DevOps 项目设置**中的 **DevOps 项目角色**,点击**创建**并设置**名称**。在本示例中,将创建一个名为 `pipeline-creator` 的角色。点击**编辑权限**继续。 + +3. 在**流水线管理**中,选择您希望授予该角色的权限。例如,为此角色选择了**流水线管理**和**流水线查看**。点击**确定**完成操作。 + + {{< notice note >}} + + **依赖于**表示首先需要选择主要权限(**依赖于**之后列出的),以便可以分配关联权限。 + + {{}} + +4. 新创建的角色将列在 **DevOps 项目角色**中。您可以点击右侧的 icon 对其进行编辑。 + + {{< notice note >}} + + `pipeline-creator` 角色仅被授予**流水线管理**和**流水线查看**权限,可能无法满足您的实际需求。本示例仅用于演示,您可以根据实际需要创建自定义角色。 + + {{}} + +## 邀请新成员 + +1. 在 **DevOps 项目设置**中选择 **DevOps 项目成员**,然后点击**邀请**。 + +2. 点击 icon 邀请帐户加入此 DevOps 项目,并向此帐户授予 `pipeline-creator` 角色。 + + {{< notice note >}} + + 必须先邀请用户加入 DevOps 项目所在的企业空间。 + + {{}} + +3. 点击**确定**将用户添加到此 DevOps 项目。在 **DevOps 项目成员**中,您可以看到列出了新邀请的成员。 + +4. 您还可以通过编辑现有成员来更改其角色或将其从 DevOps 项目中删除。 + + diff --git a/content/zh/docs/v3.4/devops-user-guide/how-to-use/devops-settings/set-ci-node.md b/content/zh/docs/v3.4/devops-user-guide/how-to-use/devops-settings/set-ci-node.md new file mode 100644 index 000000000..a84330ffa --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/how-to-use/devops-settings/set-ci-node.md @@ -0,0 +1,49 @@ +--- +title: "为依赖项缓存设置 CI 节点" +keywords: 'Kubernetes, Docker, KubeSphere, Jenkins, CICD, 流水线, 依赖项缓存' +description: '配置专门用于持续集成 (CI) 的一个或一组节点,加快流水线中的构建过程。' +linkTitle: "为依赖项缓存设置 CI 节点" +weight: 11245 +--- + +通常情况下,构建应用程序的过程中需要拉取不同的依赖项。这可能会导致某些问题,例如拉取时间长和网络不稳定,这会进一步导致构建失败。要为您的流水线提供更可靠和稳定的环境,您可以配置一个节点或一组节点,专门用于持续集成 (CI)。这些 CI 节点可以通过使用缓存来加快构建过程。 + +本教程演示如何设置 CI 节点,以便 KubeSphere 将流水线的任务以及 S2I/B2I 构建的任务调度到这些节点。 + +## 准备工作 + +您需要一个具有**集群管理**权限的帐户。例如,您可以直接以 `admin` 身份登录控制台或者创建一个具有该权限的新角色并将该新角色其分配给一个用户。 + +## 标记 CI 节点 + +1. 点击左上角的**平台管理**,然后选择**集群管理**。 + +2. 如果您已经启用[多集群功能](../../../../multicluster-management/)并已导入成员集群,那么您可以选择一个特定集群以查看其节点。如果尚未启用该功能,请直接参考下一步。 + +3. 转到**节点**下的**集群节点**,您可以在其中查看当前集群中的现有节点。 + +4. 从列表中选择一个节点用来运行 CI 任务。点击节点名称以转到其详情页面。点击**更多操作**,然后选择**编辑标签**。 + +5. 
在弹出的对话框中,您可以看到一个标签的键是 `node-role.kubernetes.io/worker`。输入 `ci` 作为此标签的值,然后点击**保存**。 + + {{< notice note >}} + + 您也可以点击**添加**来按需添加新标签。 + + {{}} + +## 给 CI 节点添加污点 + +流水线和 S2I/B2I 工作流基本上会根据[节点亲和性](https://kubernetes.io/zh/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity)调度到该节点。如果要将节点专用于 CI 任务,即不允许将其他工作负载调度到该节点,您可以在该节点上添加[污点](https://kubernetes.io/zh/docs/concepts/scheduling-eviction/taint-and-toleration/)。 + +1. 点击**更多操作**,然后选择**编辑污点**。 + +2. 点击**添加污点**,然后输入键 `node.kubernetes.io/ci` 而不指定值。您可以根据需要选择 `阻止调度`、`尽可能阻止调度`或`阻止调度并驱逐现有容器组` 。 + +3. 点击**保存**。KubeSphere 将根据您设置的污点调度任务。您现在可以回到 DevOps 流水线上进行操作。 + + {{< notice tip >}} + + 本教程还涉及与节点管理有关的操作。有关详细信息,请参见[节点管理](../../../../cluster-administration/nodes/)。 + + {{}} diff --git a/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/_index.md b/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/_index.md new file mode 100644 index 000000000..a74613157 --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "流水线" +weight: 11210 + +_build: + render: false +--- \ No newline at end of file diff --git a/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/choose-jenkins-agent.md b/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/choose-jenkins-agent.md new file mode 100644 index 000000000..2dc40dfe1 --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/choose-jenkins-agent.md @@ -0,0 +1,134 @@ +--- +title: "选择 Jenkins Agent" +keywords: 'Kubernetes, KubeSphere, Docker, DevOps, Jenkins, Agent' +description: '指定 Jenkins agent 并为流水线使用内置的 podTemplate。' +linkTitle: "选择 Jenkins Agent" +weight: 112190 +--- + +`agent` 部分指定整个流水线或特定阶段 (Stage) 将在 Jenkins 环境中执行的位置,具体取决于该 `agent` 部分的放置位置。该部分必须在 `pipeline` 块的顶层进行定义,但是阶段级别的使用为可选。有关更多信息,请参见 [Jenkins 官方文档](https://www.jenkins.io/zh/doc/book/pipeline/syntax/#代理)。 + +## 内置 podTemplate + +podTemplate 是一种 Pod 模板,该 Pod 用于创建 Agent。用户可以定义在 Kubernetes 插件中使用的 podTemplate。 + +当流水线运行时,每个 Jenkins Agent Pod 必须具有一个名为 `jnlp` 的容器,用于 Jenkins Controller 和 Jenkins Agent 之间进行通信。另外,用户可以在 podTemplate 中添加容器以满足自己的需求。用户可以选择使用自己的 Pod YAML 来灵活地控制运行时环境 (Runtime),并且可以通过 `container` 命令来切换容器。请参见以下示例。 + +```groovy +pipeline { + agent { + kubernetes { + //cloud 'kubernetes' + label 'mypod' + yaml """ +apiVersion: v1 +kind: Pod +spec: + containers: + - name: maven + image: maven:3.3.9-jdk-8-alpine + command: ['cat'] + tty: true +""" + } + } + stages { + stage('Run maven') { + steps { + container('maven') { + sh 'mvn -version' + } + } + } + } +} +``` + +同时,KubeSphere 内置了一些 podTemplate,用户无需编写 YAML 文件,极大降低学习成本。 + +在目前版本中,KubeSphere 内置了 4 种类型的 podTemplate:`base`、`nodejs`、`maven` 和 `go`,并且在 Pod 中提供隔离的 Docker 环境。 + +您可以通过指定 Agent 的标签来使用内置 podTempalte。例如,要使用 nodejs 的 podTemplate,您可以在创建流水线时指定标签为 `nodejs`,具体参见以下示例。 + +![Jenkins Agent](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/choose-jenkins-agent/jenkins-agent.PNG) + +```groovy +pipeline { + agent { + node { + label 'nodejs' + } + } + + stages { + stage('nodejs hello') { + steps { + container('nodejs') { + sh 'yarn -v' + sh 'node -v' + sh 'docker version' + sh 'docker images' + } + } + } + } +} +``` + +### podTemplate base + +| 名称 | 类型 / 版本 | +| --- | --- | +|Jenkins Agent 标签 | base | +|容器名称 | base | +| 操作系统 | centos-7 | +|Docker| 18.06.0| +|Helm | 2.11.0 | +|Kubectl| 稳定版 | +|内置工具 | unzip、which、make、wget、zip、bzip2、git | + + +### podTemplate nodejs + +| 名称 | 类型 / 版本 | +| --- | --- | +|Jenkins Agent 标签 | nodejs | +|容器名称 | nodejs | +| 操作系统 | centos-7 | +|Node | 9.11.2 | 
+|Yarn | 1.3.2 | +| Docker | 18.06.0 | +| Helm | 2.11.0 | +|Kubectl | 稳定版 | +|内置工具| unzip、which、make、wget、zip、bzip2、git | + + +### podTemplate maven + +| 名称 | 类型 / 版本 | +| --- | --- | +| Jenkins Agent 标签 | maven | +| 容器名称 | maven | +| 操作系统 | centos-7 | +| Jdk | openjdk-1.8.0 | +| Maven | 3.5.3| +| Docker| 18.06.0 | +| Helm | 2.11.0 | +| Kubectl| 稳定版 | +| 内置工具 | unzip、which、make、wget、zip、bzip2、git | + + +### podTemplate go + +| 名称 | 类型 / 版本 | +| --- | --- | +| Jenkins Agent 标签 | go | +| 容器名称 | go | +| 操作系统 | centos-7 | +| Go | 1.11 | +| GOPATH | /home/jenkins/go | +| GOROOT | /usr/local/go | +| Docker | 18.06.0 | +| Helm | 2.11.0 | +| Kubectl | 稳定版 | +| 内置工具 | unzip、which、make、wget、zip、bzip2、git | diff --git a/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-graphical-editing-panel.md b/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-graphical-editing-panel.md new file mode 100644 index 000000000..f712d34c9 --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-graphical-editing-panel.md @@ -0,0 +1,389 @@ +--- +title: "使用图形编辑面板创建流水线" +keywords: 'KubeSphere, Kubernetes, Jenkins, CICD, 图形化流水线' +description: '学习如何使用 KubeSphere 图形编辑面板创建并运行流水线。' +linkTitle: '使用图形编辑面板创建流水线' +weight: 11211 +--- + +KubeSphere 中的图形编辑面板包含用于 Jenkins [阶段 (Stage)](https://www.jenkins.io/zh/doc/book/pipeline/#阶段) 和[步骤 (Step)](https://www.jenkins.io/zh/doc/book/pipeline/#步骤) 的所有必要操作。您可以直接在交互式面板上定义这些阶段和步骤,无需创建任何 Jenkinsfile。 + +本教程演示如何在 KubeSphere 中使用图形编辑面板创建流水线。KubeSphere 在整个过程中将根据您在编辑面板上的设置自动生成 Jenkinsfile,您无需手动创建 Jenkinsfile。待流水线成功运行,它会相应地在您的开发环境中创建一个部署 (Deployment) 和一个服务 (Service),并将镜像推送至 Docker Hub。 + +## 准备工作 + +- 您需要[启用 KubeSphere DevOps 系统](../../../../pluggable-components/devops/)。 +- 您需要有一个 [Docker Hub](http://www.dockerhub.com/) 帐户。 +- 您需要创建一个企业空间、一个 DevOps 项目和一个用户 (`project-regular`),必须邀请该用户至 DevOps 项目中并赋予 `operator` 角色。如果尚未创建,请参见[创建企业空间、项目、用户和角色](../../../../quick-start/create-workspace-and-project/)。 +- 设置 CI 专用节点来运行流水线。有关更多信息,请参见[为缓存依赖项设置 CI 节点](../../../../devops-user-guide/how-to-use/devops-settings/set-ci-node/)。 +- 配置您的电子邮件服务器用于接收流水线通知(可选)。有关更多信息,请参见[为 KubeSphere 流水线设置电子邮件服务器](../../../../devops-user-guide/how-to-use/pipelines/jenkins-email/)。 +- 配置 SonarQube 将代码分析纳入流水线中(可选)。有关更多信息,请参见[将 SonarQube 集成到流水线](../../../../devops-user-guide/how-to-integrate/sonarqube/)。 + +## 流水线概述 + +本示例流水线包括以下六个阶段。 + +![Pipeline](https://pek3b.qingstor.com/kubesphere-docs/png/20190516091714.png#align=left&display=inline&height=1278&originHeight=1278&originWidth=2190&search=&status=done&width=2190) + +{{< notice note >}} + +- **阶段 1:Checkout SCM**:从 GitHub 仓库拉取源代码。 +- **阶段 2:单元测试**:待该测试通过后才会进行下一阶段。 +- **阶段 3:代码分析**:配置 SonarQube 用于静态代码分析。 +- **阶段 4:构建并推送**:构建镜像并附上标签 `snapshot-$BUILD_NUMBER` 推送至 Docker Hub,其中 `$BUILD_NUMBER` 是流水线活动列表中的记录的序列号。 +- **阶段 5:制品**:生成一个制品(JAR 文件包)并保存。 +- **阶段 6:部署至开发环境**:在开发环境中创建一个部署和一个服务。该阶段需要进行审核,部署成功运行后,会发送电子邮件通知。 + +{{}} + +## 动手实验 + +### 步骤 1:创建凭证 + +1. 
以 `project-regular` 身份登录 KubeSphere 控制台。转到您的 DevOps 项目,在 **DevOps 项目设置**下的**凭证**页面创建以下凭证。有关如何创建凭证的更多信息,请参见[凭证管理](../../../../devops-user-guide/how-to-use/devops-settings/credential-management/)。 + + {{< notice note >}} + + 如果您的帐户或密码中有任何特殊字符,例如 `@` 和 `$`,可能会因为无法识别而在流水线运行时导致错误。在这种情况下,您需要先在一些第三方网站(例如 [urlencoder](https://www.urlencoder.org/))上对帐户或密码进行编码,然后将输出结果复制粘贴作为您的凭证信息。 + + {{}} + + | 凭证 ID | 类型 | 用途 | + | --------------- | ------------ | ---------- | + | dockerhub-id | 用户名和密码 | Docker Hub | + | demo-kubeconfig | kubeconfig | Kubernetes | + +2. 您还需要为 SonarQube 创建一个凭证 ID (`sonar-token`),用于上述的阶段 3(代码分析)。请参阅[为新项目创建 SonarQube 令牌 (Token)](../../../../devops-user-guide/how-to-integrate/sonarqube/#create-sonarqube-token-for-new-project),在**访问令牌**类型的凭证的**令牌**字段中输入 SonarQube 令牌。点击**确定**完成操作。 + +3. 您可以在列表中看到已创建的三个凭证。 + +### 步骤 2:创建项目 + +在本教程中,示例流水线会将 [sample](https://github.com/kubesphere/devops-maven-sample/tree/sonarqube) 应用部署至一个项目。因此,您必须先创建一个项目(例如 `kubesphere-sample-dev`)。待流水线成功运行,会在该项目中自动创建该应用的部署和服务。 + +您可以使用 `project-admin` 帐户创建项目。此外,该用户也是 CI/CD 流水线的审核员。请确保将 `project-regular` 帐户邀请至该项目并授予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../../quick-start/create-workspace-and-project/)。 + +### 步骤 3:创建流水线 + +1. 请确保以 `project-regular` 身份登录 KubeSphere 控制台,转到您的 DevOps 项目。在**流水线**页面点击**创建**。 + +2. 在弹出的对话框中,将它命名为 `graphical-pipeline`,点击**下一步**。 + +3. 在**高级设置**页面,点击**添加**,添加以下三个字符串参数。这些参数将用于流水线的 Docker 命令。添加完成后,点击**创建**。 + + | 参数类型 | 名称 | 值 | 描述信息 | + | -------- | ------------------- | --------------- | ------------------------------------------ | + | 字符串 | REGISTRY | `docker.io` | 镜像仓库地址。本示例使用 `docker.io`。 | + | 字符串 | DOCKERHUB_NAMESPACE | Docker ID | 您的 Docker Hub 帐户或该帐户下的组织名称。 | + | 字符串 | APP_NAME | `devops-sample` | 应用名称。 | + + {{< notice note >}} + + 有关其他字段,请直接使用默认值或者参考[流水线设置](../pipeline-settings/)以自定义配置。 + + {{}} + +4. 创建的流水线会显示在列表中。 + +### 步骤 4:编辑流水线 + +- 点击流水线进入其详情页面。要使用图形编辑面板,请点击**任务状态**选项卡下的**编辑流水线**。在弹出的对话框中,点击**自定义流水线**。该流水线包括六个阶段,请按照以下步骤设置每个阶段。 + +- 您也可以使用 KubeSphere 提供的[内置流水线模板](../use-pipeline-templates/)。 + +{{< notice note >}} + +流水线详情页面会显示**同步状态**,它是 KubeSphere 和 Jenkins 之间的同步结果。若同步成功,您会看到**成功**图标。您也可以点击**编辑 Jenkinsfile** 手动为流水线创建一个 Jenkinsfile。 + +{{}} + +#### 阶段 1:拉取源代码 (Checkout SCM) + +图形编辑面板包括两个区域:左侧的**画布**和右侧的**内容**。它会根据您对不同阶段和步骤的配置自动生成一个 Jenkinsfile,为开发者提供更加用户友好的操作体验。 + +{{< notice note >}} + +流水线包括[声明式流水线](https://www.jenkins.io/zh/doc/book/pipeline/syntax/#声明式流水线)和[脚本化流水线](https://www.jenkins.io/zh/doc/book/pipeline/syntax/#脚本化流水线)。目前,您可以使用该面板创建声明式流水线。有关流水线语法的更多信息,请参见 [Jenkins 文档](https://www.jenkins.io/zh/doc/book/pipeline/syntax/)。 + +{{}} + +1. 在图形编辑面板上,从**类型**下拉列表中选择 **node**,从 **Label** 下拉列表中选择 **maven**。 + + {{< notice note >}} + + `agent` 用于定义执行环境。`agent` 指令指定 Jenkins 执行流水线的位置和方式。有关更多信息,请参见[选择 Jenkins Agent](../choose-jenkins-agent/)。 + + {{}} + + ![图形面板](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/graphical_panel.png) + +2. 请点击左侧的加号图标来添加阶段。点击**添加步骤**上方的文本框,然后在右侧的**名称**字段中为该阶段设置名称(例如 `Checkout SCM`)。 + + ![编辑面板](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/edit_panel.png) + +3. 
点击**添加步骤**。在列表中选择 **git**,以从 GitHub 拉取示例代码。在弹出的对话框中,填写必需的字段。点击**确定**完成操作。 + + - **URL**:输入 GitHub 仓库地址 `https://github.com/kubesphere/devops-maven-sample.git`。请注意,这里是示例地址,您需要使用您自己的仓库地址。 + - **凭证 ID**:本教程中无需输入凭证 ID。 + - **分支**:如果您将其留空,则默认为 master 分支。请输入 `sonarqube`,或者如果您不需要代码分析阶段,请将其留空。 + + ![输入仓库 URL](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/enter_repo_url.png) + +4. 第一阶段设置完成。 + + ![第一阶段设置完成](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/first_stage_set.png) + +#### 阶段 2:单元测试 + +1. 点击阶段 1 右侧的加号图标添加新的阶段,以在容器中执行单元测试。将它命名为 `Unit Test`。 + + ![单元测试](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/unit_test.png) + +2. 点击**添加步骤**,在列表中选择**指定容器**。将其命名为 `maven` 然后点击**确定**。 + + ![指定容器](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/container_maven.png) + +3. 点击**添加嵌套步骤**,在 `maven` 容器下添加一个嵌套步骤。在列表中选择 **shell** 并在命令行中输入以下命令。点击**确定**保存操作。 + + ```shell + mvn clean -gs `pwd`/configuration/settings.xml test + ``` + + {{< notice note >}} + + 您可以在图形编辑面板上指定在给定阶段指令中执行的一系列[步骤](https://www.jenkins.io/zh/doc/book/pipeline/syntax/#steps)。 + + {{}} + + +#### 阶段 3:代码分析(可选) + +本阶段使用 SonarQube 来测试您的代码。如果您不需要代码分析,可以跳过该阶段。 + +1. 点击 `Unit Test` 阶段右侧的加号图标添加一个阶段,以在容器中进行 SonarQube 代码分析。将它命名为 `Code Analysis`。 + + ![代码分析阶段](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/code_analysis_stage.png) + +2. 在 **Code Analysis** 中,点击**任务**下的**添加步骤**,选择**指定容器**。将其命名为 `maven` 然后点击**确定**。 + + ![Maven 容器](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/maven_container.png) + +3. 点击 `maven` 容器下的**添加嵌套步骤**,以添加一个嵌套步骤。点击**添加凭证**并从**凭证 ID** 列表中选择 SonarQube 令牌 (`sonar-token`)。在**文本变量**中输入 `SONAR_TOKEN`,然后点击**确定**。 + + ![SonarQube 凭证](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/sonarqube_credentials.png) + +4. 在**添加凭证**步骤下,点击**添加嵌套步骤**为其添加一个嵌套步骤。 + + ![嵌套步骤](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/nested_step.png) + +5. 点击 **Sonarqube 配置**,在弹出的对话框中保持默认名称 `sonar` 不变,点击**确定**保存操作。 + + ![sonar](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/sonar_env.png) + +6. 在 **Sonarqube 配置**步骤下,点击**添加嵌套步骤**为其添加一个嵌套步骤。 + + ![添加嵌套步骤](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/add_nested_step.png) + +7. 点击 **shell** 并在命令行中输入以下命令,用于 sonarqube 分支和认证,点击**确定**完成操作。 + + ```shell + mvn sonar:sonar -Dsonar.login=$SONAR_TOKEN + ``` + + ![新的 SonarQube shell](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/sonarqube_shell_new.png) + +8. 点击**指定容器**步骤下的**添加嵌套步骤**(第三个),选择**超时**。在时间中输入 `1` 并将单位选择为**小时**,点击**确定**完成操作。 + + ![添加嵌套步骤-2](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/add_nested_step_2.png) + + ![超时](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/timeout_set.png) + +9. 
点击**超时**步骤下的**添加嵌套步骤**,选择**代码质量检查 (SonarQube)**。在弹出的对话框中选择**检查通过后开始后续任务**。点击**确定**保存操作。 + + ![waitforqualitygate](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/waitforqualitygate_set.png) + + ![sonar 就绪](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/sonar_ready.png) + +#### 阶段 4:构建并推送镜像 + +1. 点击前一个阶段右侧的加号图标添加一个新的阶段,以构建并推送镜像至 Docker Hub。将其命名为 `Build and Push`。 + + ![构建并推送镜像](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/build_and_push_image.png) + +2. 点击**任务**下的**添加步骤**,选择**指定容器**,将其命名为 `maven`,然后点击**确定**。 + + ![maven 设置完成](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/maven_set_2.png) + +3. 点击 `maven` 容器下的**添加嵌套步骤**添加一个嵌套步骤。在列表中选择 **shell** 并在弹出窗口中输入以下命令,点击**确定**完成操作。 + + ```shell + mvn -Dmaven.test.skip=true clean package + ``` + + ![maven 嵌套步骤](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/nested_step_maven.png) + +4. 再次点击**添加嵌套步骤**,选择 **shell**。在命令行中输入以下命令,以根据 [Dockerfile](https://github.com/kubesphere/devops-maven-sample/blob/sonarqube/Dockerfile-online) 构建 Docker 镜像。点击**确定**确认操作。 + + {{< notice note >}} + + 请勿遗漏命令末尾的点 `.`。 + + {{}} + + ```shell + docker build -f Dockerfile-online -t $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BUILD_NUMBER . + ``` + + ![shell 命令](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/shell_command.png) + +5. 再次点击**添加嵌套步骤**,选择**添加凭证**。在弹出的对话框中填写以下字段,点击**确定**确认操作。 + + - **凭证名称**:选择您创建的 Docker Hub 凭证,例如 `dockerhub-id`。 + - **密码变量**:输入 `DOCKER_PASSWORD`。 + - **用户名变量**:输入 `DOCKER_USERNAME`。 + + {{< notice note >}} + + 出于安全原因,帐户信息在脚本中显示为变量。 + + {{}} + + ![docker 凭证](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/docker_credential.png) + +6. 在**添加凭证**步骤中点击**添加嵌套步骤**(第一个)。选择 **shell** 并在弹出窗口中输入以下命令,用于登录 Docker Hub。点击**确定**确认操作。 + + ```shell + echo "$DOCKER_PASSWORD" | docker login $REGISTRY -u "$DOCKER_USERNAME" --password-stdin + ``` + + ![Docker 登录命令](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/login_docker_command.png) + +7. 在**添加凭证**步骤中点击**添加嵌套步骤**。选择 **shell** 并输入以下命令,将 SNAPSHOT 镜像推送至 Docker Hub。点击**确定**完成操作。 + + ```shell + docker push $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BUILD_NUMBER + ``` + + ![推送 snapshot 至 Docker](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/push_to_docker.png) + +#### 阶段 5:生成制品 + +1. 点击 **Build and Push** 阶段右侧的加号图标添加一个新的阶段,以保存制品,将其命名为 `Artifacts`。本示例使用 JAR 文件包。 + + ![添加制品阶段](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/add_artifact_stage.png) + +2. 选中 **Artifacts** 阶段,点击**任务**下的**添加步骤**,选择**保存制品**。在弹出的对话框中输入 `target/*.jar`,用于设置 Jenkins 中制品的保存路径。点击**确定**完成操作。 + + ![制品信息](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/artifact_info.png) + +#### 阶段 6:部署至开发环境 + +1. 点击 **Artifacts** 阶段右侧的加号图标添加最后一个阶段,将其命名为 `Deploy to Dev`。该阶段用于将资源部署至您的开发环境(即 `kubesphere-sample-dev` 项目)。 + + ![部署至开发环境](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/deploy_to_dev.png) + +2. 
点击 **Deploy to Dev** 阶段下的**添加步骤**,在列表中选择**审核**,然后在**消息**字段中填入 `@project-admin`,即 `project-admin` 帐户在流水线运行到该阶段时会进行审核。点击**确定**保存操作。 + + ![输入信息](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/input_message.png) + + {{< notice note >}} + + 在 KubeSphere 3.3 中,能够运行流水线的帐户也能够继续或终止该流水线。此外,流水线创建者或者您指定的帐户也有权限继续或终止流水线。 + + {{}} + +3. 再次点击 **Deploy to Dev** 阶段下的**添加步骤**。在列表中选择**指定容器**,将其命名为 `maven` 然后点击**确定**。 + +4. 点击 `maven` 容器步骤下的**添加嵌套步骤**。在列表中选择**添加凭证**,在弹出的对话框中填写以下字段,然后点击**确定**。 + + - 凭证名称:选择您创建的 kubeconfig 凭证,例如 `demo-kubeconfig`。 + - kubeconfig 变量:输入 `KUBECONFIG_CONTENT`。 + +5. 点击**添加凭证**步骤下的**添加嵌套步骤**。在列表中选择 **shell**,在弹出的对话框中输入以下命令,然后点击**确定**。 + + ```shell + mkdir ~/.kube + echo "$KUBECONFIG_CONTENT" > ~/.kube/config + envsubst < deploy/dev-ol/devops-sample-svc.yaml | kubectl apply -f - + envsubst < deploy/dev-ol/devops-sample.yaml | kubectl apply -f - + ``` + +6. 如果您想在流水线成功运行时接收电子邮件通知,请点击**添加步骤**,选择**邮件**,以添加电子邮件信息。请注意,配置电子邮件服务器是可选操作,如果您跳过该步骤,依然可以运行流水线。 + + {{< notice note >}} + + 有关配置电子邮件服务器的更多信息,请参见[为 KubeSphere 流水线设置电子邮件服务器](../jenkins-email/)。 + + {{}} + +7. 待您完成上述步骤,请在右下角点击**保存**。随后,您可以看到该流水线有完整的工作流,并且每个阶段也清晰列示。当您用图形编辑面板定义流水线时,KubeSphere 会自动创建相应的 Jenkinsfile。点击**编辑 Jenkinsfile** 查看该 Jenkinsfile。 + + {{< notice note >}} + + 在**流水线**页面,您可以点击该流水线右侧的 icon,然后选择**复制**来创建该流水线的副本。如果您需要同时运行多个不包含多分支的流水线,您可以全部选中这些流水线,然后点击**运行**来批量运行它们。 + + {{}} + +### 步骤 5:运行流水线 + +1. 您需要手动运行使用图形编辑面板创建的流水线。点击**运行**,您可以在弹出的对话框中看到步骤 3 中已定义的三个字符串参数。点击**确定**来运行流水线。 + + ![运行流水线](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/run_pipeline.png) + +2. 要查看流水线的状态,请转到**运行记录**选项卡,点击您想查看的记录。 + +3. 稍等片刻,流水线如果成功运行,则会在 **Deploy to Dev** 阶段停止。`project-admin` 作为流水线的审核员,需要进行审批,然后资源才会部署至开发环境。 + + ![流水线成功运行](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/pipeline_successful.png) + +4. 登出 KubeSphere 控制台,以 `project-admin` 身份重新登录。转到您的 DevOps 项目,点击 `graphical-pipeline` 流水线。在**运行记录**选项卡下,点击要审核的记录。要批准流水线,请点击**继续**。 + +### 步骤 6:查看流水线详情 + +1. 以 `project-regular` 身份重新登录控制台。转到您的 DevOps 项目,点击 `graphical-pipeline` 流水线。在**运行记录**选项卡下,点击**状态**下标记为**成功**的记录。 + +2. 如果所有配置都成功运行,您可以看到所有阶段都已完成。 + +3. 在右上角点击**查看日志**,查看所有日志。点击每个阶段查看其详细日志。您可以根据日志排除故障和问题,也可以将日志下载到本地进行进一步分析。 + +### 步骤 7:下载制品 + +点击**制品**选项卡,然后点击右侧的图标下载该制品。 + +![下载制品](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/download_artifact.png) + +### 步骤 8:查看代码分析结果 + +在**代码检查**页面,可以查看由 SonarQube 提供的本示例流水线的代码分析结果。如果您没有事先配置 SonarQube,则该部分不可用。有关更多信息,请参见[将 SonarQube 集成到流水线](../../../how-to-integrate/sonarqube/)。 + +![SonarQube 详细结果](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/sonarqube_result_detail.png) + +### 步骤 9:验证 Kubernetes 资源 + +1. 如果流水线的每个阶段都成功运行,则会自动构建一个 Docker 镜像并推送至您的 Docker Hub 仓库。最终,流水线将在您事先设置的项目中自动创建一个部署和一个服务。 + +2. 前往该项目(本教程中即 `kubesphere-sample-dev`),请点击**应用负载**下的**工作负载**,您可以看到列表中显示的部署。 + +3. 在**服务**页面,您可以看到示例服务通过 NodePort 暴露其端口号。要访问服务,请访问 `:`。 + + {{< notice note >}} + + 访问服务前,您可能需要配置端口转发规则并在安全组中放行该端口。 + + {{}} + +4. 现在流水线已成功运行,将会推送一个镜像至 Docker Hub。登录 Docker Hub 查看结果。 + + ![DockerHub 镜像](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/dockerhub_image.png) + +5. 该应用的名称为 `devops-sample`,即 `APP_NAME` 的值,标签即 `SNAPSHOT-$BUILD_NUMBER` 的值。`$BUILD_NUMBER` 即**运行记录**选项卡列示的记录的序列号。 + +6. 
如果您在最后一个阶段设置了电子邮件服务器并添加了电子邮件通知的步骤,您还会收到电子邮件消息。 + +## 另请参见 + +[使用 Jenkinsfile 创建流水线](../create-a-pipeline-using-jenkinsfile/) + +[选择 Jenkins Agent](../choose-jenkins-agent/) + +[为 KubeSphere 流水线设置电子邮件服务器](../jenkins-email/) \ No newline at end of file diff --git a/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-jenkinsfile.md b/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-jenkinsfile.md new file mode 100644 index 000000000..4abf2d145 --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-jenkinsfile.md @@ -0,0 +1,281 @@ +--- +title: "使用 Jenkinsfile 创建流水线" +keywords: 'KubeSphere, Kubernetes, Docker, Spring Boot, Jenkins, DevOps, CI/CD, 流水线' +description: "学习如何使用示例 Jenkinsfile 创建并运行流水线。" +linkTitle: "使用 Jenkinsfile 创建流水线" +weight: 11212 +--- + +Jenkinsfile 是一个文本文件,它包含 Jenkins 流水线的定义,并被检入源代码控制仓库。Jenkinsfile 将整个工作流存储为代码,因此它是代码审查和流水线迭代过程的基础。有关更多信息,请参见 [Jenkins 官方文档](https://www.jenkins.io/zh/doc/book/pipeline/jenkinsfile/)。 + +本教程演示如何基于 GitHub 仓库中的 Jenkinsfile 创建流水线。您可以使用该流水线将示例应用程序分别部署到可从外部访问的开发环境和生产环境。 + +{{< notice note >}} + +KubeSphere 中可以创建两种类型的流水线:一种是本教程中介绍的基于 SCM 中 Jenkinsfile 创建的流水线,另一种是[通过图形编辑面板创建的流水线](../create-a-pipeline-using-graphical-editing-panel/)。Jenkinsfile in SCM 需要源代码管理 (SCM) 中有内置 Jenkinsfile,换句话说,Jenkinsfile 作为 SCM 的一部分。KubeSphere DevOps 系统会根据代码仓库的现有 Jenkinsfile 自动构建 CI/CD 流水线。您可以定义工作流,例如 `stage` 和 `step`。 + +{{}} + +## 准备工作 + +- 您需要有一个 [Docker Hub](https://hub.docker.com/) 帐户和一个 [GitHub](https://github.com/) 帐户。 +- 您需要[启用 KubeSphere DevOps 系统](../../../../pluggable-components/devops/)。 +- 您需要创建一个企业空间、一个 DevOps 项目和一个用户 (`project-regular`),需要邀请该用户至 DevOps 项目中并赋予 `operator` 角色。如果尚未准备就绪,请参见[创建企业空间、项目、用户和角色](../../../../quick-start/create-workspace-and-project/)。 +- 您需要设置 CI 专用节点用于运行流水线。请参考[为依赖项缓存设置 CI 节点](../../../how-to-use/devops-settings/set-ci-node/)。 +- 您需要安装和配置 SonarQube。请参考[将 SonarQube 集成到流水线](../../../how-to-integrate/sonarqube/)。如果您跳过这一部分,则没有下面的 **SonarQube 分析**阶段。 + +## 流水线概述 + +本示例流水线包括以下八个阶段。 + +![流水线概览](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-overview.png) + +{{< notice note >}} + +- **阶段 1:Checkout SCM**:从 GitHub 仓库检出源代码。 +- **阶段 2:单元测试**:待该测试通过后才会进行下一阶段。 +- **阶段 3:SonarQube 分析**:SonarQube 代码质量分析。 +- **阶段 4:构建并推送快照镜像**:根据**策略设置**中选定的分支来构建镜像,并将 `SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER` 标签推送至 Docker Hub,其中 `$BUILD_NUMBER` 为流水线活动列表中的运行序号。 +- **阶段 5:推送最新镜像**:将 SonarQube 分支标记为 `latest`,并推送至 Docker Hub。 +- **阶段 6:部署至开发环境**:将 SonarQube 分支部署到开发环境,此阶段需要审核。 +- **阶段 7:带标签推送**:生成标签并发布到 GitHub,该标签会推送到 Docker Hub。 +- **阶段 8:部署至生产环境**:将已发布的标签部署到生产环境。 + +{{}} + +## 动手实验 + +### 步骤 1:创建凭证 + +1. 以 `project-regular` 身份登录 KubeSphere 控制台。转到您的 DevOps 项目,在 **DevOps 项目设置**下的**凭证**页面创建以下凭证。有关如何创建凭证的更多信息,请参见[凭证管理](../../../../devops-user-guide/how-to-use/devops-settings/credential-management/)。 + + {{< notice note >}} + + 如果您的帐户或密码中包含任何特殊字符,例如 `@` 和 `$`,可能会因为无法识别而在流水线运行时导致错误。在这种情况下,您需要先在一些第三方网站(例如 [urlencoder](https://www.urlencoder.org/))上对帐户或密码进行编码,然后将输出结果复制粘贴作为您的凭证信息。 + + {{}} + + | 凭证 ID | 类型 | 用途 | + | --------------- | ---------- | ---------- | + | dockerhub-id | 用户名和密码 | Docker Hub | + | github-id | 用户名和密码 | GitHub | + | demo-kubeconfig | kubeconfig | Kubernetes | + +2. 您还需要为 SonarQube 创建一个凭证 (`sonar-token`),用于上述的阶段 3(SonarQube 分析)。请参阅[为新项目创建 SonarQube 令牌 (Token)](../../../../devops-user-guide/how-to-integrate/sonarqube/#为新项目创建-sonarqube-token),在**访问令牌**类型的凭证的**令牌**字段中输入 SonarQube 令牌。点击**确定**完成操作。 + +3. 
您还需要创建具有如下图所示权限的 GitHub 个人访问令牌 (PAT),然后在 DevOps 项目中,使用生成的令牌创建用于 GitHub 认证的帐户凭证(例如,`github-token`)。 + + ![github-token-scope](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/github-token-scope.png) + + {{< notice note >}} + + 如需创建 GitHub 个人访问令牌,请转到您 GitHub 帐户的 **Settings**,点击 **Developer settings**,选择 **Personal access tokens**,然后点击 **Generate new token**。 + + {{}} + +4. 您可以在列表页中看到已创建的五个凭证。 + +### 步骤 2:在 GitHub 仓库中修改 Jenkinsfile + +1. 登录 GitHub 并 Fork GitHub 仓库 [devops-maven-sample](https://github.com/kubesphere/devops-maven-sample) 至您的 GitHub 个人帐户。 + +2. 在您自己的 GitHub 仓库 **devops-maven-sample** 中,点击根目录中的文件 `Jenkinsfile-online`。 + +3. 点击右侧的编辑图标,编辑环境变量。 + + ![jenkins-edit--2](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/jenkins-edit--2.png) + + | 条目 | 值 | 描述信息 | + | :--- | :--- | :--- | + | DOCKER\_CREDENTIAL\_ID | dockerhub-id | 您在 KubeSphere 中为 Docker Hub 帐户设置的**名称**。 | + | GITHUB\_CREDENTIAL\_ID | github-id | 您在 KubeSphere 中为 GitHub 帐户设置的**名称**,用于将标签推送至您的 GitHub 仓库。 | + | KUBECONFIG\_CREDENTIAL\_ID | demo-kubeconfig | 您在 KubeSphere 中为 kubeconfig 设置的**名称**,用于访问运行中的 Kubernetes 集群。 | + | REGISTRY | docker.io | 默认为 `docker.io`,用作推送镜像的地址。 | + | DOCKERHUB\_NAMESPACE | your-dockerhub-account | 请替换为您的 Docker Hub 帐户名,也可以替换为该帐户下的 Organization 名称。 | + | GITHUB\_ACCOUNT | your-github-account | 请替换为您的 GitHub 帐户名。例如,如果您的 GitHub 地址是 `https://github.com/kubesphere/`,则您的 GitHub 帐户名为 `kubesphere`,也可以替换为该帐户下的 Organization 名称。 | + | APP\_NAME | devops-maven-sample | 应用名称。 | + | SONAR\_CREDENTIAL\_ID | sonar-token | 您在 KubeSphere 中为 SonarQube 令牌设置的**名称**,用于代码质量检测。 | + + {{< notice note >}} + + Jenkinsfile 中 `mvn` 命令的参数 `-o` 表示开启离线模式。本教程中已下载相关依赖项,以节省时间并适应某些环境中的网络干扰。离线模式默认开启。 + + {{}} + +4. 编辑环境变量后,点击页面底部的 **Commit changes**,更新 SonarQube 分支中的文件。 + +### 步骤 3:创建项目 + +您需要创建两个项目,例如 `kubesphere-sample-dev` 和 `kubesphere-sample-prod`,分别代表开发环境和生产环境。待流水线成功运行,将在这两个项目中自动创建应用程序的相关部署 (Deployment) 和服务 (Service)。 + +{{< notice note >}} + +您需要提前创建 `project-admin` 帐户,用作 CI/CD 流水线的审核者。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../../quick-start/create-workspace-and-project/)。 + +{{}} + +1. 以 `project-admin` 身份登录 KubeSphere。在您创建 DevOps 项目的企业空间中创建以下两个项目。请确保邀请 `project-regular` 帐户至这两个项目中并赋予 `operator` 角色。 + + | 项目名称 | 别名 | + | ---------------------- | ----------------------- | + | kubesphere-sample-dev | development environment | + | kubesphere-sample-prod | production environment | + +2. 项目创建后,会显示在项目列表中。 + +### 步骤 4:创建流水线 + +1. 登出 KubeSphere,然后以 `project-regular` 身份重新登录,转到 DevOps 项目 `demo-devops`,点击**创建**。 + +2. 在弹出的对话框中填入基本信息,将其命名为 `jenkinsfile-in-scm` 并在**代码仓库**下指定一个代码仓库。 + +3. 在 **GitHub** 选项卡,从**凭证**的下拉菜单中选择 **github-token**,然后点击**确定**来选择您的仓库。 + +4. 选择您的 GitHub 帐户,与该令牌相关的所有仓库将在右侧列出。选择 **devops-maven-sample** 并点击**选择**,点击**下一步**继续。 + +5. 在**高级设置**中,选中**删除旧分支**旁边的方框。本教程中,您可以为**分支保留天数(天)**和**分支最大数量**使用默认值。 + + 删除旧分支意味着您将一并丢弃分支记录。分支记录包括控制台输出、已归档制品以及特定分支的其他相关元数据。更少的分支意味着您可以节省 Jenkins 正在使用的磁盘空间。KubeSphere 提供两个选项来确定何时丢弃旧分支: + + - 分支保留天数(天):超过保留期限的分支将被删除。 + + - 分支最大数量:分支数量超过最大数量时,删除最旧的分支。 + + {{< notice note >}} + + **分支保留天数(天)**和**分支最大数量**可以同时应用于分支。只要某个分支满足其中一个字段所设置的条件,则会删除该分支。例如,如果您将保留天数和最大分支数分别指定为 2 和 3,待某个分支的保留天数超过 2 或者分支保留数量超过 3,则会删除该分支。KubeSphere 默认用 7 和 5 预填充这两个字段。 + + {{}} + +6. 
在**策略设置**中,KubeSphere 默认提供四种策略。本示例中不会使用**从 Fork 仓库中发现 PR** 这条策略,因此您可以删除该策略。您无需修改设置,可以直接使用默认值。 + + Jenkins 流水线运行时,开发者提交的 Pull Request (PR) 也将被视为一个单独的分支。 + + **发现分支** + + - **排除已提交 PR 的分支**:不扫描源分支,例如源仓库的 master 分支。需要合并这些分支。 + - **只包括已提交 PR 的分支**:仅扫描 PR 分支。 + - **包括所有分支**:拉取源仓库中的所有分支。 + + **从原仓库发现 PR** + + - **拉取 PR 合并后的代码**:PR 合并到目标分支后,基于源代码创建并运行流水线。 + - **拉取 PR 提交时的代码**:根据 PR 本身的源代码创建并运行流水线。 + - **分别创建两个流水线**:KubeSphere 创建两个流水线,一个流水线使用 PR 与目标分支合并后的源代码版本,另一个使用 PR 本身的源代码版本。 + + {{< notice note >}} + + 您需要选择 GitHub 作为代码仓库才能启用此处的**策略设置**设置。 + + {{}} + +7. 向下滚动到**脚本路径**。该字段指定代码仓库中的 Jenkinsfile 路径。它表示仓库的根目录。如果文件位置变更,则脚本路径也需要更改。请将其更改为 `Jenkinsfile-online`,这是示例仓库中位于根目录下的 Jenkinsfile 的文件名。 + +8. 在**扫描触发器**中,点击**定时扫描**并设置时间间隔为 **5 分钟**。点击**创建**完成配置。 + + {{< notice note >}} + + 您可以设置特定的时间间隔让流水线扫描远程仓库,以便根据您在**策略设置**中设置的策略来检测代码更新或新的 PR。 + + {{}} + +### 步骤 5:运行流水线 + +1. 流水线创建后,点击该流水线名称进入其详情页面。 + + {{< notice note >}} + + - 您可以点击该流水线右侧的 icon,然后选择**复制**来创建该流水线的副本。如需并发运行不包含多分支的多个流水线,您可以将这些流水线全选,然后点击**运行**来批量运行它们。 + - 流水线详情页显示**同步状态**,即 KubeSphere 和 Jenkins 的同步结果。若同步成功,您会看到**成功**图标中打上绿色的对号。 + + {{}} + +2. 在**运行记录**选项卡下,正在扫描三个分支。点击右侧的**运行**,流水线将根据您设置的行为策略来运行。从下拉列表中选择 **sonarqube**,然后添加标签号,例如 `v0.0.2`。点击**确定**触发新活动。 + + {{< notice note >}} + + - 如果您在此页面上未看到任何运行记录,则需要手动刷新浏览器或点击下拉菜单(**更多操作**按钮)中的**扫描远程分支**。 + - 标签名称用于在 GitHub 和 Docker Hub 中指代新生成的发布版本和镜像。现有标签名称不能再次用于字段 `TAG_NAME`。否则,流水线将无法成功运行。 + + {{}} + +3. 稍等片刻,您会看到一些运行停止,一些运行失败。点击第一个活动查看其详细信息。 + + {{< notice note >}} + + 活动失败可能由不同因素所引起。本示例中,在上述步骤中编辑分支环境变量时,仅更改了 sonarqube 分支的 Jenkinsfile。相反地,dependency 和 master 分支中的这些变量保持不变(使用了错误的 GitHub 和 Docker Hub 帐户),从而导致失败。您可以点击该活动,查看其日志中的详细信息。导致失败的其他原因可能是网络问题、Jenkinsfile 中的编码不正确等等。 + + {{}} + +4. 流水线在 `deploy to dev` 阶段暂停,您需要手动点击**继续**。请注意,在 Jenkinsfile 中分别定义了三个阶段 `deploy to dev`、`push with tag` 和 `deploy to production`,因此将对流水线进行三次审核。 + + 在开发或生产环境中,可能需要具有更高权限的人员(例如版本管理员)来审核流水线、镜像以及代码分析结果。他们有权决定流水线是否能进入下一阶段。在 Jenkinsfile 中,您可以使用 `input` 来指定由谁审核流水线。如果您想指定一个用户(例如 `project-admin`)来审核,您可以在 Jenkinsfile 中添加一个字段。如果有多个用户,则需要通过逗号进行分隔,如下所示: + + ```groovy + ··· + input(id: 'release-image-with-tag', message: 'release image with tag?', submitter: 'project-admin,project-admin1') + ··· + ``` + + {{< notice note >}} + + 在 KubeSphere 3.3 中,如果不指定审核员,那么能够运行流水线的帐户也能够继续或终止该流水线。如果指定审核员,流水线创建者或者您指定的审核员账户均有权限继续或终止流水线。 + + {{}} + +### 步骤 6:检查流水线状态 + +1. 在**运行状态**中,您可以查看流水线的运行状态。请注意,流水线在刚创建后将继续初始化几分钟。示例流水线有八个阶段,它们已在 [Jenkinsfile-online](https://github.com/kubesphere/devops-maven-sample/blob/sonarqube/Jenkinsfile-online) 中单独定义。 + +2. 点击右上角的**查看日志**来查看流水线运行日志。您可以看到流水线的动态日志输出,包括可能导致流水线无法运行的错误。对于每个阶段,您都可以点击该阶段来查看其日志,而且可以将日志下载到本地计算机进行进一步分析。 + +### 步骤 7:验证结果 + +1. 流水线成功运行后,点击**代码检查**通过 SonarQube 查看结果,如下所示。 + +2. 按照 Jenkinsfile 中的定义,通过流水线构建的 Docker 镜像也已成功推送到 Docker Hub。在 Docker Hub 中,您会看到带有标签 `v0.0.2` 的镜像,该标签在流水线运行之前已指定。 + +3. 同时,GitHub 中已生成一个新标签和一个新发布版本。 + +4. 示例应用程序将部署到 `kubesphere-sample-dev` 和 `kubesphere-sample-prod`,并创建相应的部署和服务。转到这两个项目,预期结果如下所示: + + | 环境 | URL | 命名空间 | 部署 | 服务 | + | :--- | :--- | :--- | :--- | :--- | + | Development | `http://{$NodeIP}:{$30861}` | kubesphere-sample-dev | ks-sample-dev | ks-sample-dev | + | Production | `http://{$NodeIP}:{$30961}` | kubesphere-sample-prod | ks-sample | ks-sample | + + {{< notice note >}} + + 您可能需要在您的安全组中放行该端口,以便通过 URL 访问应用程序。 + + {{}} + +### 步骤 8:访问示例服务 + +1. 以 `admin` 身份登录 KubeSphere 并使用**工具箱**中的 **kubectl** 访问该服务。转到 `kubesphere-sample-dev` 项目,然后在**应用负载**下的**服务**中点击 `ks-sample-dev`。在详情页获取 Endpoint 用于访问该服务。 + +2. 在右下角的**工具箱**中使用 **kubectl** 执行以下命令: + + ```bash + curl 10.233.120.230:8080 + ``` + +3. 
预期输出: + + ```bash + Really appreciate your star, that's the power of our life. + ``` + + {{< notice note >}} + + 使用 `curl` 访问 Endpoint,或者访问 {$Virtual IP}:{$Port} 或 {$Node IP}:{$NodePort}。 + + {{}} + +4. 同样地,您可以在项目 `kubesphere-sample-prod` 中测试服务,您将看到相同的输出结果。 + + ```bash + $ curl 10.233.120.236:8080 + Really appreciate your star, that's the power of our life. + ``` + diff --git a/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/customize-jenkins-agent.md b/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/customize-jenkins-agent.md new file mode 100644 index 000000000..c1dd180c1 --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/customize-jenkins-agent.md @@ -0,0 +1,70 @@ +--- +title: "自定义 Jenkins Agent" +keywords: "KubeSphere, Kubernetes, DevOps, Jenkins, Agent" +description: "了解如何在 KubeSphere 上自定义 Jenkins Agent。" +linkTitle: "自定义 Jenkins Agent" +Weight: 112191 +--- + +如果您需要使用运行特定环境(例如 JDK 11)的 Jenkins Agent,您可以在 KubeSphere 上自定义 Jenkins Agent。 + +本文档描述如何在 KubeSphere 上自定义 Jenkins Agent。 + +## 准备工作 + +- 您需要启用 [KubeSphere DevOps 系统](../../../../pluggable-components/devops/)。 + +## 自定义 Jenkins Agent + +1. 以 `admin` 用户登录 KubeSphere Web 控制台。 + +2. 点击左上角的**平台管理**,选择**集群管理**,然后在左侧导航栏点击**配置**下的**配置字典**。 + +3. 在**配置字典**页面的搜索框中输入 `jenkins-casc-config` 并按**回车键**。 + +4. 点击 `jenkins-casc-config` 进入其详情页面,点击**更多操作**,选择**编辑 YAML**。 + +5. 在弹出的对话框中,搜寻至 `data.jenkins_user.yaml:jenkins.clouds.kubernetes.templates` 下方并输入以下代码,点击**确定**。 + + ```yaml + - name: "maven-jdk11" # 自定义 Jenkins Agent 的名称。 + label: "maven jdk11" # 自定义 Jenkins Agent 的标签。若要指定多个标签,请用空格来分隔标签。 + inheritFrom: "maven" # 该自定义 Jenkins Agent 所继承的现有容器组模板的名称。 + containers: + - name: "maven" # 该自定义 Jenkins Agent 所继承的现有容器组模板中指定的容器名称。 + image: "kubespheredev/builder-maven:v3.2.0jdk11" # 此镜像只用于测试。您可以使用自己的镜像。 + ``` + + {{< notice note >}} + + 请确保遵守 YAML 文件中的缩进。 + + {{}} + +6. 请至少等待 70 秒,您的改动会自动重新加载。 + +7. 要使用自定义 Jenkins Agent,请参考下方的示例 Jenkinsfile,在创建流水线时指定自定义 Jenkins Agent 对应的标签和容器名。 + + ```groovy + pipeline { + agent { + node { + label 'maven && jdk11' + } + } + stages { + stage('Print Maven and JDK version') { + steps { + container('maven') { + sh ''' + mvn -v + java -version + ''' + } + } + } + } + } + ``` + + diff --git a/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/gitlab-multibranch-pipeline.md b/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/gitlab-multibranch-pipeline.md new file mode 100644 index 000000000..e22d39590 --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/gitlab-multibranch-pipeline.md @@ -0,0 +1,129 @@ +--- +title: "使用 GitLab 创建多分支流水线" +keywords: 'KubeSphere, Kubernetes, GitLab, Jenkins, 流水线' +description: '了解如何使用 GitLab 在 KubeSphere 上创建多分支流水线。' +linkTitle: "使用 GitLab 创建多分支流水线" +weight: 11215 +--- + +[GitLab](https://about.gitlab.com/) 是一个提供公开和私有仓库的开源代码仓库平台。它也是一个完整的 DevOps 平台,专业人士能够使用 GitLab 在项目中执行任务。 + +在 KubeSphere 3.3 中,您可以使用 GitLab 在 DevOps 项目中创建多分支流水线。本教程介绍如何使用 GitLab 创建多分支流水线。 + +## 准备工作 + +- 您需要准备一个 [GitLab](https://gitlab.com/users/sign_in) 帐户以及一个 [Docker Hub](https://hub.docker.com/) 帐户。 +- 您需要[启用 KubeSphere DevOps 系统](../../../../pluggable-components/devops/)。 +- 您需要创建一个企业空间、一个 DevOps 项目以及一个用户 (`project-regular`),该用户必须被邀请至该 DevOps 项目中并赋予 `operator` 角色。有关更多信息,请参考[创建企业空间、项目、用户和角色](../../../../quick-start/create-workspace-and-project/)。 + +## 动手实验 + +### 步骤 1:创建凭证 + +1. 
使用 `project-regular` 用户登录 KubeSphere 控制台。转到您的 DevOps 项目,在 **DevOps 项目设置**下的**凭证**中创建以下凭证。有关更多如何创建凭证的信息,请参见[凭证管理](../../../../devops-user-guide/how-to-use/devops-settings/credential-management/)。 + + {{< notice note >}} + + 如果您的帐户或密码中包含任何特殊字符,例如 `@` 和 `$`,则可能会因为无法识别而在流水线运行时导致错误。在此情况下,您需要先在第三方网站(例如 [urlencoder](https://www.urlencoder.org/))上对帐户或密码进行编码,然后将输出结果复制粘贴作为您的凭证信息。 + + {{}} + + | 凭证 ID | 类型 | 用途 | + | --------------- | ---------- | ---------- | + | dockerhub-id | 用户名和密码 | Docker Hub | + | gitlab-id | 用户名和密码 | GitLab | + | demo-kubeconfig | kubeconfig | Kubernetes | + +2. 创建完成后,您可以在列表中看到创建的凭证。 + +### 步骤 2:在 GitLab 仓库中编辑 Jenkinsfile + +1. 登录 GitLab 并创建一个公开项目。点击**导入项目**,选择**从 URL 导入仓库**,然后输入 [devops-maven-sample](https://github.com/kubesphere/devops-maven-sample) 的 URL。可见性级别选择**公开**,然后点击**新建项目**。 + +2. 在刚刚创建的项目中,从 master 分支中创建一个新分支,命名为 `gitlab-demo`。 + +3. 在 `gitlab-demo` 分支中,点击根目录中的 `Jenkinsfile-online` 文件。 + +4. 点击**编辑**,分别将 `GITHUB_CREDENTIAL_ID`、`GITHUB_ACCOUNT` 以及 `@github.com` 更改为 `GITLAB_CREDENTIAL_ID`、`GITLAB_ACCOUNT` 以及 `@gitlab.com`,然后编辑下表所列条目。您还需要将 `push latest` 和 `deploy to dev` 中 `branch` 的值更改为 `gitlab-demo`。 + + | 条目 | 值 | 描述信息 | + | -------------------- | --------- | ------------------------------------------------------------ | + | GITLAB_CREDENTIAL_ID | gitlab-id | 您在 KubeSphere 中为自己的 GitLab 帐户设置的**名称**,用于推送标签至您的 GitLab 仓库。 | + | DOCKERHUB_NAMESPACE | felixnoo | 请将其替换为您自己的 Docker Hub 帐户名称,也可以使用该帐户下的组织名称。 | + | GITLAB_ACCOUNT | felixnoo | 请将其替换为您自己的 GitLab 帐户名称,也可以使用该帐户的用户组名称。 | + + {{< notice note >}} + + 有关 Jenkinsfile 中环境变量的更多信息,请参考[使用 Jenkinsfile 创建流水线](../create-a-pipeline-using-jenkinsfile/#步骤-2在-github-仓库中修改-jenkinsfile)。 + + {{}} + +5. 点击 **Commit changes** 更新该文件。 + +### 步骤 3:创建项目 + +您需要创建两个项目,例如 `kubesphere-sample-dev` 和 `kubesphere-sample-prod`,这两个项目分别代表开发环境和测试环境。有关更多信息,请参考[使用 Jenkinsfile 创建流水线](../create-a-pipeline-using-jenkinsfile/#步骤-3创建项目)。 + +### 步骤 4:创建流水线 + +1. 使用 `project-regular` 用户登录 KubeSphere Web 控制台。转到您的 DevOps 项目,点击**创建**来创建新流水线。 + +2. 在出现的对话框中填写基本信息。将流水线的名称设置为 `gitlab-multi-branch` 并选择一个代码仓库。 + +3. 在 **GitLab** 选项卡下的 **GitLab 服务器地址**中选择默认选项 `https://gitlab.com`,在**项目组/所有者**中输入该 GitLab 项目所属组的名称,然后从**代码仓库**的下拉菜单中选择 `devops-maven-sample` 仓库。点击右下角的 **√**,然后点击**下一步**。 + + {{< notice note >}} + + 如需使用 GitLab 私有仓库,请参考以下步骤: + + - 在 GitLab 上前往**用户设置 > 访问令牌**,创建拥有 API 和 read_repository 权限的个人访问令牌。 + - [登录 Jenkins 面板](../../../how-to-integrate/sonarqube/#步骤-5将-sonarqube-服务器添加至-jenkins),前往**系统管理 > Manage Credentials**,使用您的 GitLab 令牌创建 Jenkins 凭证,用于访问 GitLab。然后前往**系统管理 > 系统配置**,在 **GitLab 服务**中添加该凭证。 + - 在您的 DevOps 项目中,选择 **DevOps 项目设置 > 凭证**,使用您的 GitLab 令牌创建一个凭证。然后在创建流水线时,您需要在 **GitLab** 页签上的**凭证**中指定该凭证,以便流水线能够从您的 GitLab 私有仓库中拉取代码。 + + {{}} + +4. 在**高级设置**选项卡中,下滑到**脚本路径**。将其更改为 `Jenkinsfile-online` 然后点击**创建**。 + + {{< notice note >}} + + 该字段指定代码仓库中的 Jenkinsfile 路径,它表示该仓库的根目录。如果文件位置变更,则脚本路径也需要更改。 + + {{}} + +### 步骤 5:运行流水线 + +1. 流水线创建后,会展示在列表中。点击流水线名称查看其详情页。 + +2. 点击右侧的**运行**。在出现的对话框中,从下拉菜单中选择 **gitlab-demo** 并添加一个标签号,比如 `v0.0.2`。点击**确定**来触发一个新运行。 + + {{< notice note >}} + + 流水线在 `deploy to dev` 阶段暂停,您需要手动点击**继续**。请注意,在 Jenkinsfile 中分别定义了三个阶段 `deploy to dev`、`push with tag` 和 `deploy to production`,因此将对流水线进行三次审核。 + + {{}} + +### 步骤 6:检查流水线状态 + +1. 在**运行状态**选项卡,您可以看到流水线的运行过程。点击右上角的**查看日志**来查看流水线运行日志。 + +2. 您可以看到流水线的动态日志输出,包括可能导致流水线无法运行的错误。对于每个阶段,您都可以点击该阶段来查看日志,而且可以将日志下载到本地计算机进行进一步分析。 + +### 步骤 7:验证结果 + +1. 如在 Jenkinsfile 中定义的那样,通过流水线构建的 Docker 镜像已成功推送到 Docker Hub。在 Docker Hub 中,您将看到在流水线运行前指定的带有标签 `v0.0.2` 的镜像。 + +2. 同时,GitLab 中也已生成一个新标签。 + +3. 
示例应用程序将会被部署至 `kubesphere-sample-dev` 和 `kubesphere-sample-prod` 中,也会创建相应的部署和服务。 + + | 环境 | URL | 命名空间 | 部署 | 服务 | + | -------- | --------------------------- | ---------------------- | ------------- | ------------- | + | 开发环境 | `http://{$NodeIP}:{$30861}` | kubesphere-sample-dev | ks-sample-dev | ks-sample-dev | + | 生产环境 | `http://{$NodeIP}:{$30961}` | kubesphere-sample-prod | ks-sample | ks-sample | + + {{< notice note >}} + + 您可能需要在安全组中打开端口,以便使用 URL 访问该应用。有关更多信息,请参考[访问示例服务](../create-a-pipeline-using-jenkinsfile/#步骤-8访问示例服务)。 + + {{}} + diff --git a/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/jenkins-email.md b/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/jenkins-email.md new file mode 100644 index 000000000..caf914543 --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/jenkins-email.md @@ -0,0 +1,42 @@ +--- +title: "为 KubeSphere 流水线设置电子邮件服务器" +keywords: 'KubeSphere, Kubernetes, 通知, Jenkins, DevOps, CI/CD, 流水线, 电子邮件服务器' +description: '设置电子邮件服务器以接收有关您 Jenkins 流水线的通知。' +linkTitle: "为 KubeSphere 流水线设置电子邮件服务器" +Weight: 11218 +--- + + +内置 Jenkins 无法与平台通知系统共享相同的电子邮件配置。因此,您需要单独为 KubeSphere DevOps 流水线配置电子邮件服务器设置。 + +## 准备工作 + +- 您需要启用 [KubeSphere DevOps 系统](../../../../pluggable-components/devops/)。 +- 您需要一个具有**集群管理**权限的帐户。例如,您可以直接以 `admin` 身份登录控制台或者创建具有该权限的新角色并将该角色分配给一个用户。 + +## 设置电子邮件服务器 + +1. 点击左上角的**平台管理**,然后选择**集群管理**。 + +2. 如果您已经启用[多集群功能](../../../../multicluster-management/)并已导入成员集群,那么您可以选择一个特定集群以查看其节点。如果尚未启用该功能,请直接参考下一步。 + +3. 转到**应用负载**下的**工作负载**,然后从下拉列表中选择 **kubesphere-devops-system** 项目。点击 `devops-jenkins` 右侧的 icon 并选择**编辑 YAML** 以编辑其 YAML 配置文件。 + +4. 向下滚动到下图所示的需要指定的字段。完成修改后,点击**确定**以保存。 + + {{< notice warning >}} + + 在 `devops-jenkins` 部署 (Deployment) 中修改电子邮件服务器后,它会重新启动。因此,DevOps 系统将在几分钟内不可用,请在适当的时候进行此类修改。 + + {{}} + + ![设置电子邮件](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/set-email-server-for-kubesphere-pipelines/set-jenkins-email.png) + + | 环境变量名称 | 描述信息 | + | ----------------- | ------------------------- | + | EMAIL\_SMTP\_HOST | SMTP 服务器地址 | + | EMAIL\_SMTP\_PORT | SMTP 服务器端口(如:25) | + | EMAIL\_FROM\_ADDR | 电子邮件发件人地址 | + | EMAIL\_FROM\_NAME | 电子邮件发件人姓名 | + | EMAIL\_FROM\_PASS | 电子邮件发件人密码 | + | EMAIL\_USE\_SSL | 是否启用 SSL 配置 | diff --git a/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/jenkins-setting.md b/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/jenkins-setting.md new file mode 100644 index 000000000..3671f7f18 --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/jenkins-setting.md @@ -0,0 +1,50 @@ +--- +title: "设置 Jenkins 系统" +keywords: 'Kubernetes, KubeSphere, Jenkins, CasC' +description: '了解如何自定义您的 Jenkins 设置。' +linkTitle: '设置 Jenkins 系统' +Weight: 11216 +--- + +Jenkins 强大而灵活,已经成为 CI/CD 工作流的事实标准。但是,许多插件要求用户先设置系统级配置,然后才能使用。 + +KubeSphere DevOps 系统提供基于 Jenkins 的容器化 CI/CD 功能。为了向用户提供可调度的 Jenkins 环境,KubeSphere 使用 **Configuration as Code** 进行 Jenkins 系统设置,这要求用户登录 Jenkins 仪表板并在修改配置后重新加载。Jenkins 系统设置在 KubeSphere 当前版本的控制台上不可用,即将发布的版本将支持该设置。 + +本教程演示如何在 Jenkins 仪表板上设置 Jenkins 并重新加载配置。 + +## 准备工作 + +您已启用 [KubeSphere DevOps 系统](../../../../pluggable-components/devops/)。 + +## Jenkins Configuration as Code + +KubeSphere 默认安装 Jenkins Configuration as Code 插件,您可以通过 YAML 文件定义 Jenkins 的期望状态,便于再现 Jenkins 的配置(包括插件配置)。您可以[在该目录中](https://github.com/jenkinsci/configuration-as-code-plugin/tree/master/demos)查看具体的 Jenkins 配置和示例 YAML 文件。 + +此外,您可以在 [ks-jenkins](https://github.com/kubesphere/ks-jenkins) 仓库中找到 `formula.yaml` 文件,查看插件版本并按需自定义这些版本。 + 
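+下面给出一个极简的 Jenkins Configuration as Code 风格 YAML 片段,仅用于示意“通过 YAML 文件定义 Jenkins 期望状态”的基本写法。该片段为假设性示例,并非 KubeSphere 内置 `jenkins-casc-config` 的实际内容,其中的地址等取值均为占位值,实际可用字段请以上文链接的官方示例目录为准。
+
+```yaml
+jenkins:
+  # 基础设置:系统提示信息、执行器数量与调度模式
+  systemMessage: "Jenkins configured by Configuration as Code"
+  numExecutors: 2
+  mode: NORMAL
+unclassified:
+  location:
+    # Jenkins 的对外访问地址(示例占位值)
+    url: "http://jenkins.example.com/"
+```
+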
+![plugin-version](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/plugin-version.png) + +## 修改 ConfigMap + +建议您通过 Configuration as Code (CasC) 在 KubeSphere 中配置 Jenkins。内置 Jenkins CasC 文件存储为 [ConfigMap](../../../../project-user-guide/configuration/configmaps/)。 + +1. 以 `admin` 身份登录 KubeSphere,点击左上角的**平台管理**,然后选择**集群管理**。 + +2. 如果您已经启用[多集群功能](../../../../multicluster-management/)并已导入成员集群,您可以选择一个特定集群来编辑 ConfigMap。如果您尚未启用多集群功能,请直接参考下一步。 + +3. 在左侧导航栏中选择**配置**下的**配置字典**。在**配置字典**页面上,从下拉列表中选择 `kubesphere-devops-system`,然后点击 `jenkins-casc-config`。 + +4. 在详情页面上,点击**更多操作**,在下拉列表中选择**编辑 YAML**。 + +5. `jenkins-casc-config` 的配置模板是一个 YAML 文件,位于 `data.jenkins_user.yaml:` 部分。您可以在 ConfigMap 的代理 (Kubernetes Jenkins Agent) 中修改容器镜像、标签、资源请求 (Request) 和限制 (Limit) 等内容,或者在 podTemplate 中添加容器。完成操作后,点击**确定**。 + +6. 请至少等待 70 秒,您的改动会自动重新加载。 + +7. 有关如何通过 CasC 设置 Jenkins 的更多信息,请参见 [Jenkins 文档](https://github.com/jenkinsci/configuration-as-code-plugin)。 + + {{< notice note >}} + + 在当前版本中,并非所有插件都支持 CasC 设置。CasC 仅会覆盖通过 CasC 设置的插件配置。 + + {{}} + diff --git a/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/jenkins-shared-library.md b/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/jenkins-shared-library.md new file mode 100644 index 000000000..56d505e81 --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/jenkins-shared-library.md @@ -0,0 +1,122 @@ +--- +title: "在流水线中使用 Jenkins 共享库" +keywords: 'KubeSphere, Kubernetes, Jenkins, 共享库, 流水线' +description: '学习如何在流水线中使用 Jenkins 共享库。' +linkTitle: "在流水线中使用 Jenkins 共享库" +weight: 11217 +--- + +对于包含相同阶段或步骤的 Jenkins 流水线,在 Jenkinsfile 中使用 Jenkins 共享库避免流水线代码重复。 + +本教程演示如何在 KubeSphere DevOps 流水线中使用 Jenkins 共享库。 + +## 准备工作 + +- [启用 KubeSphere DevOps 系统](../../../../pluggable-components/devops/)。 +- 您需要创建一个企业空间、一个 DevOps 项目和一个用户 (`project-regular`)。必须邀请此帐户至 DevOps 项目中,并且授予 `operator` 角色。有关详细信息,请参阅[创建企业空间、项目、用户和角色](https://kubesphere.io/zh/docs/quick-start/create-workspace-and-project/)。 +- 您需要一个可用 Jenkins 共享库。本教程以 [GitHub 仓库](https://github.com/devops-ws/jenkins-shared-library)中的 Jenkins 共享库为例。 + +## 在 Jenkins 仪表盘配置共享库 + +1. [登录 Jenkins 仪表板](../../../how-to-integrate/sonarqube/#步骤-5将-sonarqube-服务器添加至-jenkins)并点击左侧导航栏中的**系统管理**。 + +2. 向下滚动并点击**系统配置**。 + +3. 向下滚动到 **Global Pipeline Libraries**,然后点击**新增**。 + +4. 配置字段如下所示。 + + - **Name:** 为共享库设置名称(例如,``demo-shared-library``),以便在 Jenkinsfile 中引用此名称来导入共享库。 + + - **Default version:** 设置共享库所在仓库的一个分支名称,将其作为导入共享库的默认分支。本教程将使用 master。 + + - 在 **Retrieval method** 下,选择 **Modern SCM**。 + + - 在 **Source Code Management** 下,选择 **Git** 并为**项目仓库**输入示例仓库的 URL 。如果您使用自己的仓库且访问此仓库需要凭据,则需要配置**凭据**。 + +5. 当您结束编辑,请点击**应用**。 + + {{< notice note >}} + + 您还可以配置[文件夹级别的共享库](https://www.jenkins.io/zh/doc/book/pipeline/shared-libraries/#folder-level-shared-libraries)。 + + {{}} + +## 在流水线中使用共享库 + + +### 步骤 1: 创建流水线 + +1. 用 `project-regular` 帐户登录 KubeSphere web 控制台。进入 DevOps 项目并点击**流水线**页面上的**创建**。 + +2. 在弹出窗口中设置名称(例如,``demo-shared-library``),点击**下一步**。 + +3. 在**高级设置**中,直接点击**创建**,使用默认设置创建流水线。 + +### 步骤 2:编辑流水线 + +1. 在流水线列表中,点击流水线以转到其详细信息页面,然后点击**编辑 Jenkinsfile**。 + +2. 
在显示的对话框中,输入以下示例 Jenkinsfile。完成编辑后,点击**确定**。 + + ```groovy + library identifier: 'devops-ws-demo@master', retriever: modernSCM([ + $class: 'GitSCMSource', + remote: 'https://github.com/devops-ws/jenkins-shared-library', + traits: [[$class: 'jenkins.plugins.git.traits.BranchDiscoveryTrait']] + ]) + + pipeline { + agent any + + stages { + stage('Demo') { + steps { + script { + mvn.fake() + } + } + } + } + } + ``` + + {{< notice note >}} + + 您可以根据需要为 `agent` 指定 `label`。 + + {{}} + +3. 或者,您可以使用以 `@Library('<配置好的共享库名称>') _ ` 开头的 Jenkinsfile。如果使用这种类型的 Jenkinsfile,则需要提前在 Jenkins 仪表板上配置共享库。在本教程中,您可以使用以下示例 Jenkinsfile。 + + ```groovy + @Library('demo-shared-library') _ + + pipeline { + agent any + + stages { + stage('Demo') { + steps { + script { + mvn.fake() + } + } + } + } + } + ``` + + {{< notice note >}} + + 您可以使用 `@Library('demo-shared-library@<分支名称>') _` 来指定特定的分支。 + + {{}} + +### 步骤 3:运行流水线 + +1. 您可以在**任务状态**选项卡下查看该阶段。点击**运行**运行它。 + +2. 在一段时间后,流水线将成功运行。 + +3. 您可以点击**运行记录**下的**成功**记录,然后点击**查看日志**查看日志详细信息。 diff --git a/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/pipeline-settings.md b/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/pipeline-settings.md new file mode 100644 index 000000000..6cc3a8a75 --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/pipeline-settings.md @@ -0,0 +1,170 @@ +--- +title: "流水线设置" +keywords: 'KubeSphere, Kubernetes, Docker, Jenkins, 流水线' +description: '了解 DevOps 项目中流水线的各个属性。' +linkTitle: "流水线设置" +weight: 11214 +--- + +创建流水线时,可以通过各种设置来自定义流水线配置。本文档对这些设置进行详细阐述。 + +## 准备工作 + +- 您需要创建一个企业空间、一个 DevOps 项目以及一个用户 (`project-regular`),必须邀请该用户至该 DevOps 项目中并赋予 `operator` 角色。有关更多信息,请参考[创建企业空间、项目、用户和角色](../../../../quick-start/create-workspace-and-project/)。 +- 您需要[启用 KubeSphere DevOps 系统](../../../../pluggable-components/devops/)。 + +## 基本信息 + +在**基本信息**选项卡,您可以自定义以下信息: + +- **名称**:流水线的名称,同一个 DevOps 项目内的流水线不能重名。 + +- **DevOps 项目**:流水线所属的 DevOps 项目。 + +- **描述**:描述流水线的附加信息,描述信息不超过 256 个字符。 + +- **代码仓库(可选)**:您可以选择一个代码仓库作为流水线的代码源。您可以选择 GitHub、GitLab、Bitbucket、Git 以及 SVN 作为代码源。 + + {{< tabs >}} + + {{< tab "GitHub" >}} + + 如果选择 **GitHub**,则必须指定用于访问 GitHub 的凭证。如果您已预先使用您的 GitHub 令牌创建了凭证,则可以从下拉菜单中选择已有凭证,或者点击**创建凭证**来创建新凭证。选择凭证后,点击**确定**,即可在右侧查看您的仓库。完成所有操作后,请点击 **√** 图标。 + + {{}} + + {{< tab "GitLab" >}} + + 如果选择 **GitLab**,则必须指定 GitLab 服务器地址、项目组/所有者和代码仓库。如果访问代码仓库需要凭证,则需要指定一个凭证。完成所有操作后,请点击 **√** 图标。 + + {{}} + + {{< tab "Bitbucket" >}} + + 如果选择 **Bitbucket**,则需要输入您的 Bitbucket 服务器地址。您可以预先使用您的 Bitbucket 用户名和密码创建一个凭证,或者点击**创建凭证**来创建一个新凭证。输入信息后点击**确定**,即可在右侧看到您的仓库。完成所有操作后,请点击 **√** 图标。 + + {{}} + + {{< tab "Git" >}} + + 如果选择 **Git**,则需要指定仓库 URL。如果访问代码仓库需要凭证,则需要指定一个凭证。您也可以点击**创建凭证**来添加新凭证。完成所有操作后,请点击 **√** 图标。 + + {{}} + + {{< tab "SVN" >}} + + 如果选择 **SVN**,则需要指定仓库地址和凭证。您也可以按需指定包括分支和排除分支。完成所有操作后,请点击 **√** 图标。 + + {{}} + + {{}} + +## 指定代码仓库时的高级设置 + +如果您指定一个代码仓库,则可以在**高级设置**选项卡上自定义以下配置: + +### 分支设置 + +**删除旧分支**:自动删除旧分支。分支记录将一起被删除。分支记录包括控制台输出、存档制品以及与特定分支相关的其他元数据。保留较少的分支可以节省 Jenkins 所使用的磁盘空间。KubeSphere 提供两个选项来确定何时丢弃旧的分支: + +- **分支保留天数(天)**:超过保留期限的分支将被删除。 + +- **分支最大数量**:如果分支数量超过最大数量,将删除最旧的分支。 + + {{< notice note >}} + + **分支保留天数(天)**和**分支最大数量**同时适用于分支。只要分支满足任一字段的条件,则将被丢弃。例如,如果将保留分支的天数指定为 2,将保留分支的最大个数指定为 3,那么超过任一数目的分支将被丢弃。KubeSphere 默认用 7 和 5 预先填充这两个字段。 + + {{}} + +### 策略设置 + +在**策略设置**中,KubeSphere 默认提供四种策略。Jenkins 流水线运行时,开发者提交的 PR (Pull Request) 也将被视为单独的分支。 + +**发现分支** + +- **排除已提交 PR 的分支**:已提交 PR 的分支将被排除。 +- **只包括已提交 PR 的分支**:只拉取已提交 PR 的分支。 +- **包括所有分支**:从仓库中拉取所有分支。 + +**发现标签** + +- **开启标签发现**:拥有指定标签的分支将被扫描。 +- **关闭标签发现**:拥有指定标签的分支不会被扫描。 + 
+**从原仓库发现 PR** + +- **拉取 PR 合并后的代码**:PR 合并到目标分支后,将基于源代码创建并运行流水线。 +- **拉取 PR 提交时的代码**:根据 PR 本身的源代码创建并运行流水线。 +- **分别创建两个流水线**:KubeSphere 会创建两个流水线,一个流水线使用 PR 本身的源代码版本,一个流水线使用 PR 与目标分支合并后的源代码版本。 + +**从 Fork 仓库发现 PR** + +- **拉取 PR 合并后的代码**:PR 合并到目标分支后,将基于源代码创建并运行流水线。 +- **拉取 PR 提交时的代码**:根据 PR 本身的源代码创建并运行流水线。 +- **分别创建两个流水线**:KubeSphere 会创建两个流水线,一个流水线使用 PR 本身的源代码版本,一个流水线使用 PR 与目标分支合并后的源代码版本。 +- **贡献者**:对 PR 做出贡献的用户。 +- **所有人**:每个可以访问 PR 的用户。 +- **具有管理员或有编辑权限的用户**:仅限于对 PR 具有管理员或编辑权限的用户。 +- **无**:如果选择此选项,那么无论在**拉取策略**中选择了哪个选项,都不会发现 PR。 + +### 正则过滤 + +勾选选框以指定正则表达式来过滤分支、PR 和标签。 + +### 脚本路径 + +**脚本路径**参数指定代码仓库中的 Jenkinsfile 路径,它指代仓库的根目录。如果文件位置发生更改,则脚本路径也需要更改。 + +### 扫描触发器 + +勾选**定时扫描**,并从下拉列表中设置扫描时间间隔。 + +### 构建触发器 + +您可以从**创建流水线时触发**和**删除流水线时触发**的下拉列表中选择一个流水线,以便在创建新的流水线或删除流水线时自动触发指定流水线中的任务。 + +### 克隆设置 + +- **克隆深度**:克隆时需要提取的 commit 数量。 +- **克隆超时时间(min)**:完成克隆过程所需要的时长(以分钟为单位)。 +- **开启浅克隆**:如果您开启浅克隆,则克隆的代码不会包含标签。 + +### Webhook + +**Webhook** 能有效地让流水线发现远程代码仓库中的更改,并自动触发新一轮运行。Webhook 应成为触发 Jenkins 自动扫描 GitHub 和 Git(例如 GitLab)的主要方法。 + +## 不指定代码仓库时的高级设置 + +如果不指定代码仓库,则可以在**高级设置**选项卡上自定义以下配置: + +### 构建设置 + +**删除过期构建记录**:确定何时删除分支下的构建记录。构建记录包括控制台输出、存档制品以及与特定构建相关的其他元数据。保留较少的构建可以节省 Jenkins 所使用的磁盘空间。KubeSphere 提供两个选项来确定应何时删除旧的构建: + +- **构建记录保留期限(天)**:超过保留期限的构建记录将被删除。 + +- **构建记录最大数量**:当构建记录数量超过允许的最大数量,最早的构建记录将被删除。 + + {{< notice note >}} + + 这两个条件同时适用于构建。如果首先满足任一条件,构建将会被删除。 + + {{}} + +- **不允许并发构建**:如果勾选此选项,则不能并发运行多个构建。 + +### 构建参数 + +参数化的构建过程允许您在开始运行流水线时传入一个或多个参数。KubeSphere 默认提供五种参数类型,包括**字符串**、**多行字符串**、**布尔值**、**选项** 以及**密码**。当参数化项目时,构建会被替换为参数化构建,其中将提示用户为每个定义的参数输入值。 + +### 构建触发器 + +**定时构建**:允许定期执行构建。点击**了解更多**来参照详细的 CRON 语法。 + + + + + + + diff --git a/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/pipeline-webhook.md b/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/pipeline-webhook.md new file mode 100644 index 000000000..0288f5a5c --- /dev/null +++ b/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/pipeline-webhook.md @@ -0,0 +1,66 @@ +--- +title: "使用 Webhook 触发流水线" +keywords: 'Kubernetes, DevOps, Jenkins, 流水线, Webhook' +description: '学习如何使用 webhook 触发 Jenkins 流水线。' +linkTitle: "使用 Webhook 触发流水线" +weight: 11219 +--- + +如果通过远程代码仓库创建基于 Jenkinsfile 的流水线,则可以在远程仓库中配置 webhook,以便对远程仓库进行变更时,自动触发流水线。 + +本教程演示如何用 webhook 触发流水线。 + +## 准备工作 + +- [启用 KubeSphere DevOps 系统](../../../../pluggable-components/devops/)。 +- 创建一个企业空间、一个 DevOps 项目和一个用户(例如,`project-regular`)。`project-regular` 需要被邀请至 DevOps 项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../../quick-start/create-workspace-and-project/)。 +- 通过远程代码仓库创建一个基于 Jenkinsfile 的流水线。有关更多信息,请参见[使用 Jenkinsfile 创建流水线](../create-a-pipeline-using-jenkinsfile/)。 + +## 配置 Webhook + +### 获取 webhook URL + +1. 使用 `project-regular` 帐户登录 Kubesphere Web 控制台。转到 DevOps 项目,点击流水线(例如,`jenkins-in-scm`)以查看详情页面。 + +2. 点击**更多**,在下拉菜单中选择**编辑设置**。 + +3. 在出现的会话框中,滑动至 **Webhook** 以获得 webhook push URL。 + +### 在 GitHub 仓库中设置 webhook + +1. 登录您的 GitHub,并转到 `devops-maven-sample` 仓库。 + +2. 点击 **Settings**,然后点击 **Webhooks**,然后点击 **Add webhook**。 + +3. 在 **Payload URL** 中输入流水线中的 webhook push URL,然后点击 **Add webhook**。出于演示需要,本教程选择 **Just the push event**。您可以根据需要进行配置。有关更多信息,请参见 [GitHub 文档](https://docs.github.com/en/developers/webhooks-and-events/webhooks/creating-webhooks)。 + +4. 配置好的 webhook 会展示在 **Webhooks** 页面。 + +## 使用 Webhook 触发流水线 + +### 提交拉取请求到仓库 + +1. 在您仓库的 **Code** 页面,点击 **master** 然后选择 **sonarqube** 分支。 + +2. 转到 `/deploy/dev-ol` 然后点击文件 `devops-sample.yaml`。 + +3. 点击 icon 以编辑文件。 例如,将 `spec.replicas` 的值改变为 `3`。 + +4. 
在页面底部点击 **Commit changes**。
+
+### 检查 webhook 交付
+
+1. 在您仓库的 **Webhooks** 页面,点击 webhook。
+
+2. 点击 **Recent Deliveries**,然后点击一个具体交付记录查看详情。
+
+### 检查流水线
+
+1. 使用 `project-regular` 帐户登录 KubeSphere Web 控制台。转到 DevOps 项目,点击流水线。
+
+2. 在**运行记录**选项卡,检查提交到远程仓库 `sonarqube` 分支的拉取请求是否触发了新的运行。
+
+3. 转到 `kubesphere-sample-dev` 项目的 **Pods** 页面,检查 3 个 Pods 的状态。如果 3 个 Pods 为运行状态,表示流水线运行正常。
+
+
+
diff --git a/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/use-pipeline-templates.md b/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/use-pipeline-templates.md
new file mode 100644
index 000000000..4d7d6b9a2
--- /dev/null
+++ b/content/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/use-pipeline-templates.md
@@ -0,0 +1,92 @@
+---
+title: "使用流水线模板"
+keywords: 'KubeSphere, Kubernetes, Jenkins, 图形化流水线, 流水线模板'
+description: '了解如何在 KubeSphere 上使用流水线模板。'
+linkTitle: "使用流水线模板"
+weight: 11213
+---
+
+KubeSphere 提供图形编辑面板,您可以通过交互式操作定义 Jenkins 流水线的阶段和步骤。KubeSphere 3.3 中提供了内置流水线模板,如 Node.js、Maven 以及 Golang,使用户能够快速创建对应模板的流水线。同时,KubeSphere 3.3 还支持自定义流水线模板,以满足企业不同的需求。
+
+本文档演示如何在 KubeSphere 上使用流水线模板。
+
+## 准备工作
+
+- 您需要有一个企业空间、一个 DevOps 项目和一个用户 (`project-regular`),并已邀请此帐户至 DevOps 项目中且授予 `operator` 角色。如果尚未准备好,请参考[创建企业空间、项目、用户和角色](../../../../quick-start/create-workspace-and-project/)。
+
+- 您需要启用 [KubeSphere DevOps 系统](../../../../pluggable-components/devops/)。
+
+- 您需要[创建流水线](../../../how-to-use/pipelines/create-a-pipeline-using-graphical-editing-panel/)。
+
+## 使用内置流水线模板
+
+下面以 Node.js 为例演示如何使用内置流水线模板。如果需要使用 Maven 以及 Golang 流水线模板,可参考该部分内容。
+
+1. 以 `project-regular` 用户登录 KubeSphere 控制台,在左侧导航树,点击 **DevOps 项目**。
+
+2. 在右侧的 **DevOps 项目**页面,点击您创建的 DevOps 项目。
+
+3. 在左侧的导航树,点击**流水线**。
+
+4. 在右侧的**流水线**页面,点击已创建的流水线。
+
+5. 在右侧的**任务状态**页签,点击**编辑流水线**。
+
+
+6. 在**创建流水线**对话框,点击 **Node.js**,然后点击**下一步**。
+
+7. 在**参数设置**页签,按照实际情况设置以下参数,点击**创建**。
+
+   | 参数 | 参数解释 |
+   | ----------- | ------------------------- |
+   | GitURL | 需要克隆的项目仓库的地址。 |
+   | GitRevision | 需要检出的分支。 |
+   | NodeDockerImage | Node.js 的 Docker 镜像版本。 |
+   | InstallScript | 安装依赖项的 Shell 脚本。 |
+   | TestScript | 项目测试的 Shell 脚本。 |
+   | BuildScript | 构建项目的 Shell 脚本。 |
+   | ArtifactsPath | 归档文件所在的路径。 |
+
+8. 在左侧的可视化编辑页面,系统默认已添加一系列步骤,您可以添加步骤或并行阶段。
+
+9. 点击指定步骤,在页面右侧,您可以执行以下操作:
+   - 修改阶段名称。
+   - 删除阶段。
+   - 设置代理类型。
+   - 添加条件。
+   - 编辑或删除某一任务。
+   - 添加步骤或嵌套步骤。
+
+   {{< notice note >}}
+
+   您还可以按需在流水线模板中自定义步骤和阶段。有关如何使用图形编辑面板的更多信息,请参考[使用图形编辑面板创建流水线](../create-a-pipeline-using-graphical-editing-panel/)。
+
+   {{}}
+
+10. 在右侧的**代理**区域,选择代理类型,默认值为 **kubernetes**,点击**确定**。
+
+   | 代理类型 | 说明 |
+   | ----------- | ------------------------- |
+   | any | 调用默认的 base pod 模板创建 Jenkins agent 运行流水线。 |
+   | node | 调用指定类型的 pod 模板创建 Jenkins agent 运行流水线,可配置的 label 标签为 base、java、nodejs、maven、go 等。 |
+   | kubernetes | 通过 yaml 文件自定义标准的 kubernetes pod 模板运行 agent 执行流水线任务。 |
+
+11. 
在弹出的页面,您可以查看已创建的流水线模板详情,点击**运行**即可运行该流水线。
+
+在之前的版本中,KubeSphere 还提供了 CI 以及 CI & CD 流水线模板,但是由于这两个模板难以满足定制化需求,因此建议您采用其它内置模板或直接自定义模板。下面分别介绍了这两个模板。
+
+- CI 流水线模板
+
+  ![ci-template](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/use-pipeline-templates/ci-template.png)
+
+  ![ci-stages](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/use-pipeline-templates/ci-stages.png)
+
+  CI 流水线模板包含两个阶段。**clone code** 阶段用于检出代码,**build & push** 阶段用于构建镜像并将镜像推送至 Docker Hub。您需要预先为代码仓库和 Docker Hub 仓库创建凭证,然后在相应的步骤中设置仓库的 URL 以及凭证。完成编辑后,流水线即可开始运行。
+
+- CI & CD 流水线模板
+
+  ![cicd-template](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/use-pipeline-templates/cicd-template.png)
+
+  ![cicd-stages](/images/docs/v3.3/zh-cn/devops-user-guide/use-devops/use-pipeline-templates/cicd-stages.png)
+
+  CI & CD 流水线模板包含六个阶段。有关每个阶段的更多信息,请参考[使用 Jenkinsfile 创建流水线](../create-a-pipeline-using-jenkinsfile/#流水线概述),您可以在该文档中找到相似的阶段及描述。您需要预先为代码仓库、Docker Hub 仓库和集群的 kubeconfig 创建凭证,然后在相应的步骤中设置仓库的 URL 以及凭证。完成编辑后,流水线即可开始运行。
diff --git a/content/zh/docs/v3.4/faq/_index.md b/content/zh/docs/v3.4/faq/_index.md
new file mode 100644
index 000000000..af2e47209
--- /dev/null
+++ b/content/zh/docs/v3.4/faq/_index.md
@@ -0,0 +1,12 @@
+---
+title: "常见问题"
+description: "FAQ is designed to answer and summarize the questions users ask most frequently about KubeSphere."
+layout: "second"
+
+linkTitle: "常见问题"
+weight: 16000
+
+icon: "/images/docs/v3.3/docs.svg"
+---
+
+本章节总结并回答了有关 KubeSphere 最常见的问题,问题根据 KubeSphere 的功能进行分类,您可以在对应部分找到有关的问题和答案。
diff --git a/content/zh/docs/v3.4/faq/access-control/_index.md b/content/zh/docs/v3.4/faq/access-control/_index.md
new file mode 100644
index 000000000..95af6334a
--- /dev/null
+++ b/content/zh/docs/v3.4/faq/access-control/_index.md
@@ -0,0 +1,7 @@
+---
+title: "访问控制和帐户管理"
+keywords: 'Kubernetes, KubeSphere, 帐户, 访问控制'
+description: '关于访问控制和帐户管理的常见问题'
+layout: "second"
+weight: 16400
+---
diff --git a/content/zh/docs/v3.4/faq/access-control/add-kubernetes-namespace-to-kubesphere-workspace.md b/content/zh/docs/v3.4/faq/access-control/add-kubernetes-namespace-to-kubesphere-workspace.md
new file mode 100644
index 000000000..d51d22ad7
--- /dev/null
+++ b/content/zh/docs/v3.4/faq/access-control/add-kubernetes-namespace-to-kubesphere-workspace.md
@@ -0,0 +1,38 @@
+---
+title: "添加现有 Kubernetes 命名空间至 KubeSphere 企业空间"
+keywords: "命名空间, 项目, KubeSphere, Kubernetes"
+description: "将您现有 Kubernetes 集群中的命名空间添加至 KubeSphere 的企业空间。"
+linkTitle: "添加现有 Kubernetes 命名空间至 KubeSphere 企业空间"
+Weight: 16430
+---
+
+Kubernetes 命名空间即 KubeSphere 项目。如果您不是在 KubeSphere 控制台创建命名空间对象,则该命名空间不会直接在企业空间中显示。不过,集群管理员依然可以在**集群管理**页面查看该命名空间。同时,您也可以将该命名空间添加至企业空间。
+
+本教程演示如何添加现有 Kubernetes 命名空间至 KubeSphere 企业空间。
+
+## 准备工作
+
+- 您需要有一个具有**集群管理**权限的用户。例如,您可以直接以 `admin` 身份登录控制台,或者创建一个具有该权限的新角色并将其分配至一个用户。
+
+- 您需要有一个可用的企业空间,以便将命名空间分配至该企业空间。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。
+
+## 创建 Kubernetes 命名空间
+
+首先,创建一个示例 Kubernetes 命名空间,以便稍后将其添加至企业空间。执行以下命令:
+
+```bash
+kubectl create ns demo-namespace
+```
+
+有关创建 Kubernetes 命名空间的更多信息,请参见[命名空间演练](https://kubernetes.io/zh/docs/tasks/administer-cluster/namespaces-walkthrough/)。
+
+## 添加命名空间至 KubeSphere 企业空间
+
+1. 以 `admin` 身份登录 KubeSphere 控制台,转到**集群管理**页面。点击**项目**,您可以查看在当前集群中运行的所有项目,包括前述刚刚创建的项目。
+
+2. 通过 kubectl 创建的命名空间不属于任何企业空间。请点击右侧的 ,选择**分配企业空间**。
+
+3. 在弹出的对话框中,为该项目选择一个**企业空间**和**项目管理员**,然后点击**确定**。
+
+4. 
转到您的企业空间,可以在**项目**页面看到该项目已显示。 + diff --git a/content/zh/docs/v3.4/faq/access-control/cannot-login.md b/content/zh/docs/v3.4/faq/access-control/cannot-login.md new file mode 100644 index 000000000..266a96f96 --- /dev/null +++ b/content/zh/docs/v3.4/faq/access-control/cannot-login.md @@ -0,0 +1,143 @@ +--- +title: "用户无法登录" +keywords: "无法登录, 用户不活跃, KubeSphere, Kubernetes" +description: "如何解决无法登录的问题" +linkTitle: "用户无法登录" +Weight: 16440 +--- + +KubeSphere 安装时会自动创建默认用户 (`admin/P@88w0rd`),密码错误或者用户状态不是**活跃**会导致无法登录。 + +下面是用户无法登录时,一些常见的问题: + +## Account Not Active + +登录失败时,您可能看到以下提示。请根据以下步骤排查并解决问题: + +![account-not-active](/images/docs/v3.3/faq/access-control-and-account-management/cannot-login/account-not-active.png) + +1. 执行以下命令检查用户状态: + + ```bash + $ kubectl get users + NAME EMAIL STATUS + admin admin@kubesphere.io Active + ``` + +2. 检查 `ks-controller-manager` 是否正常运行,是否有异常日志: + + ```bash + kubectl -n kubesphere-system logs -l app=ks-controller-manager + ``` + +以下是导致此问题的可能原因。 + +### Kubernetes 1.19 中的 admission webhook 无法正常工作 + +Kubernetes 1.19 使用了 Golang 1.15 进行编译,需要更新 admission webhook 用到的证书,该问题导致 `ks-controller` admission webhook 无法正常使用。 + +相关错误日志: + +```bash +Internal error occurred: failed calling webhook "validating-user.kubesphere.io": Post "https://ks-controller-manager.kubesphere-system.svc:443/validate-email-iam-kubesphere-io-v1alpha2-user?timeout=30s": x509: certificate relies on legacy Common Name field, use SANs or temporarily enable Common Name matching with GODEBUG=x509ignoreCN=0 +``` + +有关该问题和解决方式的更多信息,请参见[此 GitHub Issue](https://github.com/kubesphere/kubesphere/issues/2928)。 + +### ks-controller-manager 无法正常工作 + +`ks-controller-manager` 依赖 openldap、Jenkins 这两个有状态服务,当 openldap 或 Jenkins 无法正常运行时会导致 `ks-controller-manager` 一直处于 `reconcile` 状态。 + +可以通过以下命令检查 openldap 和 Jeknins 服务是否正常: + +``` +kubectl -n kubesphere-devops-system get po | grep -v Running +kubectl -n kubesphere-system get po | grep -v Running +kubectl -n kubesphere-system logs -l app=openldap +``` + +相关错误日志: + +```bash +failed to connect to ldap service, please check ldap status, error: factory is not able to fill the pool: LDAP Result Code 200 \"Network Error\": dial tcp: lookup openldap.kubesphere-system.svc on 169.254.25.10:53: no such host +``` + +```bash +Internal error occurred: failed calling webhook “validating-user.kubesphere.io”: Post https://ks-controller-manager.kubesphere-system.svc:443/validate-email-iam-kubesphere-io-v1alpha2-user?timeout=4s: context deadline exceeded +``` + +**解决方式** + +您需要先恢复 openldap、Jenkins 这两个服务并保证网络的连通性,重启 `ks-controller-manager`。 + +``` +kubectl -n kubesphere-system rollout restart deploy ks-controller-manager +``` + +### 使用了错误的代码分支 + +如果您使用了错误的 ks-installer 版本,会导致安装之后各组件版本不匹配。 + +通过以下方式检查各组件版本是否一致,正确的 image tag 应该是 v3.3.2。 + +``` +kubectl -n kubesphere-system get deploy ks-installer -o jsonpath='{.spec.template.spec.containers[0].image}' +kubectl -n kubesphere-system get deploy ks-apiserver -o jsonpath='{.spec.template.spec.containers[0].image}' +kubectl -n kubesphere-system get deploy ks-controller-manager -o jsonpath='{.spec.template.spec.containers[0].image}' +``` + +## 用户名或密码错误 + +![incorrect-password](/images/docs/v3.3/faq/access-control-and-account-management/cannot-login/wrong-password.png) + +通过以下命令检查用户密码是否正确: + +``` +curl -u : "http://`kubectl -n kubesphere-system get svc ks-apiserver -o jsonpath='{.spec.clusterIP}'`/api/v1/nodes" +``` + +### Redis 异常 + +`ks-console` 和 `ks-apiserver` 需要借助 Redis 在多个副本之间共享数据。您可以通过以下命令检查 Redis 服务是否正常: + +``` +kubectl -n kubesphere-system 
logs -l app=ks-console +kubectl -n kubesphere-system get po | grep -v Running +# High Availability +kubectl -n kubesphere-system exec -it redis-ha-server-0 redis-cli info replication +kubectl -n kubesphere-system exec -it redis-ha-server-0 -- sh -c 'for i in `seq 0 2`; do nc -vz redis-ha-server-$i.redis-ha.kubesphere-system.svc 6379; done' +kubectl -n kubesphere-system logs -l app=redis-ha-haproxy +kubectl -n kubesphere-system logs -l app=redis-ha +# Single Replica +kubectl -n kubesphere-system logs -l app=redis +``` + +相关错误日志: + +```bash +1344:C 17 Sep 2020 17:13:18.099 # Failed opening the RDB file dump.rdb (in server root dir /data) for saving: Stale file handle +1:M 17 Sep 2020 17:13:18.198 # Background saving error +1:M 17 Sep 2020 17:13:24.014 * 1 changes in 3600 seconds. Saving... +1:M 17 Sep 2020 17:13:24.015 * Background saving started by pid 1345 +1345:C 17 Sep 2020 17:13:24.016 # Failed opening the RDB file dump.rdb (in server root dir /data) for saving: Stale file handle +1:M 17 Sep 2020 17:13:24.115 # Background saving error +``` + +```bash +E0909 07:05:22.770468 1 redis.go:51] unable to reach redis host EOF +``` + +```bash +[WARNING] 252/094143 (6) : Server check_if_redis_is_master_0/R0 is DOWN, reason: Layer7 timeout, info: " at step 5 of tcp-check (expect string '10.223.2.232')", check duration: 1000ms. 2 active and 0 backup servers left. 0 sessions active, 0 requeued, 0 remaining in queue. +[WARNING] 252/094143 (6) : Server check_if_redis_is_master_0/R1 is DOWN, reason: Layer7 timeout, info: " at step 5 of tcp-check (expect string '10.223.2.232')", check duration: 1000ms. 1 active and 0 backup servers left. 0 sessions active, 0 requeued, 0 remaining in queue. +[WARNING] 252/094143 (6) : Server check_if_redis_is_master_0/R2 is DOWN, reason: Layer7 timeout, info: " at step 5 of tcp-check (expect string '10.223.2.232')", check duration: 1000ms. 0 active and 0 backup servers left. 0 sessions active, 0 requeued, 0 remaining in queue. +[ALERT] 252/094143 (6) : backend 'check_if_redis_is_master_0' has no server available! +``` + +**解决方式** + +您需要先恢复 Redis 服务,保证其正常运行并且 Pod 之间网络可以正常联通,稍后重启 `ks-console`。 + +``` +kubectl -n kubesphere-system rollout restart deploy ks-console +``` diff --git a/content/zh/docs/v3.4/faq/access-control/forgot-password.md b/content/zh/docs/v3.4/faq/access-control/forgot-password.md new file mode 100644 index 000000000..9d479f663 --- /dev/null +++ b/content/zh/docs/v3.4/faq/access-control/forgot-password.md @@ -0,0 +1,33 @@ +--- +title: "重置帐户密码" +keywords: "忘记, 密码, KubeSphere, Kubernetes" +description: "重置任意一个用户的密码。" +linkTitle: "重置帐户密码" +Weight: 16410 +--- + +## 重置普通用户密码 + +1. 使用具有用户管理权限的用户登录 KubeSphere Web 控制台。 + +2. 点击左上角的**平台管理**,选择**访问**控制。点击**用户**。 + +3. 在**用户**页面,点击需要修改密码的用户进入详情页。 + +4. 在用户的详情页,点击**更多操作**并选择**修改密码**。 + +5. 
在出现的对话框中,输入新的密码并重复输入新的密码。完成后点击**确定**。 + +## 重置管理员密码 + +在 Host 集群执行以下命令修改指定帐户的密码: + +```bash +kubectl patch users <username> -p '{"spec":{"password":"<password>"}}' --type='merge' && kubectl annotate users <username> iam.kubesphere.io/password-encrypted- +``` + +{{< notice note >}} + +请将命令中的 `<username>` 修改为实际的用户名,将 `<password>` 修改为实际的新密码。 + +{{</ notice >}} \ No newline at end of file diff --git a/content/zh/docs/v3.4/faq/access-control/session-timeout.md b/content/zh/docs/v3.4/faq/access-control/session-timeout.md new file mode 100644 index 000000000..b0c8ed899 --- /dev/null +++ b/content/zh/docs/v3.4/faq/access-control/session-timeout.md @@ -0,0 +1,21 @@ +--- +title: "会话超时" +keywords: "会话超时, KubeSphere, Kubernetes" +description: "理解会话超时并自定义超时时间。" +linkTitle: "会话超时" +Weight: 16420 +--- + +当用户登录 KubeSphere 的控制台时会话开始。当会话过期时,您会看到信息“**会话超时或此帐户在其他地方登录,请重新登录**”。 + +## 会话超时 + +您可以控制会话超时时间,默认超时时间为两小时,即达到会话超时时间后,用户会从控制台自动登出。您可以为会话超时配置 [accessTokenMaxAge 和 accessTokenInactivityTimeout](../../../access-control-and-account-management/external-authentication/set-up-external-authentication)。 + +## 签名校验失败 + +在[多集群环境](../../../multicluster-management/enable-multicluster/direct-connection/#prepare-a-member-cluster)下,您必须正确设置 `clusterRole` 和 `jwtSecret`。 + +## 节点时钟偏移 + +节点时钟偏移会影响时间敏感性操作,例如用户令牌 (Token) 过期时间的验证。您可以将服务器时间和 NTP 服务器进行同步。您也可以设置 [MaximumClockSkew](../../../access-control-and-account-management/external-authentication/set-up-external-authentication),默认为 10 秒。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/faq/applications/_index.md b/content/zh/docs/v3.4/faq/applications/_index.md new file mode 100644 index 000000000..1a6a055ed --- /dev/null +++ b/content/zh/docs/v3.4/faq/applications/_index.md @@ -0,0 +1,7 @@ +--- +title: "应用程序" +keywords: 'Kubernetes, KubeSphere, OpenPitrix, 应用程序, 应用' +description: '关于 KubeSphere 中应用程序的常见问题' +layout: "second" +weight: 16900 +--- diff --git a/content/zh/docs/v3.4/faq/applications/remove-built-in-apps.md b/content/zh/docs/v3.4/faq/applications/remove-built-in-apps.md new file mode 100644 index 000000000..e236c9b4c --- /dev/null +++ b/content/zh/docs/v3.4/faq/applications/remove-built-in-apps.md @@ -0,0 +1,33 @@ +--- +title: "移除 KubeSphere 中的内置应用" +keywords: "KubeSphere, OpenPitrix, 应用程序, 应用" +description: "了解如何下架 KubeSphere 中的内置应用。" +linkTitle: "移除 KubeSphere 中的内置应用" +Weight: 16910 +--- + +作为一个以应用为中心的开源容器平台,KubeSphere 在基于 [OpenPitrix](https://github.com/openpitrix/openpitrix) 的应用商店中集成了应用。这些应用可供企业空间内的所有租户使用,但您也可以将这些应用从应用商店中移除。本教程为您演示怎样从应用商店中移除内置应用。 + +## 准备工作 + +- 您需要在本教程中使用具有 `platform-admin` 角色的用户(例如:`admin`)。 +- 您需要[启用应用商店](../../../pluggable-components/app-store/)。 + +## 移除内置应用 + +1. 以 `admin` 身份登录 Web 控制台,点击左上角**平台管理**,然后选择**应用商店管理**。 + +2. 在**应用**页面,您可以看到列表中展示的应用。选择您想要从应用商店中移除的应用,例如,点击 **Tomcat** 跳转到其详情页面。 + +3. 在 Tomcat 的详情页面,点击**下架应用**以移除应用。 + +4. 在出现的对话框中,点击**确定**以确认您的操作。 + +5. 
若要让该应用在应用商店中再次可用,请点击**上架应用**,然后点击**确定**以确认您的操作。 + + {{< notice note >}} + + 您也可以根据自己的需要,来创建包含必须角色的新用户。有关更多在 KubeSphere 中管理应用的信息,请参考[应用程序生命周期管理](../../../application-store/app-lifecycle-management/)。 + + {{}} + diff --git a/content/zh/docs/v3.4/faq/console/_index.md b/content/zh/docs/v3.4/faq/console/_index.md new file mode 100644 index 000000000..2a57aed19 --- /dev/null +++ b/content/zh/docs/v3.4/faq/console/_index.md @@ -0,0 +1,7 @@ +--- +title: "KubeSphere Web 控制台" +keywords: 'Kubernetes, KubeSphere, web 控制台' +description: '关于 KubeSphere Web 控制台的常见问题' +layout: "second" +weight: 16500 +--- diff --git a/content/zh/docs/v3.4/faq/console/change-console-language.md b/content/zh/docs/v3.4/faq/console/change-console-language.md new file mode 100644 index 000000000..62f653412 --- /dev/null +++ b/content/zh/docs/v3.4/faq/console/change-console-language.md @@ -0,0 +1,25 @@ +--- +title: "更改控制台语言" +keywords: "FAQ, 控制台, KubeSphere, Kubernetes, 语言" +description: "选择控制台的显示语言。" +linkTitle: "更改控制台语言" +Weight: 16530 +--- + +KubeSphere Web 控制台目前支持四种语言:简体中文、繁体中文、英文和西班牙文。 + +本教程演示如何更改控制台语言。 + +## 准备工作 + +您需要先安装 KubeSphere。 + +## 更改控制台语言 + +1. 登录 KubeSphere,点击右上角的用户名。 + +2. 在下拉菜单中,选择**个人设置**。 + +3. 在**基本信息**页面,从**语言**下拉列表中选择所需的语言。 + +4. 点击 保存设置。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/faq/console/console-web-browser.md b/content/zh/docs/v3.4/faq/console/console-web-browser.md new file mode 100644 index 000000000..eba3d2c4e --- /dev/null +++ b/content/zh/docs/v3.4/faq/console/console-web-browser.md @@ -0,0 +1,11 @@ +--- +title: "支持的浏览器" +keywords: "FAQ, 控制台, KubeSphere, Kubernetes" +description: "使用支持的浏览器访问控制台。" +linkTitle: "支持的浏览器" +Weight: 16510 +--- + +KubeSphere Web 控制台支持多种主流浏览器,包括 Chrome、Firefox、Safari 浏览器、Opera 和 Microsoft Edge。您需要使用以下表格内绿色框中的浏览器版本以访问 KubeSphere Web 控制台。 + +![console-browser](/images/docs/v3.3/faq/kubesphere-web-console/supported-browsers/console-browser.png) diff --git a/content/zh/docs/v3.4/faq/console/edit-resources-in-system-workspace.md b/content/zh/docs/v3.4/faq/console/edit-resources-in-system-workspace.md new file mode 100644 index 000000000..f8d91826d --- /dev/null +++ b/content/zh/docs/v3.4/faq/console/edit-resources-in-system-workspace.md @@ -0,0 +1,49 @@ +--- +title: "在控制台上编辑系统资源" +keywords: "系统, 资源, KubeSphere, Kubernetes" +description: "在控制台上启用对系统资源的编辑功能。" +linkTitle: '在控制台上编辑系统资源' +Weight: 16520 +--- + +当您安装 KubeSphere 时,企业空间 `system-workspace` 将被创建,用于运行所有 KubeSphere 系统项目和 Kubernetes 系统项目。为了避免对这两个系统的误操作,您不能直接在控制台上编辑该企业空间中的资源。但是,您仍然可以使用 `kubectl` 来修改资源。 + +本教程演示如何启用 `system-workspace` 资源的编辑功能。 + +{{< notice warning >}} + +编辑 `system-workspace` 中的资源可能会导致意外结果,例如 KubeSphere 系统和节点故障,并且可能对您的业务造成影响。执行此操作时请高度谨慎。 + +{{}} + +## 编辑控制台配置 + +1. 以 `admin` 用户登录 KubeSphere,点击右下角的 ,然后选择 **Kubectl**。 + +2. 执行如下命令: + + ```bash + kubectl -n kubesphere-system edit cm ks-console-config + ``` + +3. 在 `client` 下添加 `systemWorkspace` 字段并保存文件。 + + ```yaml + client: + version: + kubesphere: v3.3.2 + kubernetes: v1.22.12 + openpitrix: v3.3.2 + enableKubeConfig: true + systemWorkspace: "$" # 请手动添加此行。 + ``` + +4. 执行如下命令重新部署 `ks-console`,并等待容器组重建。 + + ```bash + kubectl -n kubesphere-system rollout restart deployment ks-console + ``` + +5. 刷新 KubeSphere 控制台。`system-workspace` 中的项目将出现编辑按钮。 + +6. 
如需关闭控制台的编辑功能,请采用相同方法删除 `systemWorkspace` 字段。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/faq/devops/_index.md b/content/zh/docs/v3.4/faq/devops/_index.md new file mode 100644 index 000000000..79fb04523 --- /dev/null +++ b/content/zh/docs/v3.4/faq/devops/_index.md @@ -0,0 +1,7 @@ +--- +title: "DevOps" +keywords: 'Kubernetes, KubeSphere, DevOps, Jenkins' +description: 'FAQ about DevOps in KubeSphere' +layout: "second" +weight: 16800 +--- diff --git a/content/zh/docs/v3.4/faq/devops/create-devops-kubeconfig-on-aws.md b/content/zh/docs/v3.4/faq/devops/create-devops-kubeconfig-on-aws.md new file mode 100644 index 000000000..758f35bc3 --- /dev/null +++ b/content/zh/docs/v3.4/faq/devops/create-devops-kubeconfig-on-aws.md @@ -0,0 +1,106 @@ +--- +title: "在 AWS 上创建 DevOps Kubeconfig" +keywords: "KubeSphere, Kubernetes, DevOps, Kubeconfig, AWS" +description: "如何在 AWS 上创建 DevOps Kubeconfig" +linkTitle: "在 AWS 上创建 DevOps Kubeconfig" +Weight: 16820 +--- + +在已安装 KubeSphere 的 AWS 集群上运行流水线时,如果无法将应用部署到项目中,可能是因 DevOps kubeconfig 出问题所导致。本教程介绍如何在 AWS 上创建 DevOps kubeconfig。 + +## 准备工作 + +- 您需要准备一个已安装 KubeSphere 的 AWS 集群。有关如何在 AWS 上安装 KubeSphere 的更多信息,请参考[在 AWS EKS 上部署 KubeSphere](../../../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks/)。 +- 您需要启用 [KubeSphere DevOps 系统](../../../pluggable-components/devops/)。 +- 您需要准备一个可以部署应用的项目。本教程以 `kubesphere-sample-dev` 项目为例。 + +## 创建 DevOps Kubeconfig + +### 步骤 1:创建 ServiceAccount + +1. 在您的 AWS 集群上创建 `devops-deploy.yaml` 文件并输入以下内容。 + + ```yaml + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: devops-deploy + namespace: kubesphere-sample-dev + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: devops-deploy-role + namespace: kubesphere-sample-dev + rules: + - apiGroups: + - "*" + resources: + - "*" + verbs: + - "*" + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: devops-deploy-rolebinding + namespace: kubesphere-sample-dev + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: devops-deploy-role + subjects: + - kind: ServiceAccount + name: devops-deploy + namespace: kubesphere-sample-dev + ``` + +2. 运行以下命令应用该 YAML 文件。 + + ```bash + kubectl apply -f devops-deploy.yaml + ``` + +### 步骤 2:获取服务帐户令牌 + +1. 运行以下命令获取服务帐户的令牌。 + + ```bash + export TOKEN_NAME=$(kubectl -n kubesphere-sample-dev get sa devops-deploy -o jsonpath='{.secrets[0].name}') + kubectl -n kubesphere-sample-dev get secret "${TOKEN_NAME}" -o jsonpath='{.data.token}' | base64 -d + ``` + +2. 输出类似如下: + + ![get-token](/images/docs/v3.3/zh-cn/faq/devops/create-devops-kubeconfig-on-aws/get-token.jpg) + +### 步骤 3:创建 DevOps kubeconfig + +1. 登录 AWS 集群的 KubeSphere 控制台,访问您的 DevOps 项目。转到 **DevOps 项目设置**下的**凭证**,然后点击**创建**。您可以按需输入该 kubeconfig 的**凭证 ID**。 + +2. 在 **Content** 文本框中,请注意以下内容: + + ``` + user: + client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FUR... + client-key-data: LS0tLS1CRUdJTiBQUk... 
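+      # 注:请删除上面的 client-certificate-data 和 client-key-data 两行,并替换为步骤 2 中获取的 ServiceAccount 令牌(见下方示例)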
+ ``` + + 您需要将其替换为在步骤 2 中获取的令牌,然后点击**确定**创建 kubeconfig。 + + ```bash + user: + token:eyJhbGciOiJSUzI1NiIsImtpZCI6Ikl3UkhCay13dHpPY2Z6LW9VTlZKQVR6dVdmb2FHallJQ2E4VzJULUNjUzAifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlc3BoZXJlLXNhbXBsZS1kZXYiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlY3JldC5uYW1lIjoiZGV2b3BzLWRlcGxveS10b2tlbi1kcGY2ZiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJkZXZvcHMtZGVwbG95Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiMjM0ZTI4OTUtMjM3YS00M2Y5LTkwMTgtZDg4YjY2YTQyNzVmIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmVzcGhlcmUtc2FtcGxlLWRldjpkZXZvcHMtZGVwbG95In0.Ls6mkpgAU75zVw87FkcWx-MLEXGcJjlnb4rUVtT61Jmc_G6jkn4X45MK1V_HuLje3JZMFjL80QUl5ljHLiCUPQ7oE5AUZaUCdqZVdDYEhqeFuGQb_7Qlh8-UFVGGg8vrb0HeGiOlS0qq5hzwKc9C1OmsXHS92yhNwz9gIOujZRafnGKIsG6TL2hEVY2xI0vvmseDKmKg5o0TbeaTMVePHvECju9Qz3Z7TUYsr7HAOvCPtGutlPWLqGx5uOHenOdeLn71x5RoS98xguZoxYVollciPKCQwBlZ4zWK2hzsLSNNLb9cZpxtgUVyHE0AB0e86IHRngnnNrzpp1_pDxL5jw/ + ``` + + {{< notice note >}} + + 请确保使用您自己的令牌。 + + {{}} + + + + + diff --git a/content/zh/docs/v3.4/faq/devops/install-jenkins-plugins.md b/content/zh/docs/v3.4/faq/devops/install-jenkins-plugins.md new file mode 100644 index 000000000..9849ce7f4 --- /dev/null +++ b/content/zh/docs/v3.4/faq/devops/install-jenkins-plugins.md @@ -0,0 +1,67 @@ +--- +title: "为 KubeSphere 中的 Jenkins 安装插件" +keywords: "KubeSphere, Kubernetes, DevOps, Jenkins, 插件" +description: "了解如何为 KubeSphere 中的 Jenkins 安装插件。" +linkTitle: "为 KubeSphere 中的 Jenkins 安装插件" +Weight: 16810 +--- + +KubeSphere DevOps 系统提供基于 Jenkins 的容器化 CI/CD 功能,而提升 Jenkins 功能的主要方法就是安装插件。本教程介绍如何在 Jenkins 面板上安装插件。 + +{{< notice warning >}} + +并非所有的 Jenkins 插件都拥有良好的维护支持。一些插件可能会导致 Jenkins 出现问题,甚至导致 KubeSphere 出现严重问题。强烈建议您在安装任意插件之前进行备份,若条件允许,先在其他环境进行测试。 + +{{}} + +## 准备工作 + +您需要启用 [KubeSphere DevOps 系统](../../../pluggable-components/devops/)。 + +## 安装插件 + +### 步骤 1:获取 Jenkins 地址 + +1. 运行以下命令获取 Jenkins 的地址。 + + ```bash + export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services devops-jenkins) + export NODE_IP=$(kubectl get nodes --namespace kubesphere-devops-system -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT + ``` + +2. 您会得到类似如下的输出。您可以通过输出的地址使用自己的 KubeSphere 用户和密码(例如 `admin/P@88w0rd`)访问 Jenkins 面板。 + + ``` + http://192.168.0.4:30180 + ``` + + {{< notice note >}} + + 请确保使用自己的 Jenkins 地址。根据您 KubeSphere 集群部署位置的不同,您可能需要在安全组中打开端口,并配置相关的端口转发规则。 + + {{}} + +### 步骤 2:在 Jenkins 面板上安装插件 + +1. 登录 Jenkins 面板,点击**系统管理**。 + +2. 在**系统管理**页面,下滑到**插件管理**并点击。 + +3. 点击**可选插件**选项卡,您必须使用搜索框来搜索所需插件。例如,您可以在搜索框中输入 `git`,勾选所需插件旁边的复选框,然后按需点击**直接安装**或**下载待重启后安装**。 + + {{< notice note >}} + + Jenkins 的插件相互依赖。安装插件时,您可能还需要安装其依赖项。 + + {{}} + +4. 如果已预先下载 HPI 文件,您也可以点击**高级**选项卡,上传该 HPI 文件作为插件进行安装。 + +5. 在**已安装**选项卡,可以查看已安装的全部插件。能够安全卸载的插件将会在右侧显示**卸载**按钮。 + +6. 
在**可更新**选项卡,先勾选插件左侧的复选框,再点击**下载待重启后安装**,即可安装更新的插件。您也可以点击**立即获取**按钮检查更新。 + +## 另请参见 + +[管理插件](https://www.jenkins.io/zh/doc/book/managing/plugins/) \ No newline at end of file diff --git a/content/zh/docs/v3.4/faq/installation/_index.md b/content/zh/docs/v3.4/faq/installation/_index.md new file mode 100644 index 000000000..bc0445e60 --- /dev/null +++ b/content/zh/docs/v3.4/faq/installation/_index.md @@ -0,0 +1,7 @@ +--- +title: "安装" +keywords: 'Kubernetes, KubeSphere, 安装, 常见问题' +description: '关于安装的常见问题' +layout: "second" +weight: 16100 +--- diff --git a/content/zh/docs/v3.4/faq/installation/configure-booster.md b/content/zh/docs/v3.4/faq/installation/configure-booster.md new file mode 100644 index 000000000..8a7cc6c3d --- /dev/null +++ b/content/zh/docs/v3.4/faq/installation/configure-booster.md @@ -0,0 +1,84 @@ +--- +title: "为安装配置加速器" +keywords: 'KubeSphere, 加速器, 安装, FAQ' +description: '设置仓库镜像地址加速安装时的镜像下载速度。' +linkTitle: "为安装配置加速器" +weight: 16200 +--- + +如果您无法从 `dockerhub.io` 下载镜像,强烈建议您预先配置仓库的镜像地址(即加速器)以加快下载速度。您可以参考[ Docker 官方文档](https://docs.docker.com/registry/recipes/mirror/#configure-the-docker-daemon),或执行以下步骤。 + +## 获取加速器地址 + +您需要获取仓库的一个镜像地址以配置加速器。您可以参考如何[从阿里云获取加速器地址](https://help.aliyun.com/document_detail/60750.html)。 + +## 配置仓库镜像地址 + +您可以直接配置 Docker 守护程序,也可以使用 KubeKey 进行配置。 + +### 配置 Docker 守护程序 + +{{< notice note >}} + +采用此方法,您需要预先安装 Docker。 + +{{}} + +1. 执行如下命令: + + ```bash + sudo mkdir -p /etc/docker + ``` + + ```bash + sudo vi /etc/docker/daemon.json + ``` + +2. 在文件中添加 `registry-mirrors` 键值对。 + + ```json + { + "registry-mirrors": ["https://"] + } + ``` + + {{< notice note >}} + + 请将命令中的地址替换成您实际的加速器地址。 + + {{}} + +3. 执行如下命令保存文件并重新加载 Docker,以使修改生效。 + + ```bash + sudo systemctl daemon-reload + ``` + + ```bash + sudo systemctl restart docker + ``` + +### 使用 KubeKey 配置仓库镜像地址 + +1. 在安装前用 KubeKey 创建 `config-sample.yaml` 文件,并定位到文件中的 `registry` 位置。 + + ```yaml + registry: + registryMirrors: [] + insecureRegistries: [] + privateRegistry: "" + ``` + + {{< notice note >}} + + 有关 `registry` 部分各个参数的更多信息,请参见 [Kubernetes 集群配置](../../../installing-on-linux/introduction/vars/)。 + + {{}} + +2. 
在 `registryMirrors` 处填入仓库的镜像地址并保存文件。有关安装的更多信息,请参见[多节点安装](../../../installing-on-linux/introduction/multioverview/)。 + +{{< notice note >}} + +[在 Linux 上通过 All-in-One 模式安装 KubeSphere](../../../quick-start/all-in-one-on-linux/) 不需要 `config-sample.yaml` 文件。该模式下请采用第一种方法进行配置。 + +{{}} \ No newline at end of file diff --git a/content/zh/docs/v3.4/faq/installation/install-addon-through-yaml-using-kubekey.md b/content/zh/docs/v3.4/faq/installation/install-addon-through-yaml-using-kubekey.md new file mode 100644 index 000000000..a7018b70f --- /dev/null +++ b/content/zh/docs/v3.4/faq/installation/install-addon-through-yaml-using-kubekey.md @@ -0,0 +1,19 @@ +--- +title: "使用 KubeKey 通过 YAML 安装插件" +keywords: "Installer, KubeKey, KubeSphere, Kubernetes, 插件" +description: "了解使用 KubeKey 安装 YAML 插件时可能失败的原因。" +linkTitle: "使用 KubeKey 通过 YAML 安装插件" +Weight: 16400 +--- + +当您使用 KubeKey 安装插件时,需要在配置文件(默认为 `config-sample.yaml`)的 `addons` 字段下添加插件信息(Chart 或 YAML)。如果所提供的插件以 YAML 格式安装,在某些情况下,安装时可能会报如下错误信息: + +```bash +Error from server: failed to create typed patch object: xxx: element 0: associative list with keys has an element that omits key field "protocol" +``` + +这是一个 [Kubernetes 本身的已知问题](https://github.com/kubernetes-sigs/structured-merge-diff/issues/130),由 `--server-side` 标志导致。若要解决该问题,请在部署完 KubeSphere 之后再应用该 YAML 文件,而非在配置文件中添加插件信息通过 KubeKey 安装。例如: + +```bash +kubectl apply -f xxx.yaml # 请替换为您自己的 YAML 文件。 +``` \ No newline at end of file diff --git a/content/zh/docs/v3.4/faq/installation/ssh-connection-failure.md b/content/zh/docs/v3.4/faq/installation/ssh-connection-failure.md new file mode 100644 index 000000000..da1560d62 --- /dev/null +++ b/content/zh/docs/v3.4/faq/installation/ssh-connection-failure.md @@ -0,0 +1,40 @@ +--- +title: "SSH 连接故障" +keywords: "安装, SSH, KubeSphere, Kubernetes" +description: "SSH 连接故障" +linkTitle: "SSH 连接故障" +Weight: 16600 +--- + +使用 KubeKey 设置集群时,将创建一个包含必要主机信息的配置文件。以下是 `hosts` 字段的示例: + +```bash +spec: + hosts: + - {name: master, address: 192.168.0.2, internalAddress: 192.168.0.2, user: ubuntu, password: Testing123} + - {name: node1, address: 192.168.0.3, internalAddress: 192.168.0.3, user: ubuntu, password: Testing123} + - {name: node2, address: 192.168.0.4, internalAddress: 192.168.0.4, user: ubuntu, password: Testing123} +``` + +在您开始使用 `./kk` 命令创建集群之前,建议使用 SSH 测试任务机与其他实例之间的连接情况。 + +## 可能出现的错误信息 + +```bash +Failed to connect to xx.xxx.xx.xxx: could not establish connection to xx.xxx.xx.xxx:xx: ssh: handshake failed: ssh: unable to authenticate , attempted methods [none], no supported methods remain node=xx.xxx.xx.xxx +``` + +如果出现了以上错误信息,请确保: + +- 您使用的端口号无误。端口 `22` 是 SSH 的默认端口,如果您使用的是不同的端口,则需要在 IP 地址后添加该端口号。例如: + + ```bash + hosts: + - {name: master, address: 192.168.0.2, internalAddress: 192.168.0.2, port: 8022, user: ubuntu, password: Testing123} + ``` + +- `/etc/ssh/sshd_config` 文件中没有限制 SSH 连接。例如,`PasswordAuthentication` 应设置为 `true`。 + +- 您使用的用户名、密码或密钥正确。请注意,用户必须拥有 sudo 权限。 + +- 您的防火墙配置允许 SSH 连接。 diff --git a/content/zh/docs/v3.4/faq/installation/telemetry.md b/content/zh/docs/v3.4/faq/installation/telemetry.md new file mode 100644 index 000000000..7ee58ce34 --- /dev/null +++ b/content/zh/docs/v3.4/faq/installation/telemetry.md @@ -0,0 +1,86 @@ +--- +title: "启用或禁用 KubeSphere 中的 Telemetry" +keywords: "安装器, Telemetry, KubeSphere, Kubernetes" +description: "了解 Telemetry 并学习如何在 KubeSphere 中启用或禁用。" +linkTitle: "启用或禁用 KubeSphere 中的 Telemetry" +Weight: 16300 +--- + +Telemetry 收集已安装 KubeSphere 集群的大小、KubeSphere 和 Kubernetes 版本、已启用的组件、集群运行时间、错误日志等汇总信息。KubeSphere 保证该信息仅由 KubeSphere 
社区用于改进产品,并且不与任何第三方分享该信息。 + +## 信息收集范围 + +- 外部网络 IP 地址 +- 下载日期 +- Kubernetes 版本 +- KubeSphere 版本 +- Kubernetes 集群大小 +- 操作系统类型 +- 安装器错误日志 +- 启用的组件 +- Kubernetes 集群运行时间 +- KubeSphere 集群运行时间 +- 集群 ID +- 机器 ID + +## 禁用 Telemetry + +在安装 KubeSphere 时 Telemetry 默认启用。同时,您也可以在安装前或安装后禁用 Telemetry。 + +### 安装前禁用 Telemetry + +在现有 Kubernetes 集群上安装 KubeSphere 时,您需要下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) 文件用于配置集群。如需禁用 Telemetry,请勿直接执行 `kubectl apply -f` 命令应用该文件。 + +{{< notice note >}} + +如果在 Linux 上安装 KubeSphere,请直接参考[安装后禁用 Telemetry](../telemetry/#安装后禁用-telemetry)。 + +{{}} + +1. 下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) 文件并编辑。 + + ```bash + vi cluster-configuration.yaml + ``` + +2. 在 `cluster-configuration.yaml` 文件末尾添加 `telemetry_enabled: false` 字段。 + + ```yaml + openpitrix: + store: + enabled: false + servicemesh: + enabled: false + telemetry_enabled: false # 请手动添加此行以禁用 Telemetry。 + ``` + +3. 保存文件并执行以下命令开始安装: + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f cluster-configuration.yaml + ``` + +### 安装后禁用 Telemetry + +1. 以 `admin` 用户登录控制台,点击页面左上角的**平台管理**。 + +2. 选择**集群管理**,在左侧导航栏中点击**定制资源定义**。 + + {{< notice note >}} +如果[多集群功能](../../../multicluster-management/)已经启用,您需要先选择一个集群。 + {{}} + +3. 在搜索框中输入 `clusterconfiguration`,点击搜索结果打开详情页。 + +4. 点击 `ks-installer` 右侧的 ,并选择**编辑 YAML**。 + +5. 在文件末尾添加 `telemetry_enabled: false` 字段,点击**确定**。 + + +{{< notice note >}} + +如需重新启用 Telemetry,请删除 `telemetry_enabled: false` 字段或将其更改为 `telemetry_enabled: true`,并更新 `ks-installer`。 + +{{}} diff --git a/content/zh/docs/v3.4/faq/multi-cluster-management/_index.md b/content/zh/docs/v3.4/faq/multi-cluster-management/_index.md new file mode 100644 index 000000000..57f23b873 --- /dev/null +++ b/content/zh/docs/v3.4/faq/multi-cluster-management/_index.md @@ -0,0 +1,7 @@ +--- +title: "多集群管理" +keywords: 'Kubernetes, KubeSphere, 多集群管理, 主集群, 成员集群' +description: 'KubeSphere 多集群管理常见问题' +layout: "second" +weight: 16700 +--- diff --git a/content/zh/docs/v3.4/faq/multi-cluster-management/host-cluster-access-member-cluster.md b/content/zh/docs/v3.4/faq/multi-cluster-management/host-cluster-access-member-cluster.md new file mode 100644 index 000000000..8f66b661b --- /dev/null +++ b/content/zh/docs/v3.4/faq/multi-cluster-management/host-cluster-access-member-cluster.md @@ -0,0 +1,71 @@ +--- +title: "恢复主集群对成员集群的访问权限" +keywords: "Kubernetes, KubeSphere, 多集群, 主集群, 成员集群" +description: "了解如何恢复主集群对成员集群的访问。" +linkTitle: "恢复主集群对成员集群的访问权限" +Weight: 16720 +--- + +[多集群管理](../../../multicluster-management/introduction/kubefed-in-kubesphere/)是 KubeSphere 的一大特色,拥有必要权限的租户(通常是集群管理员)能够从主集群访问中央控制平面,以管理全部成员集群。强烈建议您通过主集群管理整个集群的资源。 + +本教程演示如何恢复主集群对成员集群的访问权限。 + +## 可能出现的错误信息 + +如果您无法从中央控制平面访问成员集群,并且浏览器一直将您重新定向到 KubeSphere 的登录页面,请在该成员集群上运行以下命令来获取 ks-apiserver 的日志。 + +``` +kubectl -n kubesphere-system logs ks-apiserver-7c9c9456bd-qv6bs +``` + +{{< notice note >}} + +`ks-apiserver-7c9c9456bd-qv6bs` 指的是该成员集群上的容器组 ID。请确保您使用自己的容器组 ID。 + +{{}} + +您可能会看到以下错误信息: + +``` +E0305 03:46:42.105625 1 token.go:65] token not found in cache +E0305 03:46:42.105725 1 jwt_token.go:45] token not found in cache +E0305 03:46:42.105759 1 authentication.go:60] Unable to authenticate the request due to error: token not found in cache +E0305 03:46:52.045964 1 token.go:65] token not found in cache +E0305 03:46:52.045992 1 
jwt_token.go:45] token not found in cache +E0305 03:46:52.046004 1 authentication.go:60] Unable to authenticate the request due to error: token not found in cache +E0305 03:47:34.502726 1 token.go:65] token not found in cache +E0305 03:47:34.502751 1 jwt_token.go:45] token not found in cache +E0305 03:47:34.502764 1 authentication.go:60] Unable to authenticate the request due to error: token not found in cache +``` + +## 解决方案 + +### 步骤 1:验证 jwtSecret + +分别在主集群和成员集群上运行以下命令,确认它们的 jwtSecret 是否相同。 + +``` +kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret +``` + +### 步骤 2:更改 `accessTokenMaxAge` + +请确保主集群和成员集群的 jwtSecret 相同,然后在该成员集群上运行以下命令获取 `accessTokenMaxAge` 的值。 + +``` +kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep accessTokenMaxAge +``` + +如果该值不为 `0`,请运行以下命令更改 `accessTokenMaxAge` 的值。 + +``` +kubectl -n kubesphere-system edit cm kubesphere-config -o yaml +``` + +将 `accessTokenMaxAge` 的值更改为 `0` 之后,运行以下命令重启 ks-apiserver。 + +``` +kubectl -n kubesphere-system rollout restart deploy ks-apiserver +``` + +现在,您可以再次从中央控制平面访问该成员集群。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/faq/multi-cluster-management/manage-multi-cluster.md b/content/zh/docs/v3.4/faq/multi-cluster-management/manage-multi-cluster.md new file mode 100644 index 000000000..d50fc8330 --- /dev/null +++ b/content/zh/docs/v3.4/faq/multi-cluster-management/manage-multi-cluster.md @@ -0,0 +1,60 @@ +--- +title: "在 KubeSphere 上管理多集群环境" +keywords: 'Kubernetes,KubeSphere,联邦,多集群,混合云' +description: '理解如何在 KubeSphere 上管理多集群环境。' +linkTitle: "在 KubeSphere 上管理多集群环境" +weight: 16710 + +--- + +KubeSphere 提供了易于使用的多集群功能,帮助您[在 KubeSphere 上构建多集群环境](../../../multicluster-management/)。本指南说明如何在 KubeSphere 上管理多集群环境。 + +## 准备工作 + +- 请确保您的 Kubernetes 集群在用作主集群和成员集群之前已安装 KubeSphere。 +- 请确保主集群和成员集群分别设置了正确的集群角色,并且在主集群和成员集群上的 `jwtSecret` 也相同。 +- 建议成员集群在导入主集群之前是干净环境,即没有创建任何资源。 + + +## 管理 KubeSphere 多集群环境 + +当您在 KubeSphere 上创建多集群环境之后,您可以通过主集群的中央控制平面管理该环境。在创建资源的时候,您可以选择一个特定的集群,但是需要避免您的主集群过载。不建议您登录成员集群的 KubeSphere Web 控制台去创建资源,因为部分资源(例如:企业空间)将不会同步到您的主集群进行管理。 + +### 资源管理 + +不建议您将主集群转换为成员集群,或将成员集群转换成主集群。如果一个成员集群曾经被导入进主集群,您将该成员集群从先前的主集群解绑后,再导入进新的主集群时必须使用相同的集群名称。 + +如果您想在将成员集群导入新的主集群时保留现有项目,请按照以下步骤进行操作。 + +1. 在成员集群上运行以下命令将需要保留的项目从企业空间解绑。 + + ```bash + kubectl label ns <项目名称> kubesphere.io/workspace- && kubectl patch ns <项目名称> -p '{"metadata":{"ownerReferences":[]}}' --type=merge + ``` + +2. 在成员集群运行以下命令清除您的企业空间。 + + ```bash + kubectl delete workspacetemplate <企业空间名称> + ``` + +3. 
当您在主集群中创建新的企业空间,并将成员集群分配到这个企业空间时,请在成员集群运行以下命令将保留的项目绑定至新的企业空间。 + + ```bash + kubectl label ns <项目名称> kubesphere.io/workspace=<企业空间名称> + ``` + +### 用户管理 + +您通过主集群的中央控制平面创建的用户会被同步至成员集群。 + +如果您希望让不同的用户访问不同的集群,您可以创建企业空间并[赋予他们不同的集群](../../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/)。在此之后,您可以根据这些用户的访问要求,邀请不同的用户至这些企业空间。 + +### KubeSphere 组件管理 + +KubeSphere 提供了一些可插拔组件,您可以根据需要去启用。在多集群环境下,您可以在主集群或成员集群上启用这些组件。 + +例如,您只需在主集群上启用应用商店,就可以直接在成员集群上使用与应用商店相关的功能。对于其他组件,当您在主集群上启用它们时,仍然需要在成员集群上手动启用相同组件以实现相同的功能。此外,您还可以仅在成员集群上启用组件,以便仅在成员集群上实现相应的功能。 + +有关如何启用可插拔组件的更多信息,请参考[启用可插拔组件](../../../pluggable-components/)。 + diff --git a/content/zh/docs/v3.4/faq/observability/_index.md b/content/zh/docs/v3.4/faq/observability/_index.md new file mode 100644 index 000000000..12c7317a1 --- /dev/null +++ b/content/zh/docs/v3.4/faq/observability/_index.md @@ -0,0 +1,7 @@ +--- +title: "可观测性" +keywords: 'Kubernetes, KubeSphere, 可观测性, FAQ' +description: '关于可观测性的常见问题' +layout: "second" +weight: 16300 +--- diff --git a/content/zh/docs/v3.4/faq/observability/byop.md b/content/zh/docs/v3.4/faq/observability/byop.md new file mode 100644 index 000000000..506073bd1 --- /dev/null +++ b/content/zh/docs/v3.4/faq/observability/byop.md @@ -0,0 +1,207 @@ +--- +title: "集成您自己的 Prometheus" +keywords: "监控, Prometheus, node-exporter, kube-state-metrics, KubeSphere, Kubernetes" +description: "在 KubeSphere 中使用您自己的 Prometheus 堆栈设置。" +linkTitle: "集成您自己的 Prometheus" +Weight: 16330 +--- + +KubeSphere 自带一些预装的自定义监控组件,包括 Prometheus Operator、Prometheus、Alertmanager、Grafana(可选)、各种 ServiceMonitor、node-exporter 和 kube-state-metrics。在您安装 KubeSphere 之前,这些组件可能已经存在。在 KubeSphere 3.3 中,您可以使用自己的 Prometheus 堆栈设置。 + +## 集成您自己的 Prometheus + +要使用您自己的 Prometheus 堆栈设置,请执行以下步骤: + +### 步骤 1:卸载 KubeSphere 的自定义 Prometheus 堆栈 + +1. 
执行以下命令,卸载堆栈: + + ```bash + kubectl -n kubesphere-system exec $(kubectl get pod -n kubesphere-system -l app=ks-installer -o jsonpath='{.items[0].metadata.name}') -- kubectl delete -f /kubesphere/kubesphere/prometheus/alertmanager/ 2>/dev/null + kubectl -n kubesphere-system exec $(kubectl get pod -n kubesphere-system -l app=ks-installer -o jsonpath='{.items[0].metadata.name}') -- kubectl delete -f /kubesphere/kubesphere/prometheus/devops/ 2>/dev/null + kubectl -n kubesphere-system exec $(kubectl get pod -n kubesphere-system -l app=ks-installer -o jsonpath='{.items[0].metadata.name}') -- kubectl delete -f /kubesphere/kubesphere/prometheus/etcd/ 2>/dev/null + kubectl -n kubesphere-system exec $(kubectl get pod -n kubesphere-system -l app=ks-installer -o jsonpath='{.items[0].metadata.name}') -- kubectl delete -f /kubesphere/kubesphere/prometheus/grafana/ 2>/dev/null + kubectl -n kubesphere-system exec $(kubectl get pod -n kubesphere-system -l app=ks-installer -o jsonpath='{.items[0].metadata.name}') -- kubectl delete -f /kubesphere/kubesphere/prometheus/kube-state-metrics/ 2>/dev/null + kubectl -n kubesphere-system exec $(kubectl get pod -n kubesphere-system -l app=ks-installer -o jsonpath='{.items[0].metadata.name}') -- kubectl delete -f /kubesphere/kubesphere/prometheus/node-exporter/ 2>/dev/null + kubectl -n kubesphere-system exec $(kubectl get pod -n kubesphere-system -l app=ks-installer -o jsonpath='{.items[0].metadata.name}') -- kubectl delete -f /kubesphere/kubesphere/prometheus/upgrade/ 2>/dev/null + kubectl -n kubesphere-system exec $(kubectl get pod -n kubesphere-system -l app=ks-installer -o jsonpath='{.items[0].metadata.name}') -- kubectl delete -f /kubesphere/kubesphere/prometheus/prometheus-rules-v1.16\+.yaml 2>/dev/null + kubectl -n kubesphere-system exec $(kubectl get pod -n kubesphere-system -l app=ks-installer -o jsonpath='{.items[0].metadata.name}') -- kubectl delete -f /kubesphere/kubesphere/prometheus/prometheus-rules.yaml 2>/dev/null + kubectl -n kubesphere-system exec $(kubectl get pod -n kubesphere-system -l app=ks-installer -o jsonpath='{.items[0].metadata.name}') -- kubectl delete -f /kubesphere/kubesphere/prometheus/prometheus 2>/dev/null + # Uncomment this line if you don't have Prometheus managed by Prometheus Operator in other namespaces. + # kubectl -n kubesphere-system exec $(kubectl get pod -n kubesphere-system -l app=ks-installer -o jsonpath='{.items[0].metadata.name}') -- kubectl delete -f /kubesphere/kubesphere/prometheus/init/ 2>/dev/null + ``` + +2. 
删除 Prometheus 使用的 PVC。 + + ```bash + kubectl -n kubesphere-monitoring-system delete pvc `kubectl -n kubesphere-monitoring-system get pvc | grep -v VOLUME | awk '{print $1}' | tr '\n' ' '` + ``` + +### 步骤 2:安装您自己的 Prometheus 堆栈 + +{{< notice note >}} + +KubeSphere 3.3 已经过认证,可以与以下 Prometheus 堆栈组件搭配使用: + +- Prometheus Operator **v0.55.1+** +- Prometheus **v2.34.0+** +- Alertmanager **v0.23.0+** +- kube-state-metrics **v2.5.0** +- node-exporter **v1.3.1** + +请确保您的 Prometheus 堆栈组件版本符合上述版本要求,尤其是 **node-exporter** 和 **kube-state-metrics**。 + +如果只安装了 **Prometheus Operator** 和 **Prometheus**,请您务必安装 **node-exporter** 和 **kube-state-metrics**。**node-exporter** 和 **kube-state-metrics** 是 KubeSphere 正常运行的必要条件。 + +**如果整个 Prometheus 堆栈已经启动并运行,您可以跳过此步骤。** + +{{}} + +Prometheus 堆栈可以通过多种方式进行安装。下面的步骤演示如何使用 [ks-installer 中的 Prometheus stack manifests](https://github.com/kubesphere/ks-installer/tree/release-3.3/roles/ks-monitor/files/prometheus) (其由 KubeSphere 基于 [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus.git) 的定制版本生成) 将 Prometheus 堆栈安装至命名空间 `monitoring` 中。 + +1. 获取 KubeSphere 3.3.0 所使用的 `ks-installer`。 + + ```bash + cd ~ && git clone -b release-3.3 https://github.com/kubesphere/ks-installer.git && cd ks-installer/roles/ks-monitor/files/prometheus + ``` + +2. 创建 `kustomization.yaml`: + ```bash + # create + cat < kustomization.yaml + apiVersion: kustomize.config.k8s.io/v1beta1 + kind: Kustomization + namespace: monitoring + resources: + EOF + # append yaml paths + find . -mindepth 2 -name "*.yaml" -type f -print | sed 's/^/- /' >> kustomization.yaml + ``` + +3. (可选)移除不必要的组件。例如,KubeSphere 未启用 Grafana 时,可以删除 `kustomization.yaml` 中的 `grafana` 部分: + + ```bash + sed -i '/grafana\//d' kustomization.yaml + ``` + +4. 安装堆栈。 + + ```bash + kubectl apply -k . + ``` + +### 步骤 3:将 KubeSphere 自定义组件安装至您的 Prometheus 堆栈 + +{{< notice note >}} + +如果您的 Prometheus 堆栈是通过 [ks-installer 中的 Prometheus stack manifests](https://github.com/kubesphere/ks-installer/tree/release-3.3/roles/ks-monitor/files/prometheus) 进行安装,您可以跳过此步骤。 + +KubeSphere 3.3.0 使用 Prometheus Operator 来管理 Prometheus/Alertmanager 配置和生命周期、ServiceMonitor(用于管理抓取配置)和 PrometheusRule(用于管理 Prometheus 记录/告警规则)。 + +如果您的 Prometheus 堆栈不是由 Prometheus Operator 进行管理,您可以跳过此步骤。但请务必确保: + +- 您必须将 [PrometheusRule](https://github.com/kubesphere/ks-installer/tree/release-3.3/roles/ks-monitor/files/prometheus/kubernetes/kubernetes-prometheusRule.yaml) 和 [PrometheusRule for etcd](https://github.com/kubesphere/ks-installer/tree/release-3.3/roles/ks-monitor/files/prometheus/etcd/prometheus-rulesEtcd.yaml) 中的记录/告警规则复制至您的 Prometheus 配置中,以便 KubeSphere 3.3.0 能够正常运行。 + +- 配置您的 Prometheus,使其抓取指标的目标 (Target) 与 各组件的 [serviceMonitor](https://github.com/kubesphere/ks-installer/tree/release-3.3/roles/ks-monitor/files/prometheus/) 文件中列出的目标相同。 + +{{}} + +1. 获取 KubeSphere 3.3.0 所使用的 `ks-installer`。 + + ```bash + cd ~ && git clone -b release-3.3 https://github.com/kubesphere/ks-installer.git && cd ks-installer/roles/ks-monitor/files/prometheus + ``` + +2. 
创建 `kustomization.yaml`,填充如下内容。 + + ```yaml + apiVersion: kustomize.config.k8s.io/v1beta1 + kind: Kustomization + namespace: + resources: + - ./alertmanager/alertmanager-secret.yaml + - ./etcd/prometheus-rulesEtcd.yaml + - ./kube-state-metrics/kube-state-metrics-serviceMonitor.yaml + - ./kubernetes/kubernetes-prometheusRule.yaml + - ./kubernetes/kubernetes-serviceKubeControllerManager.yaml + - ./kubernetes/kubernetes-serviceKubeScheduler.yaml + - ./kubernetes/kubernetes-serviceMonitorApiserver.yaml + - ./kubernetes/kubernetes-serviceMonitorCoreDNS.yaml + - ./kubernetes/kubernetes-serviceMonitorKubeControllerManager.yaml + - ./kubernetes/kubernetes-serviceMonitorKubeScheduler.yaml + - ./kubernetes/kubernetes-serviceMonitorKubelet.yaml + - ./node-exporter/node-exporter-serviceMonitor.yaml + - ./prometheus/prometheus-clusterRole.yaml + ``` + + {{< notice note >}} + + - 将此处 `namespace` 的值设置为您自己的命名空间。例如,如果您在步骤 2 将 Prometheus 安装在命名空间 `monitoring` 中,这里即为 `monitoring`。 + - 如果您启用了 KubeSphere 的告警,还需要将 `thanos-ruler` 的 yaml 文件路径补充到 `kustomization.yaml` 中。 + + {{}} + + +3. 安装以上 KubeSphere 必要组件。 + + ```bash + kubectl apply -k . + ``` + +4. 在您自己的命名空间中查找 Prometheus CR,通常为 k8s。 + + ```bash + kubectl -n get prometheus + ``` + +5. 将 Prometheus 规则评估间隔设置为 1m,与 KubeSphere 3.3.0 的自定义 ServiceMonitor 保持一致。规则评估间隔应大于或等于抓取间隔。 + + ```bash + kubectl -n patch prometheus k8s --patch '{ + "spec": { + "evaluationInterval": "1m" + } + }' --type=merge + ``` + +### 步骤 4:更改 KubeSphere 的 `monitoring endpoint` + +您自己的 Prometheus 堆栈现在已启动并运行,您可以更改 KubeSphere 的监控 Endpoint 来使用您自己的 Prometheus。 + +1. 运行以下命令,编辑 `kubesphere-config`。 + + ```bash + kubectl edit cm -n kubesphere-system kubesphere-config + ``` + +2. 搜索 `monitoring endpoint` 部分,如下所示。 + + ```yaml + monitoring: + endpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090 + ``` + +3. 将 `endpoint` 的值更改为您自己的 Prometheus。 + + ```yaml + monitoring: + endpoint: http://prometheus-operated.monitoring.svc:9090 + ``` + +4. 如果您启用了 KubeSphere 的告警组件,请搜索 `alerting` 的 `prometheusEndpoint` 和 `thanosRulerEndpoint`,并参照如下示例修改。KubeSphere Apiserver 将自动重启使设置生效。 + + ```yaml + ... + alerting: + ... + prometheusEndpoint: http://prometheus-operated.monitoring.svc:9090 + thanosRulerEndpoint: http://thanos-ruler-operated.monitoring.svc:10902 + ... + ... 
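+    # 注:以上示例假设您的 Prometheus 部署在 monitoring 命名空间,请按实际命名空间调整 Endpoint 地址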
+ ``` + +{{< notice warning >}} + +如果您按照[此指南](../../../pluggable-components/overview/)启用/禁用 KubeSphere 可插拔组件,`monitoring endpoint` 会重置为初始值。此时,您需要再次将其更改为您自己的 Prometheus。 + +{{}} \ No newline at end of file diff --git a/content/zh/docs/v3.4/faq/observability/logging.md b/content/zh/docs/v3.4/faq/observability/logging.md new file mode 100644 index 000000000..a29fe0e44 --- /dev/null +++ b/content/zh/docs/v3.4/faq/observability/logging.md @@ -0,0 +1,170 @@ +--- +title: "日志" +keywords: "Kubernetes, Elasticsearch, KubeSphere, 日志" +description: "有关日志功能的常见问题。" +linkTitle: "日志" +weight: 16310 +--- + +本页包含一些关于日志的常见问题。 + +- [如何将日志存储改为外部 Elasticsearch 并关闭内部 Elasticsearch](../../observability/logging/#如何将日志存储改为外部-elasticsearch-并关闭内部-elasticsearch) +- [如何在启用 X-Pack Security 的情况下将日志存储改为 Elasticsearch](../../observability/logging/#如何在启用-x-pack-security-的情况下将日志存储改为-elasticsearch) +- [如何修改日志数据保留期限](../../observability/logging/#如何修改日志数据保留期限) +- [无法使用工具箱找到某些节点上工作负载的日志](../../observability/logging/#无法使用工具箱找到某些节点上工作负载的日志) +- [工具箱中的日志查询页面在加载时卡住](../../observability/logging/#工具箱中的日志查询页面在加载时卡住) +- [工具箱显示今天没有日志记录](../../observability/logging/#工具箱显示今天没有日志记录) +- [在工具箱中查看日志时,报告内部服务器错误](../../observability/logging/#在工具箱中查看日志时报告内部服务器错误) +- [如何让 KubeSphere 只收集指定工作负载的日志](../../observability/logging/#如何让-kubesphere-只收集指定工作负载的日志) +- [在查看容器实时日志的时候,控制台上看到的实时日志要比 kubectl log -f xxx 看到的少](../../observability/logging/#在查看容器实时日志的时候控制台上看到的实时日志要比-kubectl-log--f-xxx-看到的少) + +## 如何将日志存储改为外部 Elasticsearch 并关闭内部 Elasticsearch + +如果您使用的是 KubeSphere 内部的 Elasticsearch,并且想把它改成您的外部 Elasticsearch,请按照以下步骤操作。如果您还没有启用日志系统,请参考 [KubeSphere 日志系统](../../../pluggable-components/logging/)直接设置外部 Elasticsearch。 + +1. 首先,请执行以下命令更新 KubeKey 配置: + + ```bash + kubectl edit cc -n kubesphere-system ks-installer + ``` + +2. 将 `es.elasticsearchDataXXX`、`es.elasticsearchMasterXXX` 和 `status.logging` 的注释取消,将 `es.externalElasticsearchHost` 设置为 Elasticsearch 的地址,将 `es.externalElasticsearchPort` 设置为其端口号。以下示例供您参考: + + ```yaml + apiVersion: installer.kubesphere.io/v1alpha1 + kind: ClusterConfiguration + metadata: + name: ks-installer + namespace: kubesphere-system + ... + spec: + ... + common: + es: + # elasticsearchDataReplicas: 1 + # elasticsearchDataVolumeSize: 20Gi + # elasticsearchMasterReplicas: 1 + # elasticsearchMasterVolumeSize: 4Gi + elkPrefix: logstash + logMaxAge: 7 + externalElasticsearchHost: <192.168.0.2> + externalElasticsearchPort: <9200> + ... + status: + ... + # logging: + # enabledTime: 2020-08-10T02:05:13UTC + # status: enabled + ... + ``` + +3. 重新运行 `ks-installer`。 + + ```bash + kubectl rollout restart deploy -n kubesphere-system ks-installer + ``` + +4. 运行以下命令删除内部 Elasticsearch,请确认您已备份内部 Elasticsearch 中的数据。 + + ```bash + helm uninstall -n kubesphere-logging-system elasticsearch-logging + ``` + +5. 如果启用了 Istio,需要修改 Jaeger 配置。 + + ```yaml + $ kubectl -n istio-system edit jaeger + ... + options: + es: + index-prefix: logstash + server-urls: http://elasticsearch-logging-data.kubesphere-logging-system.svc:9200 # 修改为外部地址 + ``` + +## 如何在启用 X-Pack Security 的情况下将日志存储改为 Elasticsearch + +KubeSphere 暂不支持启用 X-Pack Security 的 Elasticsearch 集成,此功能即将推出。 + +## 如何设置审计、事件、日志及 Istio 日志信息的保留期限 + +KubeSphere v3.3 还支持您设置日志、审计、事件及 Istio 日志信息的保留期限。 + +您需要更新 KubeKey 配置并重新运行 `ks-installer`。 + +1. 执行以下命令: + + ```bash + kubectl edit cc -n kubesphere-system ks-installer + ``` + +2. 
在 YAML 文件中,如果您只想修改日志的保存期限,可以直接修改 `logMaxAge` 的默认值。如果您想设置审计、事件及 Istio 日志信息的保留期限,需要添加参数 `auditingMaxAge`、`eventMaxAge` 和 `istioMaxAge`,并分别设置它们的保存期限,如下例所示: + + ```yaml + apiVersion: installer.kubesphere.io/v1alpha1 + kind: ClusterConfiguration + metadata: + name: ks-installer + namespace: kubesphere-system + ... + spec: + ... + common: + es: # Storage backend for logging, events and auditing. + ... + logMaxAge: 7 # Log retention time in built-in Elasticsearch. It is 7 days by default. + auditingMaxAge: 2 + eventMaxAge: 1 + istioMaxAge: 4 + ... + ``` + +3. 重新运行 `ks-installer`。 + + ```bash + kubectl rollout restart deploy -n kubesphere-system ks-installer + ``` + +## 无法使用工具箱找到某些节点上工作负载的日志 + +如果您采用[多节点安装](../../../installing-on-linux/introduction/multioverview)部署 KubeSphere,并且使用符号链接作为 Docker 根目录,请确保所有节点遵循完全相同的符号链接。日志代理以守护进程集的形式部署到节点上。容器日志路径的任何差异都可能导致该节点上日志收集失败。 + +若要找出节点上的 Docker 根目录路径,您可以运行以下命令。请确保所有节点都适用相同的值。 + +```shell +docker info -f '{{.DockerRootDir}}' +``` + +## 工具箱中的日志查询页面在加载时卡住 + +如果您发现日志查询页面在加载时卡住,请检查您所使用的存储系统。例如,配置不当的 NFS 存储系统可能会导致此问题。 + +## 工具箱显示今天没有日志记录 + +请检查您的日志存储卷是否超过了 Elasticsearch 的存储限制。如果是,请增加 Elasticsearch 的磁盘存储卷容量。 + +## 在工具箱中查看日志时,报告内部服务器错误 + +如果您在工具箱中看到内部服务器错误,可能有以下几个原因: + +- 网络分区 +- 无效的 Elasticsearch 主机和端口 +- Elasticsearch 健康状态为红色 + +## 如何让 KubeSphere 只收集指定工作负载的日志 + +KubeSphere 的日志代理由 Fluent Bit 所提供,您需要更新 Fluent Bit 配置来排除某些工作负载的日志。若要修改 Fluent Bit 输入配置,请运行以下命令: + +```shell +kubectl edit input -n kubesphere-logging-system tail +``` + +更新 `Input.Spec.Tail.ExcludePath` 字段。例如,将路径设置为 `/var/log/containers/*_kube*-system_*.log`,以排除系统组件的全部日志。 + +有关更多信息,请参见 [Fluent Bit Operator](https://github.com/kubesphere/fluentbit-operator)。 + +## 在查看容器实时日志的时候,控制台上看到的实时日志要比 kubectl log -f xxx 看到的少 + +主要有以下几个原因: + +- 当实时去查看容器日志时,Kubernetes 是分 chunk 形式返回,Kubernetes 大概 2 分钟左右会返回一次数据,比较慢 +- 未开启‘实时查看’时看到的末尾部分,在实时查看时,被划分在下次返回的部分中,现象看起来像是日志缺失 diff --git a/content/zh/docs/v3.4/faq/observability/monitoring.md b/content/zh/docs/v3.4/faq/observability/monitoring.md new file mode 100644 index 000000000..9732c80e4 --- /dev/null +++ b/content/zh/docs/v3.4/faq/observability/monitoring.md @@ -0,0 +1,124 @@ +--- +title: "监控" +keywords: "Kubernetes, Prometheus, KubeSphere, 监控" +description: "有关监控功能的常见问题。" +linkTitle: "监控" +weight: 16320 +--- + +本页包含关于监控的一些常见问题。 + +- [如何访问 KubeSphere Prometheus 控制台](../../observability/monitoring/#如何访问-kubesphere-prometheus-控制台) +- [Node Exporter 引起的主机端口 9100 冲突](../../observability/monitoring/#node-exporter-引起的主机端口-9100-冲突) +- [与现有的 Prometheus Operator 相冲突](../../observability/monitoring/#与现有的-prometheus-operator-相冲突) +- [如何更改监控数据保留期限](../../observability/monitoring/#如何更改监控数据保留期限) +- [kube-scheduler 和 kube-controller-manager 没有监控数据](../../observability/monitoring/#kube-scheduler-和-kube-controller-manager-没有监控数据) +- [近几分钟没有监控数据](../../observability/monitoring/#近几分钟没有监控数据) +- [节点和控制平面都没有监控数据](../../observability/monitoring/#节点和控制平面都没有监控数据) +- [Prometheus 产生错误日志:打开存储失败、没有此文件或目录](../../observability/monitoring/#prometheus-产生错误日志打开存储失败没有此文件或目录) + +## 如何访问 KubeSphere Prometheus 控制台 + +KubeSphere 监控引擎由 Prometheus 提供支持。出于调试目的,您可能希望通过 NodePort 访问内置的 Prometheus 服务,请运行以下命令将服务类型更改为 `NodePort`: + +```shell +kubectl edit svc -n kubesphere-monitoring-system prometheus-k8s +``` + +{{< notice note >}} + +若要访问 Prometheus 控制台,您可能需要根据您的环境开放相关端口并配置端口转发规则。 + +{{}} + +## Node Exporter 引起的主机端口 9100 冲突 + +如果有进程占用主机端口 9100,`kubespher-monitoring-system` 下的 Node Exporter 会崩溃。若要解决冲突,您需要终止进程或将 Node Exporter 换到另一个可用端口。 + +若要采用另一个主机端口(例如 `29100`),请运行以下命令将所有的 `9100` 替换为 `29100`(需要更改 5 处)。 + + ```shell + 
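# 编辑 node-exporter DaemonSet,并将其中所有 9100 替换为 29100(共 5 处,见下方示例) +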
kubectl edit ds -n kubesphere-monitoring-system node-exporter + ``` + + ```shell + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: node-exporter + namespace: kubesphere-monitoring-system + ... + spec: + ... + template: + ... + spec: + containers: + - name: node-exporter + image: kubesphere/node-exporter:ks-v0.18.1 + args: + - --web.listen-address=127.0.0.1:9100 + ... + - name: kube-rbac-proxy + image: kubesphere/kube-rbac-proxy:v0.4.1 + args: + - --logtostderr + - --secure-listen-address=[$(IP)]:9100 + - --upstream=http://127.0.0.1:9100/ + ... + ports: + - containerPort: 9100 + hostPort: 9100 + ... + ``` + +## 与现有的 Prometheus Operator 相冲突 + +如果您已自行部署 Prometheus Operator,请确保在安装 KubeSphere 之前将 Prometheus Operator 删除。否则,可能会出现冲突,即 KubeSphere 内置的 Prometheus Operator 选择重复的 ServiceMonitor 对象。 + +## 如何更改监控数据保留期限 + +运行以下命令编辑最大保留期限。导航到 `retention` 字段,并设置所需保留期限(默认为 `7d`)。 + +```shell +kubectl edit prometheuses -n kubesphere-monitoring-system k8s +``` + +## kube-scheduler 和 kube-controller-manager 没有监控数据 + +首先,请确保标志 `--bind-address` 设置为 `0.0.0.0`(默认),而不是 `127.0.0.1`。Prometheus 可能需要从其他主机访问这些组件。 + +其次,请检查 `kube-scheduler` 和 `kube-controller-manager` 的端点对象是否存在。如果缺失,请通过创建服务和选择目标 Pod 手动创建。 + +```shell +kubectl get ep -n kube-system | grep -E 'kube-scheduler|kube-controller-manager' +``` + +## 近几分钟没有监控数据 + +请检查计算机浏览器的本地时钟是否与互联网时间以及您的集群同步,时差可能会导致该问题。如果您的计算机连接的是内联网,尤其可能会出现这种情况。 + +## 节点和控制平面都没有监控数据 + +请检查您的网络插件,并确保您的主机和 Pod 网络 CIDR 之间没有 IPPool 重叠。强烈建议您使用 [KubeKey](https://github.com/kubesphere/kubekey) 安装 Kubernetes。 + +中文读者可以参考 KubeSphere 开发者社区的[讨论](https://ask.kubesphere.io/forum/d/2027/16)了解更多信息。 + +## Prometheus 产生错误日志:打开存储失败、没有此文件或目录 + +如果 `kubesphere-monitoring-system` 中的 Prometheus Pod 崩溃并产生以下错误日志,您的 Prometheus 数据可能已经损坏,需要手动删除才能恢复。 + +```shell +level=error ts=2020-10-14T17:43:30.485Z caller=main.go:764 err="opening storage failed: block dir: \"/prometheus/01EM0016F8FB33J63RNHFMHK3\": open /prometheus/01EM0016F8FB33J63RNHFMHK3/meta.json: no such file or directory" +``` + +执行进入 Prometheus Pod(如果可能),并删除目录 `/prometheus/01EM0016F8FB33J63RNHFMHK3`: + +```shell +kubectl exec -it -n kubesphere-monitoring-system prometheus-k8s-0 -c prometheus sh + +rm -rf 01EM0016F8FB33J63RNHFMHK3/ +``` + +或者,您可以直接从绑定到 Prometheus PVC 的持久卷中删除该目录。 + diff --git a/content/zh/docs/v3.4/faq/upgrade/_index.md b/content/zh/docs/v3.4/faq/upgrade/_index.md new file mode 100644 index 000000000..cbbce981d --- /dev/null +++ b/content/zh/docs/v3.4/faq/upgrade/_index.md @@ -0,0 +1,7 @@ +--- +title: "升级" +keywords: 'Kubernetes, KubeSphere, 升级, FAQ' +description: '关于升级的常见问题' +layout: "second" +weight: 16200 +--- diff --git a/content/zh/docs/v3.4/faq/upgrade/qingcloud-csi-upgrade.md b/content/zh/docs/v3.4/faq/upgrade/qingcloud-csi-upgrade.md new file mode 100644 index 000000000..fb74b1767 --- /dev/null +++ b/content/zh/docs/v3.4/faq/upgrade/qingcloud-csi-upgrade.md @@ -0,0 +1,60 @@ +--- +title: "升级 QingCloud CSI" +keywords: "Kubernetes, 升级, KubeSphere, v3.3.2" +description: "升级 KubeSphere 后升级 QingCloud CSI。" +linkTitle: "升级 QingCloud CSI" +weight: 16210 +--- + +## 升级 KubeSphere 后升级 QingCloud CSI + +目前 QingCloud CSI 无法通过 KubeKey 升级。升级 KubeSphere 之后您可以运行以下命令手动升级 CSI: + +``` +git clone https://github.com/yunify/qingcloud-csi.git +``` + +``` +cd qingcloud-csi/ +``` + +``` +git checkout v1.1.1 +``` + +``` +kubectl delete -f deploy/disk/kubernetes/releases/qingcloud-csi-disk-v1.1.1.yaml +``` + +``` +kubectl delete sc csi-qingcloud +``` + +``` +helm repo add test https://charts.kubesphere.io/test +``` + +``` +helm install test/csi-qingcloud 
--name-template csi-qingcloud --namespace kube-system \ + --set config.qy_access_key_id=KEY,config.qy_secret_access_key=SECRET,config.zone=ZONE,sc.type=2 +``` + +等待 CSI 控制器和守护进程集启动并运行: + +``` +$ kubectl get po -n kube-system | grep csi +csi-qingcloud-controller-56979d46cb-qk9ck 5/5 Running 0 24h +csi-qingcloud-node-4s8n5 2/2 Running 0 24h +csi-qingcloud-node-65dqn 2/2 Running 0 24h +csi-qingcloud-node-khk49 2/2 Running 0 24h +csi-qingcloud-node-nz9q9 2/2 Running 0 24h +csi-qingcloud-node-pxr56 2/2 Running 0 24h +csi-qingcloud-node-whqhk 2/2 Running 0 24h +``` + +运行以下命令查看 CSI 镜像版本是否是 1.2.x: + +``` +$ kubectl get po -n kube-system csi-qingcloud-controller-56979d46cb-qk9ck -ojson | jq '.spec.containers[].image' | grep qingcloud +"csiplugin/csi-qingcloud:v1.2.0-rc.4" +``` diff --git a/content/zh/docs/v3.4/installing-on-kubernetes/_index.md b/content/zh/docs/v3.4/installing-on-kubernetes/_index.md new file mode 100644 index 000000000..543d6aa84 --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-kubernetes/_index.md @@ -0,0 +1,18 @@ +--- +title: 在 Kubernetes 上安装 KubeSphere +description: "演示如何在云或本地托管的现有 Kubernetes 集群上安装 KubeSphere" +layout: "second" + +linkTitle: "在 Kubernetes 上安装 KubeSphere" +weight: 4000 + +icon: "/images/docs/v3.3/docs.svg" +--- + +本章演示如何在云上或本地托管的现有 Kubernetes 集群上部署 KubeSphere。KubeSphere 为容器编排提供了高度灵活的解决方案,可以部署在多种 Kubernetes 引擎和服务上。 + +## 最受欢迎的页面 + +在下面的章节中,您将找到一些最受欢迎的页面。强烈建议您先参考它们。 + +{{< popularPage icon="/images/docs/v3.3/bitmap.jpg" title="基于 AWS EKS 安装 KubeSphere" description="在 EKS 上的现有 Kubernetes 集群上配置 KubeSphere。" link="../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks/" >}} diff --git a/content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/_index.md b/content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/_index.md new file mode 100644 index 000000000..7782ab48f --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "在托管 Kubernetes 上部署" +weight: 4200 + +_build: + render: false +--- \ No newline at end of file diff --git a/content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-ks-on-tencent-tke.md b/content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-ks-on-tencent-tke.md new file mode 100644 index 000000000..388185f36 --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-ks-on-tencent-tke.md @@ -0,0 +1,139 @@ +--- +title: "在腾讯云 TKE 安装 KubeSphere" +keywords: "kubesphere, kubernetes, docker, tencent, tke" +description: "介绍如何在腾讯云 TKE 上部署 KubeSphere。" + + +weight: 4270 +--- + +本指南将介绍如何在[腾讯云 TKE](https://cloud.tencent.com/document/product/457/6759) 上部署并使用 KubeSphere 3.3 平台。 + +## 腾讯云 TKE 环境准备 + +### 创建 Kubernetes 集群 +首先按使用环境的资源需求[创建 Kubernetes 集群](https://cloud.tencent.com/document/product/457/32189),满足以下一些条件即可(如已有环境并满足条件可跳过本节内容): + +- KubeSphere 3.3 默认支持的 Kubernetes 版本为 v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 v1.21.x; +- 如果老集群版本不大于1.15.0,需要操作控制台先升级master节点然后升级node节点,依次升级至符合要求版本即可。 +- 工作节点机型配置规格方面选择 `标准型S5` 的 `4核|8GB` 配置即可,并按需扩展工作节点数量(通常生产环境需要 3 个及以上工作节点)。 + +### 创建公网 kubectl 证书 + +- 创建完集群后,进入 `容器服务` > `集群` 界面,选择刚创建的集群,在 `基本信息` 面板中, `集群APIServer信息` 中开启 `外网访问` 。 +- 然后在下方 `kubeconfig` 列表项中点击 `下载`,即可获取公用可用的 kubectl 证书。 + +![generate-kubeconfig.png](/images/docs/v3.3/tencent-tke/generate-kubeconfig.png) + +- 获取 kubectl 配置文件后,可通过 kubectl 命令行工具来验证集群连接: + +```bash +$ kubectl version +Client Version: version.Info{Major:"1", Minor:"18", 
GitVersion:"v1.18.4", GitCommit:"c96aede7b5205121079932896c4ad89bb93260af", GitTreeState:"clean", BuildDate:"2020-06-17T11:41:22Z", GoVersion:"go1.13.9", Compiler:"gc", Platform:"linux/amd64"} +Server Version: version.Info{Major:"1", Minor:"18+", GitVersion:"v1.18.4-tke.2", GitCommit:"f6b0517bc6bc426715a9ff86bd6aef39c81fd64a", GitTreeState:"clean", BuildDate:"2020-08-12T02:18:32Z", GoVersion:"go1.13.15", Compiler:"gc", Platform:"linux/amd64"} +``` + + +## KubeSphere 平台部署 + +### 通过 ks-installer 执行最小化部署 +接下来就可以使用 [ks-installer](https://github.com/kubesphere/ks-installer) 在已有的 Kubernetes 集群上来执行 KubeSphere 部署,建议首先还是以最小功能集进行安装。 + +- 使用 kubectl 执行以下命令安装 KubeSphere: + +```bash +kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml +``` + +- 下载集群配置文件 + +```bash +wget https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml +``` + + {{< notice tip >}} + +腾讯云创建云硬盘大小必须为 10 的倍数,且普通云硬盘/高性能云硬盘最小是 10G,SSD/HSSD 云硬盘最小是 20G。Kubernetes 集群创建完成后会自动创建好普通云硬盘的 StoragClass,这里示例将直接使用默认的普通云硬盘。 + + {{}} + +- 修改集群配置文件,PVC 修改为 10G 的倍数(1倍n倍都可以),其他可拔插组件如果开启也需要调整,开启哪个调整哪个即可,默认最小化安装未开启可插拔组件。 + +```bash +vim cluster-configuration.yaml +//默认值 + common: + mysqlVolumeSize: 20Gi # MySQL PVC size. + minioVolumeSize: 20Gi # Minio PVC size. + etcdVolumeSize: 20Gi # etcd PVC size. + openldapVolumeSize: 2Gi # openldap PVC size. + redisVolumSize: 2Gi # Redis PVC size. + +//修改后的值,PVC 为 10G 的倍数(1倍n倍都可以),其他可拔插组件如果开启也需要调整 + common: + mysqlVolumeSize: 20Gi # MySQL PVC size. + minioVolumeSize: 20Gi # Minio PVC size. + etcdVolumeSize: 20Gi # etcd PVC size. + openldapVolumeSize: 10Gi # openldap PVC size. + redisVolumSize: 10Gi # Redis PVC size. +``` + +- 然后执行以下命令部署: + +```bash +kubectl apply -f cluster-configuration.yaml +``` + + +- 执行以下命令查看部署日志,当日志输出如以下图片内容时则表示部署完成: + +```bash +kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f +``` + +![ks-install-log.png](/images/docs/v3.3/tencent-tke/ks-install-log.png) + +### 访问 KubeSphere 控制台 + +部署完成后,您可以通过以下步骤访问 KubeSphere 控制台。 + +#### NodePort 方式访问 + +- 在 `容器服务` > `集群` 界面中,选择创建好的集群,在 `节点管理` > `节点` 面板中,查看任意一个节点的 `公网 IP`(集群安装时默认会免费为每个节点绑定公网 IP)。 + +![nodeport.png](/images/docs/v3.3/tencent-tke/nodeport.png) + +- 由于服务安装时默认开启 NodePort 且端口为 30880,浏览器输入 `<公网 IP>:30880` ,并以默认帐户(用户名 `admin`,密码 `P@88w0rd`)即可登录控制台。 + +#### LoadBalancer 方式访问 + +- 在 `容器服务` > `集群` 界面中,选择创建好的集群,在 `服务与路由` > `service` 面板中,点击 `ks-console` 一行中 `更新访问方式`。 + +![loadbalancer1.png](/images/docs/v3.3/tencent-tke/loadbalancer1.png) + +- `服务访问方式` 选择 `提供公网访问`,`端口映射` 中 `服务端口` 填写您希望的端口号,点击 `更新访问方式`。 + +![loadbalancer2.png](/images/docs/v3.3/tencent-tke/loadbalancer2.png) + +- 此时界面您将会看到 LoadBalancer 公网 IP: + +![loadbalancer3.png](/images/docs/v3.3/tencent-tke/loadbalancer3.png) + +- 浏览器输入 `:<映射端口>`,并以默认帐户(用户名 `admin`,密码 `P@88w0rd`)即可登录控制台。 + + +{{< notice tip >}} + +若您用 admin 帐户无法登录控制台,界面显示 “Internal error occurred: account is not active” 且 `ks-controller-manager` pod 日志显示 “tls: bad certificate”,则需要更新一下 `ks-controller-manager` 的证书: + +```bash +kubectl apply -f https://raw.githubusercontent.com/kubesphere/ks-installer/2c4b479ec65110f7910f913734b3d069409d72a8/roles/ks-core/prepare/files/ks-init/users.iam.kubesphere.io.yaml +kubectl apply -f https://raw.githubusercontent.com/kubesphere/ks-installer/2c4b479ec65110f7910f913734b3d069409d72a8/roles/ks-core/prepare/files/ks-init/webhook-secret.yaml +kubectl -n kubesphere-system rollout restart deploy 
ks-controller-manager +``` + +{{}} + +### 通过 KubeSphere 开启附加组件 +以上示例演示了默认的最小安装过程,要在 KubeSphere 中启用其他组件,请参阅[启用可插拔组件](../../../pluggable-components/)。 +全部附加组件开启并安装成功后,进入集群管理界面,在**系统组件**区域可以看到已经开启的各个基础和附加组件。 diff --git a/content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-ack.md b/content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-ack.md new file mode 100644 index 000000000..6d688837d --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-ack.md @@ -0,0 +1,233 @@ +--- +title: "在阿里云 ACK 上安装 KubeSphere" +keywords: "kubesphere, kubernetes, docker, aliyun, ack" +description: "了解如何在阿里云容器服务 ACK 上部署 KubeSphere。" + + +weight: 4250 +--- + +本指南将介绍如何在[阿里云容器服务 ACK](https://www.aliyun.com/product/kubernetes/) 上部署并使用 KubeSphere 3.3 平台。 + +## 阿里云 ACK 环境准备 + +### 创建 Kubernetes 集群 + +首先按使用环境的资源需求创建 Kubernetes 集群,满足以下一些条件即可(如已有环境并满足条件可跳过本节内容): + +- KubeSphere 3.3 默认支持的 Kubernetes 版本为 v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 v1.21.x; +- 需要确保 Kubernetes 集群所使用的 ECS 实例的网络正常工作,可以通过在创建集群的同时**自动创建**或**使用已有**弹性 IP;或者在集群创建后自行配置网络(如配置 [NAT 网关](https://www.aliyun.com/product/network/nat/)); +- 小规模场景下工作节点规格建议选择 `4核|8GB` 配置,不推荐`2核|4GB` ,并按需扩展工作节点数量(通常生产环境需要 3 个及以上工作节点),详情可参考[最佳实践- ECS 选型](https://help.aliyun.com/document_detail/98886.html)。 + +1.创建标准托管集群,转到导航菜单,然后参考下图创建集群,您可以使用集群模板快速创建标准托管集群: + +![ack-template](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-ack/ack-template.png) + +选择标准托管集群 + +![standard-template](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-ack/standard-template.png) + +2.在**配置集群**页面,配置以下集群信息: + +![create-ack-cluster](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-ack/create-ack-cluster.png) + +说明:配置集群名称、选择 Kubernetes 版本、容器运行时版本等。 + + + +3.指定专有网络,勾选为专有网络配置 SNAT 以及使用 EIP 暴露 API Server: + +![network-and-apiserver](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-ack/network-and-apiserver.png) + +4.配置 Worker 实例规格 + +![ack-worker-config](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-ack/ack-worker-config.png) + +指定实例操作系统类型,并为实例配置密码或秘钥 + +![ack-worker-password](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-ack/ack-worker-password.png) + +5.选择安装相关组件,完成后创建集群 + +![ack-components](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-ack/ack-components.png) + +{{< notice warning >}} + +由于阿里云 Prometheus node-exporter 端口与 KubeSphere 冲突,这里不勾选 Prometheus 监控服务, 不安装阿里云 Prometheus 组件。 + +{{}} + + + +6.等待集群创建完成,点击详情,查看集群信息 + +![ack-cluster](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-ack/ack-cluster.png) + + + +### 连接到 ACK 集群 + +- 创建完集群后,点击**集群信息** > **连接信息**界面,选择**公网访问**,复制下方 kubeconfig 信息到本地计算机,即可在本地连接到 ack 集群。 + + ![ack-kubeconfig](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-ack/ack-kubeconfig.png) + + + +也可以通过连接 CloudShell 管理集群,点击通过 CloudShell 管理集群,执行以下命令查看集群节点信息: + +```bash +shell@Alicloud:~$ kubectl get nodes -o wide +NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME +cn-shenzhen.192.168.0.35 Ready 15m v1.18.8-aliyun.1 192.168.0.35 CentOS Linux 7 (Core) 3.10.0-1127.19.1.el7.x86_64 docker://19.3.5 +cn-shenzhen.192.168.0.36 Ready 15m 
v1.18.8-aliyun.1 192.168.0.36 CentOS Linux 7 (Core) 3.10.0-1127.19.1.el7.x86_64 docker://19.3.5 +cn-shenzhen.192.168.0.37 Ready 15m v1.18.8-aliyun.1 192.168.0.37 CentOS Linux 7 (Core) 3.10.0-1127.19.1.el7.x86_64 docker://19.3.5 +``` + +## 部署 KubeSphere + +### 查看 StorageClass + +{{< notice note >}} + +您可以在阿里云容器服务 Kubernetes 集群中使用阿里云云盘存储卷。目前,阿里云 CSI 插件支持通过 PV/PVC 方式挂载云盘,包括静态存储卷和动态存储卷。 + +默认阿里云 ACK 已经为用户创建了不同规格的 StorageClass,可直接使用,但存在最小容量规格限制,详情参考[云盘存储卷使用说明](https://help.aliyun.com/document_detail/134767.html)。 + +{{}} + +连接到 cloudshell 查看 StorageClass 类型 + +```bash +shell@Alicloud:~$ kubectl get sc +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +alicloud-disk-available diskplugin.csi.alibabacloud.com Delete Immediate true 14m +alicloud-disk-efficiency diskplugin.csi.alibabacloud.com Delete Immediate true 14m +alicloud-disk-essd diskplugin.csi.alibabacloud.com Delete Immediate true 14m +alicloud-disk-ssd diskplugin.csi.alibabacloud.com Delete Immediate true 14m +alicloud-disk-topology diskplugin.csi.alibabacloud.com Delete WaitForFirstConsumer true 14m +``` + +容器服务 Kubernetes 版(ACK)集群默认提供了以下几种 StorageClass: + +- alicloud-disk-efficiency:高效云盘。 +- alicloud-disk-ssd:SSD 云盘。 +- alicloud-disk-essd:ESSD 云盘。 +- alicloud-disk-available:提供高可用选项,优先创建 SSD 云盘;如果 SSD 云盘售尽,则创建高效云盘。 +- alicloud-disk-topology:使用延迟绑定的方式创建云盘。 + +**指定默认 StorageClass ** + +本次使用 alicloud-disk-efficiency,注意申请高效云盘时申请的 PV 大小不得小于20G。 + +```bash +kubectl patch sc alicloud-disk-efficiency -p '{"metadata": {"annotations": {"storageclass.beta.kubernetes.io/is-default-class": "true"}}}' +``` + +确认配置成功 + +```bash +shell@Alicloud:~$ kubectl get sc +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +alicloud-disk-available diskplugin.csi.alibabacloud.com Delete Immediate true 30m +alicloud-disk-efficiency (default) diskplugin.csi.alibabacloud.com Delete Immediate true 30m +alicloud-disk-essd diskplugin.csi.alibabacloud.com Delete Immediate true 30m +alicloud-disk-ssd diskplugin.csi.alibabacloud.com Delete Immediate true 30m +alicloud-disk-topology diskplugin.csi.alibabacloud.com Delete WaitForFirstConsumer true 30m +``` + + + +### 最小化部署 kubesphere + +1.使用 [ks-installer](https://github.com/kubesphere/ks-installer) 在已有的 Kubernetes 集群上来部署 KubeSphere,下载 YAML 文件: + +``` +wget https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml +wget https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml +``` + + + +{{< notice warning >}} + +由于阿里云 ACK 高效磁盘最小申请大小为20G,所以挂载的 PV 申请的容量大小不能小于该值,参考以下配置修改 cluster-configuration.yaml。 + +{{}} + + + +部分存储卷 VolumeSize 小于20G,需要手动调整: + +```bash +shell@Alicloud:~$ cat cluster-configuration.yaml | grep Volum + mysqlVolumeSize: 20Gi # MySQL PVC size. + minioVolumeSize: 20Gi # Minio PVC size. + etcdVolumeSize: 20Gi # etcd PVC size. + openldapVolumeSize: 2Gi # openldap PVC size. + redisVolumSize: 2Gi # Redis PVC size. + elasticsearchMasterVolumeSize: 4Gi # Volume size of Elasticsearch master nodes. + elasticsearchDataVolumeSize: 20Gi # Volume size of Elasticsearch data nodes. + jenkinsVolumeSize: 8Gi # Jenkins volume size. + prometheusVolumeSize: 20Gi # Prometheus PVC size. 
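+# 以上 openldapVolumeSize、redisVolumSize、elasticsearchMasterVolumeSize、jenkinsVolumeSize 四项小于 20Gi,需按下文说明调整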
+``` + +编辑 cluster-configuration.yaml 文件,调整 `openldapVolumeSize、redisVolumSize、elasticsearchMasterVolumeSize、jenkinsVolumeSize` 4 个卷大小为 20G。 + +执行以下命令部署 kubesphere: + +```bash +kubectl apply -f kubesphere-installer.yaml +kubectl apply -f cluster-configuration.yaml +``` + +2.检查安装日志: + +```bash +kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f +``` + +3.安装完成后,您会看到以下消息: + +```yaml +##################################################### +### Welcome to KubeSphere! ### +##################################################### +Account: admin +Password: P@88w0rd +NOTES: + 1. After logging into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are ready. + 2. Please modify the default password after login. +##################################################### +https://kubesphere.io 2020-xx-xx xx:xx:xx +``` + +## 访问 KubeSphere 控制台 + +现在已经安装了 KubeSphere,您可以按照以下步骤访问 KubeSphere 的 Web 控制台。 + +- 切换到 kubesphere-system 命名空间,选择服务,选择 ks-console 点击更新 。 + +- 将 service 类型 `NodePort` 更改为 `LoadBalancer` ,完成后点击更新。 + + ![ack-lb](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-ack/ack-lb.png) + +- 获取您的 EXTERNAL-IP。 + + ![ack-lb-ip](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-ack/ack-lb-ip.png) + +- 使用 ACK 生成的 external-ip 访问 KubeSphere 的 Web 控制台, 默认帐户和密码(`admin/P@88w0rd`)。 + + +## 启用可插拔组件(可选) + +上面的示例演示了默认的最小安装过程,要在 KubeSphere 中启用其他组件,请参阅[启用可插拔组件](../../../pluggable-components/)。 + +{{< notice warning >}} + +由于阿里云 ACK 已经在 kube-system 命名空间部署 Metrics-server,请勿开启 KubeSphere metrics-server 插件,否则部署失败。 + +{{}} diff --git a/content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks.md b/content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks.md new file mode 100644 index 000000000..e670bb02c --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks.md @@ -0,0 +1,154 @@ +--- +title: "在 Azure AKS 上部署 KubeSphere" +keywords: "KubeSphere, Kubernetes, 安装, Azure, AKS" +description: "了解如何在 Azure Kubernetes 服务上部署 KubeSphere。" + +weight: 4210 +--- + +本文演示在 [Azure Kubernetes Service](https://docs.microsoft.com/en-us/azure/aks/) 上部署 KubeSphere 的步骤。 + +## 准备 AKS 集群 + +Azure 可以通过提供自动化部署资源功能从而实现基础设施即代码的能力,常用的工具包括 [ARM templates](https://docs.microsoft.com/en-us/azure/azure-resource-manager/templates/overview) 和 [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/what-is-azure-cli?view=azure-cli-latest)。在本指南中,我们将使用 Azure CLI 创建安装 KubeSphere 所需的所有资源。 + +### 使用 Azure Cloud Shell + +由于 Azure 提供了基于 Web 的终端,因此您不必在计算机上安装 Azure CLI。单击 Azure 门户右上角菜单栏上的 Cloud Shell 按钮。 + +![Cloud Shell](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks/aks-launch-icon.png) + +选择 **Bash** Shell。 + +![Bash Shell](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks/aks-choices-bash.png) + +### 创建资源组 + +Azure 资源组是在其中部署和管理 Azure 资源的逻辑组。以下示例在`westus`区域中创建一个名为`KubeSphereRG`的资源组。 + +```bash +az group create --name KubeSphereRG --location westus +``` + +### 创建一个 AKS 集群 + +使用`az aks create`命令创建 AKS 集群。以下示例创建一个名为`KuberSphereCluster`的集群,该集群具有三个节点,需要等待几分钟完成。 + +```bash +az aks create --resource-group KubeSphereRG --name KuberSphereCluster --node-count 3 
--enable-addons monitoring --generate-ssh-keys +``` + +{{< notice note >}} + +您可以使用`--node-vm-size`或`-s`选项来更改 Kubernetes 节点的大小,默认值是 Standard_DS2_v2(2v CPU,7GB 内存)。有关更多选项,请参见 [az aks create](https://docs.microsoft.com/en-us/cli/azure/aks?view=azure-cli-latest#az-aks-create)。 + +{{}} + +### 连接集群 + +为了能够使用 kubectl 操作该 Kubernetes 集群,需要执行`az aks get-credentials`命令,该命令下载 Kubernetes CLI 将要使用到的凭据和配置。 + +```bash +az aks get-credentials --resource-group KubeSphereRG --name KuberSphereCluster +``` + +查看节点信息 + +```bash +$ kubectl get nodes +NAME STATUS ROLES AGE VERSION +aks-nodepool1-27194461-vmss000000 Ready agent 77s v1.17.13 +aks-nodepool1-27194461-vmss000001 Ready agent 63s v1.17.13 +aks-nodepool1-27194461-vmss000002 Ready agent 65s v1.17.13 +``` + +### 在门户中检查 Azure 资源 + +执行完以上所有命令后,您可以看到在 Azure Portal 中创建了 2 个资源组。 + +![Resource groups](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks/aks-create-command.png) + +查看资源组详情 + +```bash +$ az group show --resource-group KubeSphereRG +{ + "id": "/subscriptions/6017690f-c286-4a8f-123e-c53e2f3bc7b5/resourceGroups/KubeSphereRG", + "location": "westus", + "managedBy": null, + "name": "KubeSphereRG", + "properties": { + "provisioningState": "Succeeded" + }, + "tags": null, + "type": "Microsoft.Resources/resourceGroups" +} +``` + +Azure Kubernetes Services 本身将放置在`KubeSphereRG`中。 + +![Azure Kubernetes Services](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks/aks-dashboard.png) + +所有其他资源都将放置在`MC_KubeSphereRG_KuberSphereCluster_westus`中,例如 VM,负载均衡器和虚拟网络。 + +![Azure Kubernetes Services](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks/aks-all-resources.png) + +## 在 AKS 上部署 KubeSphere + +请使用以下命令开始部署 KubeSphere。 + +```bash +kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + +kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml +``` + +可以通过以下命令检查安装日志: + +```bash +kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f +``` + +## 访问 KubeSphere 控制台 + +要从公共 IP 地址访问 KubeSphere 控制台,需要将服务类型更改为`LoadBalancer`。 + +```bash +kubectl edit service ks-console -n kubesphere-system +``` + +找到以下部分,并将类型更改为`LoadBalancer`。 + +```yaml +spec: + clusterIP: 10.0.78.113 + externalTrafficPolicy: Cluster + ports: + - name: nginx + nodePort: 30880 + port: 80 + protocol: TCP + targetPort: 8000 + selector: + app: ks-console + tier: frontend + version: v3.0.0 + sessionAffinity: None + type: LoadBalancer # Change NodePort to LoadBalancer +status: + loadBalancer: {} +``` + +保存 ks-console 服务的配置后,可以使用以下命令获取公共 IP 地址(在下方 EXTERNAL-IP)。 + +```bash +$ kubectl get svc/ks-console -n kubesphere-system +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +ks-console LoadBalancer 10.0.181.93 13.86.xxx.xxx 80:30194/TCP 13m 6379/TCP 10m +``` + +使用 external-ip 地址用默认帐户和密码(admin/P@88w0rd)访问控制台。 + +## 启用可插拔组件(可选) + +上面的示例演示了默认的最小安装过程,对于可插拔组件,可以在安装之前或之后启用它们。有关详细信息,请参见[启用可插拔组件](../../../pluggable-components/)。 diff --git a/content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do.md b/content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do.md new file mode 100644 index 000000000..65d99ee96 --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do.md @@ -0,0 +1,116 
@@ +--- +title: "在 DigitalOcean 上部署 KubeSphere" +keywords: 'Kubernetes, KubeSphere, DigitalOcean, 安装' +description: '了解如何在 DigitalOcean 上部署 KubeSphere。' + +weight: 4230 +--- + +![KubeSphere+DOKS](/images/docs/v3.3/do/KubeSphere-DOKS.png) + +本指南将介绍在 [DigitalOcean Kubernetes](https://www.digitalocean.com/products/kubernetes/) 上部署 KubeSphere 的步骤。 + +## 准备一个 DOKS 集群 + +在 DO 上创建一个标准的 Kubernetes 集群是安装 KubeSphere 的前提条件。登录您的 [DO account](https://cloud.digitalocean.com/) 帐户,然后在导航菜单中,参考下图创建集群。 + +![create-cluster-do](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do/create-cluster-do.png) + +您需要选择: + +1. Kubernetes 版本(例如 1.18.6-do.0) +2. 数据中心区域(例如 `Frankfurt`) +3. VPC 网络(例如 default-fra1) +4. 集群规模(例如 2 个标准节点,每个节点具有 2 个 vCPU 和 4GB 内存) +5. 集群名称(例如 kubesphere-3) + +![config-cluster-do-1](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do/config-cluster-do-1.png) + +![config-cluster-do-2](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do/config-cluster-do-2.png) + +{{< notice note >}} + +- 如需在 Kubernetes 上安装 KubeSphere 3.3,您的 Kubernetes 版本必须为:v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 v1.21.x。 +- 此示例中包括 3 个节点。您可以根据自己的需求添加更多节点,尤其是在生产环境中。 +- 机器类型 Standard/4 GB/2 vCPU 仅用于最小化安装的,如果您计划启用多个可插拔组件或将集群用于生产,建议将节点升级到规格更大的类型(例如,CPU-Optimized /8 GB /4 vCPUs)。DigitalOcean 是基于工作节点类型来配置主节点,而对于标准节点,API server 可能会很快会变得无响应。 + +{{}} + +集群准备就绪后,您可以下载 kubectl 的配置文件。 + +![download-config-file](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do/download-config-file.png) + +## 在 DOKS 上安装 KubeSphere + +现在集群已准备就绪,您可以按照以下步骤安装 KubeSphere: + +- 使用 kubectl 安装 KubeSphere,以下命令仅用于默认的最小安装。 + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml + ``` + +- 检查安装日志: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + +安装完成后,您会看到以下消息: + +```yaml +##################################################### +### Welcome to KubeSphere! ### +##################################################### +Console: http://10.XXX.XXX.XXX:30880 +Account: admin +Password: P@88w0rd +NOTES: + 1. After logging into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are ready. + 2. Please modify the default password after login. 
+##################################################### +https://kubesphere.io 2020-xx-xx xx:xx:xx +``` + +## 访问 KubeSphere 控制台 + +现在已经安装了 KubeSphere,可以按照以下步骤访问 KubeSphere 的 Web 控制台。 + +- 转到 DigitalOcean 提供的 Kubernetes 仪表板。 + + ![kubernetes-dashboard-access](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do/kubernetes-dashboard-access.png) + +- 下拉选择 **kubesphere-system** 命名空间 + + ![kubernetes-dashboard-namespace](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do/kubernetes-dashboard-namespace.png) + +- 在 **Service** -> **Services** 中,编辑 **ks-console** 服务。 + + ![kubernetes-dashboard-edit](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do/kubernetes-dashboard-edit.png) + +- 将类型从`NodePort`更改为`LoadBalancer`,完成后更新文件。 + + ![lb-change](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do/lb-change.png) + +- 使用 DO 生成的端点访问 KubeSphere 的 Web 控制台。 + + ![access-console](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do/access-console.png) + + {{< notice tip >}} + + 除了将服务类型更改为`LoadBalancer`,您还可以通过`NodeIP:NodePort`(服务类型设置为`NodePort`)访问 KubeSphere 控制台,这种方式需要获取任意节点的公共 IP。 + + {{}} + +- 使用默认帐户和密码(`admin/P@88w0rd`)登录控制台。 + + +## 启用可插拔组件(可选) + +上面的示例演示了默认的最小安装过程,要在 KubeSphere 中启用其他组件,请参阅[启用可插拔组件](../../../pluggable-components/)。 diff --git a/content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks.md b/content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks.md new file mode 100644 index 000000000..90873d6cb --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks.md @@ -0,0 +1,214 @@ +--- +title: "在 AWS EKS 上部署 KubeSphere" +keywords: 'Kubernetes, KubeSphere, EKS, 安装' +description: '了解如何在 Amazon Elastic Kubernetes Service 上部署 KubeSphere。' + +weight: 4220 +--- + +本指南将介绍如何在 [AWS EKS](https://docs.aws.amazon.com/eks/latest/userguide/what-is-eks.html) 上部署 KubeSphere。您也可以通过 [KubeSphere on AWS Quick Start](https://aws.amazon.com/quickstart/architecture/qingcloud-kubesphere/) 在 AWS 上自动部署 EKS 和 KubeSphere。 + +## 安装 AWS CLI + +AWS EKS 没有像 GKE CloudShell 这样的 Web 终端,因此我们必须先安装 aws cli。下面以 linux 为例,macOS 和其他操作系统可参考 [EKS 入门](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html)。 + +```shell +curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" +unzip awscliv2.zip +sudo ./aws/install +``` + +使用`aws --version`检查安装版本。 + +```shell +$ aws --version +aws-cli/2.1.2 Python/3.7.3 Linux/4.18.0-193.6.3.el8_2.x86_64 exe/x86_64.centos.8 +``` + +## 准备 EKS 集群 + +1. 在 AWS 上创建一个标准的 Kubernetes 集群是安装 KubeSphere 的前提条件,转到导航菜单,然后参考下图创建集群。 + + ![create-cluster-eks](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks/eks-launch-icon.png) + +2. 
在**配置集群**页面,配置以下集群信息: + + ![config-cluster-page](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks/config-cluster-page.png) + + 配置选项说明: + + - 名称:集群的唯一名称。 + - Kubernetes 版本:指定创建集群的 Kubernetes 版本。 + - 集群服务角色:选择通过[创建 Amazon EKS 集群 IAM 角色](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html#role-create)创建的 IAM 角色。 + - Secrets 加密(可选):选择使用 AWS 密钥管理服务(AWS KMS)启用 Kubernetes secrets 的信封加密。如果启用信封加密,Kubernetes secrets 将使用您选择的客户主密钥(CMK)进行加密。CMK 必须是对称的,在与集群相同的区域中创建,如果 CMK 是在不同的帐户中创建的,则用户必须有权访问 CMK。有关详细信息,请在 AWS 密钥管理服务开发人员指南中参阅 [允许其他帐户中的用户使用CMK](https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-modifying-external-accounts.html)。 + - 使用 AWS KMS CM 进行 Kubernetes 秘钥加密需要 Kubernetes 1.13 或更高版本。如果密钥不存在,则必须先创建一个。有关更多信息,请参见[创建密钥](https://docs.aws.amazon.com/kms/latest/developerguide/create-keys.html)。 + - 标签(可选):将所有标签添加到您的集群。有关更多信息,请参阅[标记 Amazon EKS 资源](https://docs.aws.amazon.com/eks/latest/userguide/eks-using-tags.html)。 + +3. 选择下一步,在**指定联网**页面上,为以下字段选择值: + + ![network](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks/networking.png) + + - VPC:您之前在[创建 Amazon EKS 集群 VPC](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html#vpc-create) 中创建的 VPC,您可以在下拉列表中找到 VPC 的名称。 + - 子网:默认情况下,上一字段中指定的 VPC 中的可用子网是预选的。选择您不想承载集群资源的任何子网,例如工作节点或负载均衡器。 + - 安全组:通过[创建 Amazon EKS 集群 VPC](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html#vpc-create) 中生成的 AWS CloudFormation 输出的 SecurityGroups 值。该安全组在下拉名称中具有`ControlPlaneSecurityGroup`。 + + - 对于集群`endpoints`访问–选择以下选项之一: + + ![endpoints](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks/endpoints.png) + + - 公有:仅启用对集群的 Kubernetes API server 端点的公共访问,来自集群 VPC 外部的 Kubernetes API 请求使用这个公共端点。默认情况下,允许从任何源 IP 地址进行访问,您也可以只允许一个或多个 CIDR 地址段访问,例如选择**高级设置**,然后选择**添加源**指定地址段 192.168.0.0/16 才能访问。 + - 私有:仅启用对集群的 Kubernetes API server 端点的专用访问。来自集群 VPC 内部的 Kubernetes API 请求使用这个私有 VPC 端点。 + + {{< notice note >}} + 如果创建的 VPC 没有出站 Internet 访问,则必须启用私有访问。 + {{}} + + - 公有和私有:启用公有和私有访问。 + +4. 选择下一步,在**配置日志记录**页面上,可以选择要启用的日志类型。默认情况下,每种日志类型均为**禁用**。有关更多信息,请参阅[Amazon EKS 控制平面日志记录](https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html)。 + ![logging](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks/logging.png) + +5. 选择下一步,在**查看和创建**页面上,查看在之前页面上输入或选择的信息。如果需要更改任何选择,请选择**编辑**。对设置满意后,选择**创建**,状态字段将显示**正在创建**,直到集群创建完毕。 + ![revies](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks/review.png) + + - 有关先前选项的更多信息,请参阅[修改集群端点访问](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html#modify-endpoint-access)。集群配置完成后(通常在10到15分钟之间),请记下 API server 端点和证书颁发机构值,这些将在您的 kubectl 配置中使用。 + ![creating](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks/creating.png) + +6. 点击**添加节点组**,在此集群中定义 3 个节点。 + + ![node-group](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks/node-group.png) + +7. 
配置节点组,注意创建[节点角色](https://docs.aws.amazon.com/eks/latest/userguide/create-node-role.html)。 + + ![config-node-group](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks/config-node-grop.png) + + {{< notice note >}} + +- 如需在 Kubernetes 上安装 KubeSphere 3.3,您的 Kubernetes 版本必须为:v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 v1.21.x 版本。 +- 此示例中包括 3 个节点。您可以根据自己的需求添加更多节点,尤其是在生产环境中。 +- t3.medium(2 个 vCPU,4 GB 内存)机器类型仅用于最小化安装,如果要启用可插拔组件或集群用于生产,请选择具有更大规格的机器类型。 +- 对于其他设置,您也可以根据自己的需要进行更改,也可以使用默认值。 + + {{}} + +1. 当 EKS 集群准备就绪时,您可以使用 kubectl 连接到集群。 + +## 配置 kubectl + +我们将使用 kubectl 命令行工具与集群 API Server 进行通信。首先需要获取刚刚创建的 EKS 集群的 kubeconfig。 + +1. 配置您的 AWS CLI 凭证 + + ```shell + $ aws configure + AWS Access Key ID [None]: AKIAIOSFODNN7EXAMPLE + AWS Secret Access Key [None]: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY + Default region name [None]: region-code + Default output format [None]: json + ``` + +2. 使用 AWS CLI 创建 kubeconfig 文件 + + ```shell + aws eks --region us-west-2 update-kubeconfig --name cluster_name + ``` + + - 默认情况下,生成的配置文件在主目录中的默认 kubeconfig 路径(`.kube/config`)中创建,或与该位置处的现有 kubeconfig 合并。您可以使用`--kubeconfig`选项指定其他路径。 + + - 您可以使用`--role-arn`选项指定 IAM 角色 ARN,以在执行 kubectl 命令时用于身份验证。否则,将使用默认 AWS CLI 或 SDK 证书链中的 IAM 实体。您可以通过运行`aws sts get-caller-identity`命令查看默认的 AWS CLI 或 SDK 身份。 + + 有关更多信息,请参阅带有 aws eks update-kubeconfig help 命令的帮助页面,或参阅[AWS CLI命令参考](https://docs.aws.amazon.com/eks/latest/userguide/security_iam_id-based-policy-examples.html)中的update-kubeconfig。 + +3. 测试您的配置。 + + ```shell + kubectl get svc + ``` + +## 在 EKS 上安装 KubeSphere + +- 使用 kubectl 安装 KubeSphere,以下命令仅用于默认的最小安装。 + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml + ``` + +- 检查安装日志: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + +- 安装完成后,您会看到以下消息: + + ```yaml + ##################################################### + ### Welcome to KubeSphere! ### + ##################################################### + Account: admin + Password: P@88w0rd + NOTES: + 1. After logging into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are ready. + 2. Please modify the default password after login. + ##################################################### + https://kubesphere.io 2020-xx-xx xx:xx:xx + ``` + +## 访问 KubeSphere 控制台 + +现在已经安装了 KubeSphere,您可以按照以下步骤访问 KubeSphere 的 Web 控制台。 + +- 查看 ks-console 服务。 + + ```shell + kubectl get svc -n kubesphere-system + ``` + +- 执行`kubectl edit ks-console`将 service 类型`NodePort` 更改为`LoadBalancer`,完成后保存文件。 + + ```shell + # kubectl edit svc ks-console -n kubesphere-system + ...... 
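+  # 下面为 ks-console Service 配置的关键部分,只需将末尾的 type 字段由 NodePort 改为 LoadBalancer,其余字段保持不变。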
+ spec: + clusterIP: 10.100.160.240 + externalTrafficPolicy: Cluster + ports: + - name: nginx + nodePort: 30880 + port: 80 + protocol: TCP + targetPort: 8000 + selector: + app: ks-console + tier: frontend + version: v3.0.0 + sessionAffinity: None + type: LoadBalancer + ``` + +- 执行`kubectl get svc -n kubesphere-system`获取您的 EXTERNAL-IP。 + + ```shell + # kubectl get svc -n kubesphere-system + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + ks-apiserver ClusterIP 10.100.108.212 80/TCP 6m28s + ks-console LoadBalancer 10.100.160.240 ad107c54ee456744c91c8da0b9321f2c-1235661477.ap-east-1.elb.amazonaws.com 80:30880/TCP 6m25s + ks-controller-manager ClusterIP 10.100.126.96 443/TCP 6m28s + openldap ClusterIP None 389/TCP 6m54s + redis ClusterIP 10.100.218.34 6379/TCP 6m59s + ``` + +- 使用 EKS 生成的 external-ip 访问 KubeSphere 的 Web 控制台。 + +- 使用默认帐户和密码(`admin/P@88w0rd`)登录控制台。 + + +## 启用可插拔组件(可选) + +上面的示例演示了默认的最小安装过程,要在 KubeSphere 中启用其他组件,请参阅[启用可插拔组件](../../../pluggable-components/)。 diff --git a/content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke.md b/content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke.md new file mode 100644 index 000000000..d2897ab8b --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke.md @@ -0,0 +1,107 @@ +--- +title: "在 Google GKE 上部署 KubeSphere" +keywords: 'Kubernetes, KubeSphere, GKE, 安装' +description: '了解如何在 Google Kubernetes Engine 上部署 KubeSphere。' + +weight: 4240 +--- + +![KubeSphere+GKE](https://pek3b.qingstor.com/kubesphere-docs/png/20191123145223.png) + +本指南将演示如何在 [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/) 上部署 KubeSphere。 + +## 准备一个 GKE 集群 + +- 在 GKE 上创建一个标准的 Kubernetes 集群是安装 KubeSphere 的前提条件,转到导航菜单然后参考下图创建集群。 + + ![create-cluster-gke](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke/create-cluster-gke.png) + +- 在**集群基本信息**中,选择一个主版本,指定 Kubernetes 静态版本。 + + ![select-master](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke/master-version.png) + +- 在 **default-pool** 下的**节点池详情**中,在此集群中定义 3 个节点。 + + ![node-number](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke/node-number.png) + +- 转到**节点数**,选择映像类型,然后设置如下机器配置。完成后,点击**创建**。 + + ![machine-config](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke/machine-configuration.png) + + {{< notice note >}} + +- 如需在 Kubernetes 上安装 KubeSphere 3.3,您的 Kubernetes 版本必须为:v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 v1.21.x 版本。 +- 此示例中包括3个节点,可以根据自己的需求添加更多节点,尤其是在生产环境中。 +- 最小安装的机器类型为 e2-medium(2 个 vCPU,4GB 内存)。如果要启用可插拔组件或将集群用于生产,请选择具有更高配置的机器类型。 +- 对于其他设置,可以根据自己的需要进行更改,也可以使用默认值。 + + {{}} + +- 当 GKE 集群准备就绪时,可以使用 Cloud Shell 连接到集群。 + + ![cloud-shell-gke](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke/cloud-shell.png) + +## 在 GKE上安装 KubeSphere + +- 使用 kubectl 安装 KubeSphere,以下命令仅用于默认的最小安装。 + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml + ``` + +- 检查安装日志: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + +- 安装完成后,会看到以下消息: + 
+ ```yaml + ##################################################### + ### Welcome to KubeSphere! ### + ##################################################### + Console: http://10.128.0.44:30880 + Account: admin + Password: P@88w0rd + NOTES: + 1. After logging into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are ready. + 2. Please modify the default password after login. + ##################################################### + https://kubesphere.io 2020-xx-xx xx:xx:xx + ``` + +## 访问 KubeSphere 控制台 + +现在已经安装了 KubeSphere,您可以按照以下步骤访问 KubeSphere 的 Web 控制台。 + +- 在 **Services 和 Ingress** 选项中, 选择 **ks-console** 服务. + + ![ks-console](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke/console-service.png) + +- 在**服务详细信息**中,单击**修改**,然后将服务类型从`NodePort`更改为`LoadBalancer`,完成后保存文件。 + + ![lb-change](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke/lb-change.png) + +- 使用 GKE 生成的端点访问 KubeSphere 的 Web 控制台。 + + ![access-console](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke/access-console.png) + + {{< notice tip >}} + +除了将服务类型更改为`LoadBalancer`,还可以通过`NodeIP:NodePort`(服务类型设置为 `NodePort`)访问 KubeSphere 控制台,注意需要在防火墙规则中打开 30880 端口。 + + {{}} + +- 使用默认帐户和密码(`admin/P@88w0rd`)登录控制台。 + + +## 启用可插拔组件(可选) + +上面的示例演示了默认的最小安装过程,要在KubeSphere中启用其他组件,请参阅[启用可插拔组件](../../../pluggable-components/)。 diff --git a/content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-huaweicloud-cce.md b/content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-huaweicloud-cce.md new file mode 100644 index 000000000..a4f491cf8 --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-huaweicloud-cce.md @@ -0,0 +1,113 @@ +--- +title: "在华为云 CCE 上安装 KubeSphere" +keywords: "kubesphere, kubernetes, docker, huawei, cce" +description: "了解如何在华为云容器引擎上部署 KubeSphere。" + + +weight: 4250 +--- + +本指南将介绍如果在[华为云 CCE 容器引擎](https://support.huaweicloud.com/cce/)上部署并使用 KubeSphere 3.3 平台。 + +## 华为云 CCE 环境准备 + +### 创建 Kubernetes 集群 + +首先按使用环境的资源需求创建 Kubernetes 集群,满足以下一些条件即可(如已有环境并满足条件可跳过本节内容): + +- 如需在 Kubernetes 上安装 KubeSphere 3.3,您的 Kubernetes 版本必须为:v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 v1.21.x 版本。 +- 需要确保 Kubernetes 集群所使用的云主机的网络正常工作,可以通过在创建集群的同时**自动创建**或**使用已有**弹性 IP;或者在集群创建后自行配置网络(如配置 [NAT 网关](https://support.huaweicloud.com/natgateway/))。 +- 工作节点规格建议选择 `s3.xlarge.2` 的 `4核|8GB` 配置,并按需扩展工作节点数量(通常生产环境需要 3 个及以上工作节点)。 + +### 创建公网 kubectl 证书 + +- 创建完集群后,进入**资源管理** > **集群管理**界面,在**基本信息** > **网络** 面板中,绑定`公网apiserver地址`; +- 在右侧面板中,选择 **kubectl** 标签页,并在**下载kubectl配置文件**列表项中**点击此处下载**,即可获取公用可用的 kubectl 证书。 + +![生成 Kubectl 配置文件](/images/docs/v3.3/huawei-cce/zh/generate-kubeconfig.png) + +获取 kubectl 配置文件后,可通过 kubectl 命令行工具来验证集群连通性: + +```bash +$ kubectl version +Client Version: version.Info{Major:"1", Minor:"18", GitVersion:"v1.18.8", GitCommit:"9f2892aab98fe339f3bd70e3c470144299398ace", GitTreeState:"clean", BuildDate:"2020-08-15T10:08:56Z", GoVersion:"go1.14.7", Compiler:"gc", Platform:"darwin/amd64"} +Server Version: version.Info{Major:"1", Minor:"17+", GitVersion:"v1.17.9-r0-CCE20.7.1.B003-17.36.3", GitCommit:"136c81cf3bd314fcbc5154e07cbeece860777e93", GitTreeState:"clean", BuildDate:"2020-08-08T06:01:28Z", GoVersion:"go1.13.9", Compiler:"gc", 
Platform:"linux/amd64"} +``` + +## 部署 KubeSphere + +### 创建自定义 StorageClass + +{{< notice note >}} + +由于华为 CCE 自带的 Everest CSI 组件所提供的 StorageClass `csi-disk` 默认指定的是 SATA 磁盘(即普通 I/O 磁盘),但实际创建的 Kubernetes 集群所配置的磁盘基本只有 SAS(高 I/O)和 SSD (超高 I/O),因此建议额外创建对应的 StorageClass(并设定为默认)以方便后续部署使用。参见官方文档 - [使用 kubectl 创建云硬盘](https://support.huaweicloud.com/usermanual-cce/cce_01_0044.html#section7)。 +以下示例展示如何创建一个 SAS(高 I/O)磁盘对应的 StorageClass: + +{{}} + +```yaml +# csi-disk-sas.yaml +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "true" + storageclass.kubesphere.io/support-snapshot: "false" + name: csi-disk-sas +parameters: + csi.storage.k8s.io/csi-driver-name: disk.csi.everest.io + csi.storage.k8s.io/fstype: ext4 + # 绑定华为 “高I/O” 磁盘,如需 “超高I/O“ 则此值改为 SSD + everest.io/disk-volume-type: SAS + everest.io/passthrough: "true" +provisioner: everest-csi-provisioner +allowVolumeExpansion: true +reclaimPolicy: Delete +volumeBindingMode: Immediate +``` + +关于如何设定/取消默认 StorageClass,可参考 Kubernetes 官方文档 - [改变默认 StorageClass](https://kubernetes.io/zh/docs/tasks/administer-cluster/change-default-storage-class/)。 + +### 通过 ks-installer 执行最小化部署 + +接下来就可以使用 [ks-installer](https://github.com/kubesphere/ks-installer) 在已有的 Kubernetes 集群上来部署 KubeSphere,建议首先还是以最小功能集进行安装,可执行以下命令: + +```bash +kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml +kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml +``` + +执行部署命令后,可以通过进入**工作负载** > **容器组 Pod** 界面,在右侧面板中查询 `kubesphere-system` 命名空间下的 Pod 运行状态了解 KubeSphere 平台最小功能集的部署状态;通过该命名空间下 `ks-console-xxxx` 容器的状态来了解 KubeSphere 控制台应用的可用状态。 + +![部署 KubeSphere 最小功能集](/images/docs/v3.3/huawei-cce/zh/deploy-ks-minimal.png) + +### 开启 KubeSphere 外网访问 + +通过 `kubesphere-system` 命名空间下的 Pod 运行状态确认 KubeSphere 基础组件都已进入运行状态后,我们需要为 KubeSphere 控制台开启外网访问。 + +进入**资源管理** > **网络**,在右侧面板中选择 `ks-console` 更改网络访问方式,建议选用 `负载均衡(LoadBalancer` 访问方式(需绑定弹性公网 IP),配置完成后如下图: + +![开启 KubeSphere 外网访问](/images/docs/v3.3/huawei-cce/zh/expose-ks-console.png) + +服务细节配置基本上选用默认选项即可,当然也可以按需进行调整: + +![为 KubeSphere 控制台配置负载均衡访问](/images/docs/v3.3/huawei-cce/zh/edit-ks-console-svc.png) + +通过负载均衡绑定公网访问后,即可使用给定的访问地址进行访问,进入到 KubeSphere 的登录界面并使用默认帐户(用户名 `admin`,密码 `P@88w0rd`)即可登录平台。 + +### 通过 KubeSphere 开启附加组件 + +上面的示例演示了默认的最小安装过程,要在 KubeSphere 中启用其他组件,请参阅[启用可插拔组件](../../../pluggable-components/)。 + +{{< notice warning >}} + +在开启 Istio 组件之前,由于定制资源定义(CRD)冲突的问题,需要先删除华为 CCE 自带的 `applications.app.k8s.io` ,最直接的方式是通过 kubectl 工具来完成: + +```bash +kubectl delete crd applications.app.k8s.io +``` + +{{}} + +全部附加组件开启并安装成功后,进入集群管理界面,在**系统组件** 区域可以看到已经开启的各个基础和附加组件。 diff --git a/content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke.md b/content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke.md new file mode 100644 index 000000000..0b90f1fa8 --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke.md @@ -0,0 +1,142 @@ +--- +title: "在 Oracle OKE 上部署 KubeSphere" +keywords: 'Kubernetes, KubeSphere, OKE, 安装, Oracle-cloud' +description: '了解如何在 Oracle Cloud Infrastructure Container Engine 上部署 KubeSphere。' + +weight: 4260 +--- + +本文演示在 [Oracle Kubernetes Engine](https://www.oracle.com/cn/cloud/compute/container-engine-kubernetes.html) 上部署 KubeSphere 的步骤。 + +## 创建 Kubernetes 集群 + +1. 
在 OKE 上创建一个标准的 Kubernetes 集群是安装 KubeSphere 的前提条件。在导航栏中,请参考下图创建集群。 + + ![创建集群](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke/创建集群.jpg) + +2. 在弹出窗口中,选择**快速创建**并点击**启动工作流**。 + + ![快速创建](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke/快速创建.jpg) + + {{< notice note >}} +本示例演示**快速创建**,Oracle Cloud 通过此模式会为集群自动创建所必需的资源。如果您选择**定制创建**,您需要自己创建所有资源(例如 VCN 和负载均衡器子网)。 + {{}} + +3. 接下来,您需要为集群设置基本信息(可参考以下图例)。完成后,请点击**下一步**。 + + ![集群基本信息](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke/集群基本信息.jpg) + + {{< notice note >}} + +- 如需在 Kubernetes 上安装 KubeSphere 3.3,您的 Kubernetes 版本必须为:v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 v1.21.x 版本。 +- 建议您在**可见性类型**中选择**公共**,即每个节点会分配到一个公共 IP 地址,此地址之后可用于访问 KubeSphere Web 控制台。 +- 在 Oracle Cloud 中,**配置**定义了一个实例会分配到的 CPU 和内存等资源量,本示例使用 `VM.Standard.E2.2 (2 CPUs and 16G Memory)`。有关更多信息,请参见 [Standard Shapes](https://docs.cloud.oracle.com/en-us/iaas/Content/Compute/References/computeshapes.htm#vmshapes__vm-standard)。 +- 本示例包含 3 个节点,可以根据需求自行添加节点(尤其是生产环境)。 + + {{}} + +1. 检查集群信息,确认无需修改后点击**创建集群**。 + + ![完成创建集群](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke/完成创建集群.jpg) + +2. 集群创建后,点击**关闭**。 + + ![集群创建完成](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke/集群创建完成.jpg) + +3. 确保集群状态为**活动**后,点击**访问集群**。 + + ![访问集群](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke/访问集群.jpg) + +4. 在弹出窗口中,选择 **Cloud Shell 访问权限**。点击**启动 Cloud Shell**,并将 Oracle Cloud 所提供的命令复制到 Cloud Shell。 + + ![启动Cloud-shell](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke/启动Cloud-shell.jpg) + +5. 在 Cloud Shell 中,粘贴该命令以便之后可以执行 KubeSphere 安装命令。 + + ![cloud-shell-oke](/images/docs/v3.3/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke/cloud-shell-oke.jpg) + + {{< notice warning >}}如果不在 Cloud Shell 中执行该命令,您无法继续进行以下操作。 + + {{}} + +## 在 OKE 上安装 KubeSphere + +1. 使用 kubectl 安装 KubeSphere。直接输入以下命令会默认执行 KubeSphere 的最小化安装。 + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml + ``` + +2. 检查安装日志: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + +3. 安装完成后会输出以下信息: + + ```yaml + ##################################################### + ### Welcome to KubeSphere! ### + ##################################################### + + Console: http://10.0.10.2:30880 + Account: admin + Password: P@88w0rd + + NOTES: + 1. After logging into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are ready. + 2. Please modify the default password after login. + + ##################################################### + https://kubesphere.io 20xx-xx-xx xx:xx:xx + ``` + +## 访问 KubeSphere 控制台 + +KubeSphere 安装完成后,您可以通过 `NodePort` 或 `LoadBalancer` 的模式访问 KubeSphere 的 Web 控制台。 + +1. 通过以下命令查看 KubeSphere 控制台的服务状态。 + + ```bash + kubectl get svc -n kubesphere-system + ``` + +2. 
输出如下,将类型修改为 `LoadBalancer`,从而暴露外部 IP 地址。 + + ![console-nodeport](https://ap3.qingstor.com/kubesphere-website/docs/nodeport-console.jpg) + + {{< notice tip >}} +在上图中,`ks-console` 服务通过 `NodePort` 的类型暴露,即您可以通过 `NodeIP:NodePort` 的方式直接访问 Web 控制台(任意节点的公共 IP 都可用),值得注意的是需要在防火墙中提前开启端口 30880。 + {{}} + +3. 执行以下命令编辑服务配置。 + + ```bash + kubectl edit svc ks-console -o yaml -n kubesphere-system + ``` + +4. 将 `type` 字段所对应的值修改为 `LoadBalancer`,然后保存配置。 + + ![change-svc-type](https://ap3.qingstor.com/kubesphere-website/docs/change-service-type.png) + +5. 再次执行以下命令,可以看到 IP 地址现已暴露(如下图)。 + + ```bash + kubectl get svc -n kubesphere-system + ``` + + ![console-service](https://ap3.qingstor.com/kubesphere-website/docs/console-service.png) + +6. 访问此外部 IP 地址并通过默认的帐户和密码 (`admin/P@88w0rd`) 登录 Web 控制台。在**集群管理**页面,您可以看到集群概览。 + + +## 启用可插拔组件(可选) + +上面的示例演示了默认的最小安装过程,要在 KubeSphere 中启用其他组件,请参阅[启用可插拔组件](../../../pluggable-components/)。 diff --git a/content/zh/docs/v3.4/installing-on-kubernetes/introduction/_index.md b/content/zh/docs/v3.4/installing-on-kubernetes/introduction/_index.md new file mode 100644 index 000000000..3b9a38111 --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-kubernetes/introduction/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "安装说明" +weight: 4100 + +_build: + render: false +--- \ No newline at end of file diff --git a/content/zh/docs/v3.4/installing-on-kubernetes/introduction/overview.md b/content/zh/docs/v3.4/installing-on-kubernetes/introduction/overview.md new file mode 100644 index 000000000..2f579b4d8 --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-kubernetes/introduction/overview.md @@ -0,0 +1,65 @@ +--- +title: "概述" +keywords: "KubeSphere, Kubernetes, 安装" +description: "了解在已有 Kubernetes 集群上部署 KubeSphere 的一般步骤。" + +linkTitle: "概述" +weight: 4110 +--- + +![KubeSphere+K8s](https://pek3b.qingstor.com/kubesphere-docs/png/20191123144507.png) + +KubeSphere 承诺为用户提供即插即用架构,您可以轻松地将 KubeSphere 安装在现有的 Kubernetes 集群上。更具体地说,KubeSphere 既可以部署于托管在云端(例如 AWS EKS、青云QingCloud QKE 和 Google GKE 等)的 Kubernetes 服务上,也可以部署在本地 Kubernetes 集群上。这是因为 KubeSphere 不会侵入 Kubernetes,它仅与 Kubernetes API 交互,以管理 Kubernetes 集群资源。换句话说,KubeSphere 可以安装在任何原生 Kubernetes 集群和 Kubernetes 发行版上。 + +本节概述了在 Kubernetes 上安装 KubeSphere 的一般步骤。有关在不同环境中特定安装方式的更多信息,请参见在托管 Kubernetes 上安装和在本地 Kubernetes 上安装。 + +{{< notice note >}} + +在现有 Kubernetes 集群上安装 KubeSphere 之前,请参阅[准备工作](../prerequisites/)。 + +{{}} + +## 视频演示 + + + +## 部署 KubeSphere + +确保现有的 Kubernetes 集群满足所有要求之后,您可以使用 kubectl 以默认最小安装包来安装 KubeSphere。 + +1. 执行以下命令以开始安装: + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml + ``` + +2. 检查安装日志: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + +3. 使用 `kubectl get pod --all-namespaces` 查看所有 Pod 在 KubeSphere 相关的命名空间是否正常运行。如果是正常运行,请通过以下命令来检查控制台的端口(默认为 30880): + + ```bash + kubectl get svc/ks-console -n kubesphere-system + ``` + +4. 
确保在安全组中打开了 30880 端口,通过 NodePort (`IP:30880`) 使用默认帐户和密码 (`admin/P@88w0rd`) 访问 Web 控制台。 + + +## 启用可插拔组件(可选) + +如果您使用默认的最小化安装,请参考[启用可插拔组件](../../../pluggable-components/)来安装其他组件。 + +{{< notice tip >}} + +- 您可以在 KubeSphere 安装之前或之后启用可插拔组件。请参考示例文件 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/blob/master/deploy/cluster-configuration.yaml) 获取更多详细信息。 +- 请确保集群中有足够的 CPU 和内存。 +- 强烈建议安装这些可插拔组件,以体验 KubeSphere 提供的全栈功能。 + +{{}} diff --git a/content/zh/docs/v3.4/installing-on-kubernetes/introduction/prerequisites.md b/content/zh/docs/v3.4/installing-on-kubernetes/introduction/prerequisites.md new file mode 100644 index 000000000..534161afe --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-kubernetes/introduction/prerequisites.md @@ -0,0 +1,51 @@ +--- +title: "准备工作" +keywords: "KubeSphere, Kubernetes, 安装, 准备工作" +description: "确保现有 Kubernetes 集群运行所在的环境满足部署 KubeSphere 的前提条件。" +linkTitle: "准备工作" +weight: 4120 +--- + + + +您可以在虚拟机和裸机上安装 KubeSphere,并同时配置 Kubernetes。另外,只要 Kubernetes 集群满足以下前提条件,那么您也可以在云托管和本地 Kubernetes 集群上部署 KubeSphere。 + +- 如需在 Kubernetes 上安装 KubeSphere 3.3,您的 Kubernetes 版本必须为:v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 v1.21.x。 +- 可用 CPU > 1 核;内存 > 2 G。CPU 必须为 x86_64,暂时不支持 Arm 架构的 CPU。 +- Kubernetes 集群已配置**默认** StorageClass(请使用 `kubectl get sc` 进行确认)。 +- 使用 `--cluster-signing-cert-file` 和 `--cluster-signing-key-file` 参数启动集群时,kube-apiserver 将启用 CSR 签名功能。请参见 [RKE 安装问题](https://github.com/kubesphere/kubesphere/issues/1925#issuecomment-591698309)。 + +## 预检查 + +1. 在集群节点中运行 `kubectl version`,确保 Kubernetes 版本可兼容。输出如下所示: + + ```bash + $ kubectl version + Client Version: version.Info{Major:"1", Minor:"19", GitVersion:"v1.19.8", GitCommit:"fd5d41537aee486160ad9b5356a9d82363273721", GitTreeState:"clean", BuildDate:"2021-02-17T12:41:51Z", GoVersion:"go1.15.8", Compiler:"gc", Platform:"linux/amd64"} + Server Version: version.Info{Major:"1", Minor:"19", GitVersion:"v1.19.8", GitCommit:"fd5d41537aee486160ad9b5356a9d82363273721", GitTreeState:"clean", BuildDate:"2021-02-17T12:33:08Z", GoVersion:"go1.15.8", Compiler:"gc", Platform:"linux/amd64"} + ``` + + {{< notice note >}} +请注意 `Server Version` 这一行。如果 `GitVersion` 显示为旧版本,则需要先升级 Kubernetes。 + {{}} + +2. 检查集群中的可用资源是否满足最低要求。 + + ```bash + $ free -g + total used free shared buff/cache available + Mem: 16 4 10 0 3 2 + Swap: 0 0 0 + ``` + +3. 
检查集群中是否有**默认** StorageClass(准备默认 StorageClass 是安装 KubeSphere 的前提条件)。 + + ```bash + $ kubectl get sc + NAME PROVISIONER AGE + glusterfs (default) kubernetes.io/glusterfs 3d4h + ``` + +如果 Kubernetes 集群环境满足上述所有要求,那么您就可以在现有的 Kubernetes 集群上部署 KubeSphere 了。 + +有关更多信息,请参见[概述](../overview/)。 diff --git a/content/zh/docs/v3.4/installing-on-kubernetes/on-prem-kubernetes/_index.md b/content/zh/docs/v3.4/installing-on-kubernetes/on-prem-kubernetes/_index.md new file mode 100644 index 000000000..024df05de --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-kubernetes/on-prem-kubernetes/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "在本地 Kubernetes 上安装" +weight: 4300 + +_build: + render: false +--- diff --git a/content/zh/docs/v3.4/installing-on-kubernetes/on-prem-kubernetes/install-ks-on-linux-airgapped.md b/content/zh/docs/v3.4/installing-on-kubernetes/on-prem-kubernetes/install-ks-on-linux-airgapped.md new file mode 100644 index 000000000..0eea2cde7 --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-kubernetes/on-prem-kubernetes/install-ks-on-linux-airgapped.md @@ -0,0 +1,411 @@ +--- +title: "离线安装" +keywords: 'Kubernetes, KubeSphere, 离线, 安装' +description: '了解在离线环境中安装 KubeSphere 的最佳实践。' + +linkTitle: "离线安装" +weight: 4310 +--- + +离线安装几乎与在线安装相同,不同之处是您必须创建一个本地仓库来托管 Docker 镜像。本教程演示了如何在离线环境中将 KubeSphere 安装到 Kubernetes 上。 + +开始下方步骤之前,请先参阅[准备工作](../../../installing-on-kubernetes/introduction/prerequisites/)。 + +## 步骤 1:准备一个私有镜像仓库 + +您可以使用 Harbor 或者其他任意私有镜像仓库。本教程以 Docker 仓库作为示例,并使用[自签名证书](https://docs.docker.com/registry/insecure/#use-self-signed-certificates)(如果您有自己的私有镜像仓库,可以跳过这一步)。 + +### 使用自签名证书 + +1. 执行以下命令生成您自己的证书: + + ```bash + mkdir -p certs + ``` + + ```bash + openssl req \ + -newkey rsa:4096 -nodes -sha256 -keyout certs/domain.key \ + -x509 -days 36500 -out certs/domain.crt + ``` + +2. 当您生成自己的证书时,请确保在字段 `Common Name` 中指定一个域名。例如,本示例中该字段被指定为 `dockerhub.kubekey.local`。 + + ![自签名证书](/images/docs/v3.3/zh-cn/installing-on-kubernetes/installing-on-on-premises-kubernetes/air-gapped-installation/self-signed-cert.jpg) + +### 启动 Docker 仓库 + +执行以下命令启动 Docker 仓库: + +``` +docker run -d \ + --restart=always \ + --name registry \ + -v "$(pwd)"/certs:/certs \ + -v /mnt/registry:/var/lib/registry \ + -e REGISTRY_HTTP_ADDR=0.0.0.0:443 \ + -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \ + -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \ + -p 443:443 \ + registry:2 +``` + +{{< notice note >}} + +Docker 使用 `/var/lib/docker` 作为默认路径来存储所有 Docker 相关文件(包括镜像)。建议您添加附加存储卷,分别给 `/var/lib/docker` 和 `/mnt/registry` 挂载至少 **100G**。请参见 [fdisk](https://www.computerhope.com/unix/fdisk.htm) 的参考命令。 + +{{}} + +### 配置仓库 + +1. 在 `/etc/hosts` 中添加一个条目,将主机名(即仓库域名;在本示例中是 `dockerhub.kubekey.local`)映射到您机器的私有 IP 地址,如下所示。 + + ```bash + # docker registry + 192.168.0.2 dockerhub.kubekey.local + ``` + +2. 执行以下命令,复制证书到指定目录,并使 Docker 信任该证书。 + + ```bash + mkdir -p /etc/docker/certs.d/dockerhub.kubekey.local + ``` + + ```bash + cp certs/domain.crt /etc/docker/certs.d/dockerhub.kubekey.local/ca.crt + ``` + + {{< notice note >}} + + 证书的路径与域名相关联。当您复制路径时,如果与上面设置的路径不同,请使用实际域名。 + + {{}} + +3. 要验证私有仓库是否有效,您可以先复制一个镜像到您的本地机器,然后使用 `docker push` 和 `docker pull` 来测试。 + +## 步骤 2:准备安装镜像 + +当您在离线环境中安装 KubeSphere 时,需要事先准备一个包含所有必需镜像的镜像包。 + +1. 使用以下命令从能够访问互联网的机器上下载镜像清单文件 `images-list.txt`: + + ```bash + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/images-list.txt + ``` + + {{< notice note >}} + + 该文件根据不同的模块列出了 `##+modulename` 下的镜像。您可以按照相同的规则把自己的镜像添加到这个文件中。若需要查看完整文件,请参见[附录](#附录)。 + + {{}} + +2. 
下载 `offline-installation-tool.sh`。 + + ```bash + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/offline-installation-tool.sh + ``` + +3. 使 `.sh` 文件可执行。 + + ```bash + chmod +x offline-installation-tool.sh + ``` + +4. 您可以执行命令 `./offline-installation-tool.sh -h` 来查看如何使用脚本: + + ```bash + root@master:/home/ubuntu# ./offline-installation-tool.sh -h + Usage: + + ./offline-installation-tool.sh [-l IMAGES-LIST] [-d IMAGES-DIR] [-r PRIVATE-REGISTRY] [-v KUBERNETES-VERSION ] + + Description: + -b : save kubernetes' binaries. + -d IMAGES-DIR : the dir of files (tar.gz) which generated by `docker save`. default: ./kubesphere-images + -l IMAGES-LIST : text file with list of images. + -r PRIVATE-REGISTRY : target private registry:port. + -s : save model will be applied. Pull the images in the IMAGES-LIST and save images as a tar.gz file. + -v KUBERNETES-VERSION : download kubernetes' binaries. default: v1.17.9 + -h : usage message + ``` + +5. 在 `offline-installation-tool.sh` 中拉取镜像。 + + ```bash + ./offline-installation-tool.sh -s -l images-list.txt -d ./kubesphere-images + ``` + + {{< notice note >}} + + 您可以根据需要选择拉取的镜像。例如,如果已经有一个 Kubernetes 集群了,您可以在 `images-list.text` 中删除 `##k8s-images` 和在它下面的相关镜像。 + + {{}} + +## 步骤 3:推送镜像至私有仓库 + +将打包的镜像文件传输至您的本地机器,并运行以下命令把它推送至仓库。 + +```bash +./offline-installation-tool.sh -l images-list.txt -d ./kubesphere-images -r dockerhub.kubekey.local +``` + +{{< notice note >}} + +命令中的域名是 `dockerhub.kubekey.local`。请确保使用您**自己仓库的地址**。 + +{{}} + +## 步骤 4:下载部署文件 + +与在现有 Kubernetes 集群上在线安装 KubeSphere 相似,您也需要事先下载 `cluster-configuration.yaml` 和 `kubesphere-installer.yaml`。 + +1. 执行以下命令下载这两个文件,并将它们传输至您充当任务机的机器,用于安装。 + + ```bash + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + ``` + +2. 编辑 `cluster-configuration.yaml` 添加您的私有镜像仓库。例如,本教程中的仓库地址是 `dockerhub.kubekey.local`,将它用作 `.spec.local_registry` 的值,如下所示: + + ```yaml + spec: + persistence: + storageClass: "" + authentication: + jwtSecret: "" + local_registry: dockerhub.kubekey.local # Add this line manually; make sure you use your own registry address. + ``` + + {{< notice note >}} + + 您可以在该 YAML 文件中启用可插拔组件,体验 KubeSphere 的更多功能。有关详情,请参考[启用可插拔组件](../../../pluggable-components/)。 + + {{}} + +3. 编辑完成后保存 `cluster-configuration.yaml`。使用以下命令将 `ks-installer` 替换为您**自己仓库的地址**。 + + ```bash + sed -i "s#^\s*image: kubesphere.*/ks-installer:.*# image: dockerhub.kubekey.local/kubesphere/ks-installer:v3.0.0#" kubesphere-installer.yaml + ``` + + {{< notice warning >}} + + 命令中的仓库地址是 `dockerhub.kubekey.local`。请确保使用您自己仓库的地址。 + + {{}} + + +## 步骤 5:开始安装 + +确定完成上面所有步骤后,您可以执行以下命令。 + +```bash +kubectl apply -f kubesphere-installer.yaml +kubectl apply -f cluster-configuration.yaml +``` + +## 步骤 6:验证安装 + +安装完成后,您会看到以下内容: + +```bash +##################################################### +### Welcome to KubeSphere! ### +##################################################### + +Console: http://192.168.0.2:30880 +Account: admin +Password: P@88w0rd + +NOTES: + 1. After logging into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are ready. + 2. Please modify the default password after login. 
+ +##################################################### +https://kubesphere.io 20xx-xx-xx xx:xx:xx +##################################################### +``` + +现在,您可以通过 `http://{IP}:30880` 使用默认帐户和密码 `admin/P@88w0rd` 访问 KubeSphere 的 Web 控制台。 + +{{< notice note >}} + +要访问控制台,请确保在您的安全组中打开端口 30880。 + +{{}} + +## 附录 + +### KubeSphere 3.3 镜像清单 + +```txt +##k8s-images +registry.cn-beijing.aliyuncs.com/kubesphereio/kube-apiserver:v1.23.10 +registry.cn-beijing.aliyuncs.com/kubesphereio/kube-controller-manager:v1.23.10 +registry.cn-beijing.aliyuncs.com/kubesphereio/kube-proxy:v1.23.10 +registry.cn-beijing.aliyuncs.com/kubesphereio/kube-scheduler:v1.23.10 +registry.cn-beijing.aliyuncs.com/kubesphereio/kube-apiserver:v1.24.3 +registry.cn-beijing.aliyuncs.com/kubesphereio/kube-controller-manager:v1.24.3 +registry.cn-beijing.aliyuncs.com/kubesphereio/kube-proxy:v1.24.3 +registry.cn-beijing.aliyuncs.com/kubesphereio/kube-scheduler:v1.24.3 +registry.cn-beijing.aliyuncs.com/kubesphereio/kube-apiserver:v1.22.12 +registry.cn-beijing.aliyuncs.com/kubesphereio/kube-controller-manager:v1.22.12 +registry.cn-beijing.aliyuncs.com/kubesphereio/kube-proxy:v1.22.12 +registry.cn-beijing.aliyuncs.com/kubesphereio/kube-scheduler:v1.22.12 +registry.cn-beijing.aliyuncs.com/kubesphereio/kube-apiserver:v1.21.14 +registry.cn-beijing.aliyuncs.com/kubesphereio/kube-controller-manager:v1.21.14 +registry.cn-beijing.aliyuncs.com/kubesphereio/kube-proxy:v1.21.14 +registry.cn-beijing.aliyuncs.com/kubesphereio/kube-scheduler:v1.21.14 +registry.cn-beijing.aliyuncs.com/kubesphereio/pause:3.7 +registry.cn-beijing.aliyuncs.com/kubesphereio/pause:3.6 +registry.cn-beijing.aliyuncs.com/kubesphereio/pause:3.5 +registry.cn-beijing.aliyuncs.com/kubesphereio/pause:3.4.1 +registry.cn-beijing.aliyuncs.com/kubesphereio/coredns:1.8.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/coredns:1.8.6 +registry.cn-beijing.aliyuncs.com/kubesphereio/cni:v3.23.2 +registry.cn-beijing.aliyuncs.com/kubesphereio/kube-controllers:v3.23.2 +registry.cn-beijing.aliyuncs.com/kubesphereio/node:v3.23.2 +registry.cn-beijing.aliyuncs.com/kubesphereio/pod2daemon-flexvol:v3.23.2 +registry.cn-beijing.aliyuncs.com/kubesphereio/typha:v3.23.2 +registry.cn-beijing.aliyuncs.com/kubesphereio/flannel:v0.12.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/provisioner-localpv:2.10.1 +registry.cn-beijing.aliyuncs.com/kubesphereio/linux-utils:2.10.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/haproxy:2.3 +registry.cn-beijing.aliyuncs.com/kubesphereio/nfs-subdir-external-provisioner:v4.0.2 +registry.cn-beijing.aliyuncs.com/kubesphereio/k8s-dns-node-cache:1.15.12 +##kubesphere-images +registry.cn-beijing.aliyuncs.com/kubesphereio/ks-installer:v3.3.2 +registry.cn-beijing.aliyuncs.com/kubesphereio/ks-apiserver:v3.3.2 +registry.cn-beijing.aliyuncs.com/kubesphereio/ks-console:v3.3.2 +registry.cn-beijing.aliyuncs.com/kubesphereio/ks-controller-manager:v3.3.2 +registry.cn-beijing.aliyuncs.com/kubesphereio/ks-upgrade:v3.3.2 +registry.cn-beijing.aliyuncs.com/kubesphereio/kubectl:v1.22.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/kubectl:v1.21.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/kubectl:v1.20.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/kubefed:v0.8.1 +registry.cn-beijing.aliyuncs.com/kubesphereio/tower:v0.2.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/minio:RELEASE.2019-08-07T01-59-21Z +registry.cn-beijing.aliyuncs.com/kubesphereio/mc:RELEASE.2019-08-07T23-14-43Z +registry.cn-beijing.aliyuncs.com/kubesphereio/snapshot-controller:v4.0.0 
+registry.cn-beijing.aliyuncs.com/kubesphereio/nginx-ingress-controller:v1.1.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/defaultbackend-amd64:1.4 +registry.cn-beijing.aliyuncs.com/kubesphereio/metrics-server:v0.4.2 +registry.cn-beijing.aliyuncs.com/kubesphereio/redis:5.0.14-alpine +registry.cn-beijing.aliyuncs.com/kubesphereio/haproxy:2.0.25-alpine +registry.cn-beijing.aliyuncs.com/kubesphereio/alpine:3.14 +registry.cn-beijing.aliyuncs.com/kubesphereio/openldap:1.3.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/netshoot:v1.0 +##kubeedge-images +registry.cn-beijing.aliyuncs.com/kubesphereio/cloudcore:v1.9.2 +registry.cn-beijing.aliyuncs.com/kubesphereio/iptables-manager:v1.9.2 +registry.cn-beijing.aliyuncs.com/kubesphereio/edgeservice:v0.2.0 +##gatekeeper-images +registry.cn-beijing.aliyuncs.com/kubesphereio/gatekeeper:v3.5.2 +##openpitrix-images +registry.cn-beijing.aliyuncs.com/kubesphereio/openpitrix-jobs:v3.3.2 +##kubesphere-devops-images +registry.cn-beijing.aliyuncs.com/kubesphereio/devops-apiserver:ks-v3.3.2 +registry.cn-beijing.aliyuncs.com/kubesphereio/devops-controller:ks-v3.3.2 +registry.cn-beijing.aliyuncs.com/kubesphereio/devops-tools:ks-v3.3.2 +registry.cn-beijing.aliyuncs.com/kubesphereio/ks-jenkins:v3.3.0-2.319.1 +registry.cn-beijing.aliyuncs.com/kubesphereio/inbound-agent:4.10-2 +registry.cn-beijing.aliyuncs.com/kubesphereio/builder-base:v3.2.2 +registry.cn-beijing.aliyuncs.com/kubesphereio/builder-nodejs:v3.2.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/builder-maven:v3.2.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/builder-maven:v3.2.1-jdk11 +registry.cn-beijing.aliyuncs.com/kubesphereio/builder-python:v3.2.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/builder-go:v3.2.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/builder-go:v3.2.2-1.16 +registry.cn-beijing.aliyuncs.com/kubesphereio/builder-go:v3.2.2-1.17 +registry.cn-beijing.aliyuncs.com/kubesphereio/builder-go:v3.2.2-1.18 +registry.cn-beijing.aliyuncs.com/kubesphereio/builder-base:v3.2.2-podman +registry.cn-beijing.aliyuncs.com/kubesphereio/builder-nodejs:v3.2.0-podman +registry.cn-beijing.aliyuncs.com/kubesphereio/builder-maven:v3.2.0-podman +registry.cn-beijing.aliyuncs.com/kubesphereio/builder-maven:v3.2.1-jdk11-podman +registry.cn-beijing.aliyuncs.com/kubesphereio/builder-python:v3.2.0-podman +registry.cn-beijing.aliyuncs.com/kubesphereio/builder-go:v3.2.0-podman +registry.cn-beijing.aliyuncs.com/kubesphereio/builder-go:v3.2.2-1.16-podman +registry.cn-beijing.aliyuncs.com/kubesphereio/builder-go:v3.2.2-1.17-podman +registry.cn-beijing.aliyuncs.com/kubesphereio/builder-go:v3.2.2-1.18-podman +registry.cn-beijing.aliyuncs.com/kubesphereio/s2ioperator:v3.2.1 +registry.cn-beijing.aliyuncs.com/kubesphereio/s2irun:v3.2.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/s2i-binary:v3.2.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/tomcat85-java11-centos7:v3.2.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/tomcat85-java11-runtime:v3.2.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/tomcat85-java8-centos7:v3.2.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/tomcat85-java8-runtime:v3.2.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/java-11-centos7:v3.2.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/java-8-centos7:v3.2.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/java-8-runtime:v3.2.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/java-11-runtime:v3.2.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/nodejs-8-centos7:v3.2.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/nodejs-6-centos7:v3.2.0 
+registry.cn-beijing.aliyuncs.com/kubesphereio/nodejs-4-centos7:v3.2.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/python-36-centos7:v3.2.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/python-35-centos7:v3.2.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/python-34-centos7:v3.2.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/python-27-centos7:v3.2.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/argocd:v2.3.3 +registry.cn-beijing.aliyuncs.com/kubesphereio/argocd-applicationset:v0.4.1 +registry.cn-beijing.aliyuncs.com/kubesphereio/dex:v2.30.2 +registry.cn-beijing.aliyuncs.com/kubesphereio/redis:6.2.6-alpine +##kubesphere-monitoring-images +registry.cn-beijing.aliyuncs.com/kubesphereio/configmap-reload:v0.5.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/prometheus:v2.34.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/prometheus-config-reloader:v0.55.1 +registry.cn-beijing.aliyuncs.com/kubesphereio/prometheus-operator:v0.55.1 +registry.cn-beijing.aliyuncs.com/kubesphereio/kube-rbac-proxy:v0.11.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/kube-state-metrics:v2.5.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/node-exporter:v1.3.1 +registry.cn-beijing.aliyuncs.com/kubesphereio/alertmanager:v0.23.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/thanos:v0.25.2 +registry.cn-beijing.aliyuncs.com/kubesphereio/grafana:8.3.3 +registry.cn-beijing.aliyuncs.com/kubesphereio/kube-rbac-proxy:v0.8.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/notification-manager-operator:v1.4.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/notification-manager:v1.4.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/notification-tenant-sidecar:v3.2.0 +##kubesphere-logging-images +registry.cn-beijing.aliyuncs.com/kubesphereio/elasticsearch-curator:v5.7.6 +registry.cn-beijing.aliyuncs.com/kubesphereio/elasticsearch-oss:6.8.22 +registry.cn-beijing.aliyuncs.com/kubesphereio/fluentbit-operator:v0.13.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/docker:19.03 +registry.cn-beijing.aliyuncs.com/kubesphereio/fluent-bit:v1.8.11 +registry.cn-beijing.aliyuncs.com/kubesphereio/log-sidecar-injector:1.1 +registry.cn-beijing.aliyuncs.com/kubesphereio/filebeat:6.7.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/kube-events-operator:v0.4.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/kube-events-exporter:v0.4.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/kube-events-ruler:v0.4.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/kube-auditing-operator:v0.2.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/kube-auditing-webhook:v0.2.0 +##istio-images +registry.cn-beijing.aliyuncs.com/kubesphereio/pilot:1.11.1 +registry.cn-beijing.aliyuncs.com/kubesphereio/proxyv2:1.11.1 +registry.cn-beijing.aliyuncs.com/kubesphereio/jaeger-operator:1.27 +registry.cn-beijing.aliyuncs.com/kubesphereio/jaeger-agent:1.27 +registry.cn-beijing.aliyuncs.com/kubesphereio/jaeger-collector:1.27 +registry.cn-beijing.aliyuncs.com/kubesphereio/jaeger-query:1.27 +registry.cn-beijing.aliyuncs.com/kubesphereio/jaeger-es-index-cleaner:1.27 +registry.cn-beijing.aliyuncs.com/kubesphereio/kiali-operator:v1.38.1 +registry.cn-beijing.aliyuncs.com/kubesphereio/kiali:v1.38 +##example-images +registry.cn-beijing.aliyuncs.com/kubesphereio/busybox:1.31.1 +registry.cn-beijing.aliyuncs.com/kubesphereio/nginx:1.14-alpine +registry.cn-beijing.aliyuncs.com/kubesphereio/wget:1.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/hello:plain-text +registry.cn-beijing.aliyuncs.com/kubesphereio/wordpress:4.8-apache 
+registry.cn-beijing.aliyuncs.com/kubesphereio/hpa-example:latest +registry.cn-beijing.aliyuncs.com/kubesphereio/fluentd:v1.4.2-2.0 +registry.cn-beijing.aliyuncs.com/kubesphereio/perl:latest +registry.cn-beijing.aliyuncs.com/kubesphereio/examples-bookinfo-productpage-v1:1.16.2 +registry.cn-beijing.aliyuncs.com/kubesphereio/examples-bookinfo-reviews-v1:1.16.2 +registry.cn-beijing.aliyuncs.com/kubesphereio/examples-bookinfo-reviews-v2:1.16.2 +registry.cn-beijing.aliyuncs.com/kubesphereio/examples-bookinfo-details-v1:1.16.2 +registry.cn-beijing.aliyuncs.com/kubesphereio/examples-bookinfo-ratings-v1:1.16.3 +##weave-scope-images +registry.cn-beijing.aliyuncs.com/kubesphereio/scope:1.13.0 +``` diff --git a/content/zh/docs/v3.4/installing-on-kubernetes/uninstall-kubesphere-from-k8s.md b/content/zh/docs/v3.4/installing-on-kubernetes/uninstall-kubesphere-from-k8s.md new file mode 100644 index 000000000..9130062e3 --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-kubernetes/uninstall-kubesphere-from-k8s.md @@ -0,0 +1,15 @@ +--- +title: "从 Kubernetes 上卸载 KubeSphere" +keywords: 'Kubernetes, KubeSphere, 卸载, 移除集群' +description: '从 Kubernetes 集群中删除 KubeSphere。' +LinkTitle: "从 Kubernetes 上卸载 KubeSphere" +weight: 4400 +--- + +您可以使用 [kubesphere-delete.sh](https://github.com/kubesphere/ks-installer/blob/release-3.1/scripts/kubesphere-delete.sh) 将 KubeSphere 从您现有的 Kubernetes 集群中卸载。复制 [GitHub 源文件](https://raw.githubusercontent.com/kubesphere/ks-installer/release-3.1/scripts/kubesphere-delete.sh)并在本地机器上执行此脚本。 + +{{< notice warning >}} + +卸载意味着 KubeSphere 会从您的 Kubernetes 集群中移除。此操作不可逆并且没有任何备份,请谨慎操作。 + +{{}} \ No newline at end of file diff --git a/content/zh/docs/v3.4/installing-on-linux/_index.md b/content/zh/docs/v3.4/installing-on-linux/_index.md new file mode 100644 index 000000000..17a608f0b --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/_index.md @@ -0,0 +1,14 @@ +--- +linkTitle: "在 Linux 上安装" +title: "在 Linux 上安装 KubeSphere" +description: "演示如何在云上和本地 Linux 环境中安装 KubeSphere。" +layout: "second" + +linkTitle: "在 Linux 上安装 KubeSphere" +weight: 3000 + +icon: "/images/docs/v3.3/docs.svg" +--- + + +本章演示如何使用 KubeKey 在不同环境的 Linux 上预配置生产就绪的 Kubernetes 和 KubeSphere 集群。 您还可以使用 KubeKey 轻松扩展和缩小集群,并根据需要设置各种存储类。 diff --git a/content/zh/docs/v3.4/installing-on-linux/cluster-operation/_index.md b/content/zh/docs/v3.4/installing-on-linux/cluster-operation/_index.md new file mode 100644 index 000000000..2710fff9a --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/cluster-operation/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "添加或删除节点" +weight: 3600 + +_build: + render: false +--- diff --git a/content/zh/docs/v3.4/installing-on-linux/cluster-operation/add-edge-nodes.md b/content/zh/docs/v3.4/installing-on-linux/cluster-operation/add-edge-nodes.md new file mode 100644 index 000000000..e36b29056 --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/cluster-operation/add-edge-nodes.md @@ -0,0 +1,258 @@ +--- +title: "添加边缘节点" +keywords: 'Kubernetes, KubeSphere, KubeEdge' +description: '将边缘节点添加到集群。' +linkTitle: "添加边缘节点" +weight: 3630 +--- + +KubeSphere 利用 [KubeEdge](https://kubeedge.io/zh/) 将原生容器化应用程序编排功能扩展到边缘的主机。KubeEdge 拥有单独的云端和边端核心模块,提供完整的边缘计算解决方案,但安装过程可能较为繁琐。 + +![kubeedge_arch](/images/docs/v3.3/zh-cn/installing-on-linux/add-and-delete-nodes/add-edge-nodes/kubeedge_arch.png) + +{{< notice note >}} + +有关 KubeEdge 不同组件的更多信息,请参见 [KubeEdge 文档](https://docs.kubeedge.io/zh/docs/kubeedge/#components)。 + +{{}} + +本教程演示如何将边缘节点添加到集群。 + +## 准备工作 + +- 您需要启用 
[KubeEdge](../../../pluggable-components/kubeedge/)。 +- 为了避免兼容性问题,建议安装 v1.21.x 及以下版本的 Kubernetes。 +- 您有一个可用节点作为边缘节点,该节点可以运行 Ubuntu(建议)或 CentOS。本教程以 Ubuntu 18.04 为例。 +- 与 Kubernetes 集群节点不同,边缘节点应部署在单独的网络中。 + +## 防止非边缘工作负载调度到边缘节点 + +由于部分守护进程集(例如,Calico)有强容忍度,为了避免影响边缘节点的正常工作,您需要手动 Patch Pod 以防止非边缘工作负载调度至边缘节点。 + +```bash +#!/bin/bash + + +NoShedulePatchJson='{"spec":{"template":{"spec":{"affinity":{"nodeAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[{"matchExpressions":[{"key":"node-role.kubernetes.io/edge","operator":"DoesNotExist"}]}]}}}}}}}' + +ns="kube-system" + + +DaemonSets=("nodelocaldns" "kube-proxy" "calico-node") + +length=${#DaemonSets[@]} + +for((i=0;i}} + 在 ks-installer 的 `ClusterConfiguration`中,如果您设置的是局域网地址,那么需要配置转发规则。如果您未配置转发规则,直接连接 30000 – 30004 端口即可。 + {{}} + +| 字段 | 外网端口 | 字段 | 内网端口 | +| ------------------- | -------- | ----------------------- | -------- | +| `cloudhubPort` | `10000` | `cloudhubNodePort` | `30000` | +| `cloudhubQuicPort` | `10001` | `cloudhubQuicNodePort` | `30001` | +| `cloudhubHttpsPort` | `10002` | `cloudhubHttpsNodePort` | `30002` | +| `cloudstreamPort` | `10003` | `cloudstreamNodePort` | `30003` | +| `tunnelPort` | `10004` | `tunnelNodePort` | `30004` | + +## 配置边缘节点 + +您需要在边缘节点上安装容器运行时并配置 EdgeMesh。 + +### 安装容器运行时 + +[KubeEdge](https://docs.kubeedge.io/zh/docs/) 支持多种容器运行时,包括 Docker、containerd、CRI-O 和 Virtlet。有关更多信息,请参见 [KubeEdge 文档](https://docs.kubeedge.io/zh/docs/advanced/cri/)。 + +{{< notice note >}} + +如果您的边缘节点使用 Docker 作为容器运行时,为确保 KubeSphere 可以获取 Pod 指标,请务必在边缘节点上安装 Docker v19.3.0 或更高版本。 + +{{}} + +### 配置 EdgeMesh + +执行以下步骤以在边缘节点上配置 [EdgeMesh](https://kubeedge.io/zh/docs/advanced/edgemesh/)。 + +1. 编辑 `/etc/nsswitch.conf`。 + + ```bash + vi /etc/nsswitch.conf + ``` + +2. 在该文件中添加以下内容。 + + ```bash + hosts: dns files mdns4_minimal [NOTFOUND=return] + ``` + +3. 保存文件并运行以下命令启用 IP 转发: + + ```bash + sudo echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf + ``` + +4. 验证修改: + + ```bash + sudo sysctl -p | grep ip_forward + ``` + + 预期结果: + + ```bash + net.ipv4.ip_forward = 1 + ``` + +## 添加边缘节点 + +1. 使用 `admin` 用户登录控制台,点击左上角的**平台管理**。 + +2. 选择**集群管理**,然后导航至**节点**下的**边缘节点**。 + + {{< notice note >}} + + 如果已经启用[多集群管理](../../../multicluster-management/),则需要首先选择一个集群。 + + {{}} + +3. 点击**添加**。在出现的对话框中,设置边缘节点的节点名称并输入其内网 IP 地址。点击**验证**以继续。 + + ![add-edge-node](/images/docs/v3.3/zh-cn/installing-on-linux/add-and-delete-nodes/add-edge-nodes/add-edge-node.png) + + {{< notice note >}} + + - 内网 IP 地址仅用于节点间通信,您不一定要使用边缘节点的真实内网 IP 地址。只要 IP 地址验证成功,您就可以使用该 IP 地址。 + - 建议您勾选方框添加默认污点。 + + {{}} + +4. 复制**边缘节点配置命令**下自动创建的命令,并在您的边缘节点上运行该命令。 + + ![edge-command](/images/docs/v3.3/zh-cn/installing-on-linux/add-and-delete-nodes/add-edge-nodes/edge-command.png) + + {{< notice note >}} + + 在运行该命令前,请确保您的边缘节点上已安装 `wget`。 + + {{}} + +5. 关闭对话框,刷新页面,您将看到边缘节点显示在列表中。 + + {{< notice note >}} + + 添加边缘节点后,如果在**边缘节点**页面查看不到 CPU 和内存资源使用情况,请确保您的集群中已安装 [Metrics Server](../../../pluggable-components/metrics-server/) 0.4.1 或以上版本。 + + {{}} + + +## 收集边缘节点监控信息 + +如果需要收集边缘节点的监控信息,请先在`ClusterConfiguration` 中开启 `metrics_server`,以及在 KubeEdge 中开启 `edgeStream`。 + +1. 在 KubeSphere 控制台上,点击**平台管理 > 集群管理**。 + +2. 在左侧导航栏。点击**定制资源定义**。 + +3. 在右侧的搜索框中,输入 `clusterconfiguration`,并点击结果查看其详细页面。 + +4. 点击 `ks-installer` 右侧的 icon,选择**编辑 YAML**。 + +5. 找到 **metrics_server**,将 `enabled` 的 `false` 更改为 `true`。 + + ```yaml + metrics_server: + enabled: true # 将“false”更改为“true”。 + ``` + +6. 点击右下角的**确定**,保存配置。 + +7. 
进入 `/etc/kubeedge/config` 文件,搜索 `edgeStream`,将 `false` 更改为 `true` 并保存文件。 + ```bash + cd /etc/kubeedge/config + vi edgecore.yaml + ``` + + ```bash + edgeStream: + enable: true #将“false”更改为“true”。 + handshakeTimeout: 30 + readDeadline: 15 + server: xx.xxx.xxx.xxx:10004 #如果没有添加端口转发,将端口修改为30004。 + tlsTunnelCAFile: /etc/kubeedge/ca/rootCA.crt + tlsTunnelCertFile: /etc/kubeedge/certs/server.crt + tlsTunnelPrivateKeyFile: /etc/kubeedge/certs/server.key + writeDeadline: 15 + ``` + +8. 重启 `edgecore.service`。 + ```bash + systemctl restart edgecore.service + ``` + +9. 如果仍然无法显示监控数据,执行以下命令: + ```bash + journalctl -u edgecore.service -b -r + ``` + + {{< notice note >}} + + 如果提示 `failed to check the running environment: kube-proxy should not running on edge node when running edgecore`,需要参考步骤 8 再次重启 `edgecore.service`。 + + {{}} +## 移除边缘节点 + +移除边缘节点之前,请删除在该节点上运行的全部工作负载。 + +1. 在边缘节点上运行以下命令: + + ```bash + ./keadm reset + ``` + + ``` + apt remove mosquitto + ``` + + ```bash + rm -rf /var/lib/kubeedge /var/lib/edged /etc/kubeedge/ca /etc/kubeedge/certs + ``` + + {{< notice note >}} + + 如果无法删除 tmpfs 挂载的文件夹,请重启节点或先取消挂载该文件夹。 + + {{}} + +2. 运行以下命令从集群中移除边缘节点: + + ```bash + kubectl delete node + ``` + +3. 如需从集群中卸载 KubeEdge,运行以下命令: + + ```bash + helm uninstall kubeedge -n kubeedge + ``` + + ```bash + kubectl delete ns kubeedge + ``` + + {{< notice note >}} + + 卸载完成后,您将无法为集群添加边缘节点。 + + {{}} \ No newline at end of file diff --git a/content/zh/docs/v3.4/installing-on-linux/cluster-operation/add-new-nodes.md b/content/zh/docs/v3.4/installing-on-linux/cluster-operation/add-new-nodes.md new file mode 100644 index 000000000..c137c6901 --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/cluster-operation/add-new-nodes.md @@ -0,0 +1,158 @@ +--- +title: "添加新节点" +keywords: 'Kubernetes, KubeSphere, 水平扩缩, 添加节点' +description: '添加更多节点以扩展集群。' +linkTitle: "添加新节点" +weight: 3610 +--- + +KubeSphere 使用一段时间之后,由于工作负载不断增加,您可能需要水平扩展集群。自 KubeSphere v3.0.0 起,您可以使用全新的安装程序 [KubeKey](https://github.com/kubesphere/kubekey) 将新节点添加到集群。从根本上说,该操作是基于 Kubelet 的注册机制。换言之,新节点将自动加入现有的 Kubernetes 集群。KubeSphere 支持混合环境,这意味着新添加的主机操作系统可以是 CentOS 或者 Ubuntu。 + +本教程演示了如何将新节点添加到单节点集群。若要水平扩展多节点集群,操作步骤基本相同。 + +## 准备工作 + +- 您需要一个单节点集群。有关更多信息,请参见[在 Linux 上以 All-in-One 模式安装 KubeSphere](../../../quick-start/all-in-one-on-linux/)。 + +- 您需要已经[下载了 KubeKey](../../../installing-on-linux/introduction/multioverview/#步骤-2下载-kubekey)。 + +## 添加工作节点 + +1. 使用 KubeKey 检索集群信息。以下命令会创建配置文件 (`sample.yaml`)。 + + ```bash + ./kk create config --from-cluster + ``` + + {{< notice note >}} + +如果您的机器上已有配置文件,就可以跳过此步骤。例如,若要将节点添加到由 KubeKey 设置的多节点集群,如果您没有删除该集群,则可能仍拥有该配置文件。 + +{{}} + +2. 在配置文件中,将新节点的信息放在 `hosts` 和 `roleGroups` 之下。该示例添加了两个新节点(即 `node1` 和 `node2`)。这里的 `master1` 是现有节点。 + + ```bash + ··· + spec: + hosts: + - {name: master1, address: 192.168.0.3, internalAddress: 192.168.0.3, user: root, password: Qcloud@123} + - {name: node1, address: 192.168.0.4, internalAddress: 192.168.0.4, user: root, password: Qcloud@123} + - {name: node2, address: 192.168.0.5, internalAddress: 192.168.0.5, user: root, password: Qcloud@123} + roleGroups: + etcd: + - master1 + control-plane: + - master1 + worker: + - node1 + - node2 + ··· + ``` + + {{< notice note >}} + +- 有关更多配置文件的信息,请参见[编辑配置文件](../../../installing-on-linux/introduction/multioverview/#2-编辑配置文件)。 + +- 添加新节点时,请勿修改现有节点的主机名。 + +- 用自己的主机名替换示例中的主机名。 + + {{}} + +3. 执行以下命令: + + ```bash + ./kk add nodes -f sample.yaml + ``` + +4. 
安装完成后,您将能够在 KubeSphere 的控制台上查看新节点及其信息。在**集群管理**页面,选择左侧菜单**节点**下的**集群节点**,或者执行命令 `kubectl get node` 以检查更改。 + + ```bash + $ kubectl get node + NAME STATUS ROLES AGE VERSION + master1 Ready master,worker 20d v1.17.9 + node1 Ready worker 31h v1.17.9 + node2 Ready worker 31h v1.17.9 + ``` + +## 添加主节点以实现高可用 + +添加主节点的步骤与添加工作节点的步骤大体一致,不过您需要为集群配置负载均衡器。您可以使用任何云负载均衡器或者硬件负载均衡器(例如 F5)。另外,Keepalived 和 [HAproxy](https://www.haproxy.com/)、或者 Nginx 也是创建高可用集群的替代方案。 + +1. 使用 KubeKey 创建配置文件。 + + ``` + ./kk create config --from-cluster + ``` + +2. 打开文件,可以看到一些字段预先填充了值。将新节点和负载均衡器的信息添加到文件中。以下示例供您参考: + + ```yaml + apiVersion: kubekey.kubesphere.io/v1alpha1 + kind: Cluster + metadata: + name: sample + spec: + hosts: + # You should complete the ssh information of the hosts + - {name: master1, address: 172.16.0.2, internalAddress: 172.16.0.2, user: root, password: Testing123} + - {name: master2, address: 172.16.0.5, internalAddress: 172.16.0.5, user: root, password: Testing123} + - {name: master3, address: 172.16.0.6, internalAddress: 172.16.0.6, user: root, password: Testing123} + - {name: worker1, address: 172.16.0.3, internalAddress: 172.16.0.3, user: root, password: Testing123} + - {name: worker2, address: 172.16.0.4, internalAddress: 172.16.0.4, user: root, password: Testing123} + - {name: worker3, address: 172.16.0.7, internalAddress: 172.16.0.7, user: root, password: Testing123} + roleGroups: + etcd: + - master1 + - master2 + - master3 + control-plane: + - master1 + - master2 + - master3 + worker: + - worker1 + - worker2 + - worker3 + controlPlaneEndpoint: + # If loadbalancer is used, 'address' should be set to loadbalancer's ip. + domain: lb.kubesphere.local + address: 172.16.0.253 + port: 6443 + kubernetes: + version: v1.22.12 + imageRepo: kubesphere + clusterName: cluster.local + proxyMode: ipvs + masqueradeAll: false + maxPods: 110 + nodeCidrMaskSize: 24 + network: + plugin: calico + kubePodsCIDR: 10.233.64.0/18 + kubeServiceCIDR: 10.233.0.0/18 + registry: + privateRegistry: "" + ``` + +3. 请注意 `controlPlaneEndpoint` 字段。 + + ```yaml + controlPlaneEndpoint: + # If you use a load balancer, the address should be set to the load balancer's ip. + domain: lb.kubesphere.local + address: 172.16.0.253 + port: 6443 + ``` + + - 负载均衡器的域名默认为 `lb.kubesphere.local`,用于内部访问。您可以按需进行更改。 + - 大多数情况下,您需要为 `address` 字段提供负载均衡器的**私有 IP 地址**。然而,不同的云厂商可能为负载均衡器进行不同的配置。例如,如果您在阿里云上配置服务器负载均衡 (SLB),该平台会为 SLB 分配一个公共 IP 地址,这意味着您需要为 `address` 字段指定公共 IP 地址。 + - `port` 字段指代 `api-server` 的端口。 + +4. 保存文件并执行以下命令以应用配置。 + + ```bash + ./kk add nodes -f sample.yaml + ``` + diff --git a/content/zh/docs/v3.4/installing-on-linux/cluster-operation/remove-nodes.md b/content/zh/docs/v3.4/installing-on-linux/cluster-operation/remove-nodes.md new file mode 100644 index 000000000..c5013520c --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/cluster-operation/remove-nodes.md @@ -0,0 +1,33 @@ +--- +title: "删除节点" +keywords: 'Kubernetes, KubeSphere, 水平扩缩, 删除节点' +description: '停止调度节点,或者删除节点以缩小集群规模。' +linkTitle: "删除节点" +weight: 3620 +--- + +## 停止调度节点 + +将节点标记为不可调度可防止调度程序将新的 Pod 放置到该节点上,同时不会影响该节点上的现有 Pod。作为节点重启或者其他维护之前的准备步骤,这十分有用。 + +以 `admin` 身份登录控制台,访问**集群管理**页面。若要将节点标记为不可调度,从左侧菜单中选择**节点**下的**集群节点**,找到想要从集群中删除的节点,点击**停止调度**。或者,直接执行命令 `kubectl cordon $NODENAME`。有关更多详细信息,请参见 [Kubernetes 节点](https://kubernetes.io/docs/concepts/architecture/nodes/)。 + +{{< notice note >}} + +守护进程集的 Pod 可以在无法调度的节点上运行。守护进程集通常提供应在节点上运行的本地节点服务,即使正在驱逐工作负载应用程序也不受影响。 + +{{}} + +## 删除节点 + +1. 
若要删除节点,您需要首先准备集群的配置文件(即在[设置集群](../../introduction/multioverview/#1-create-an-example-configuration-file)时所用的配置文件)。如果您没有该配置文件,请使用 [KubeKey](https://github.com/kubesphere/kubekey) 检索集群信息(将默认创建文件 `sample.yaml`)。 + + ```bash + ./kk create config --from-cluster + ``` + +2. 请确保在该配置文件中提供主机的所有信息,然后运行以下命令以删除节点。 + + ```bash + ./kk delete node -f config-sample.yaml + ``` \ No newline at end of file diff --git a/content/zh/docs/v3.4/installing-on-linux/high-availability-configurations/_index.md b/content/zh/docs/v3.4/installing-on-linux/high-availability-configurations/_index.md new file mode 100644 index 000000000..80984cb8d --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/high-availability-configurations/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "高可用配置" +weight: 3200 + +_build: + render: false +--- diff --git a/content/zh/docs/v3.4/installing-on-linux/high-availability-configurations/ha-configuration.md b/content/zh/docs/v3.4/installing-on-linux/high-availability-configurations/ha-configuration.md new file mode 100644 index 000000000..a6f577b48 --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/high-availability-configurations/ha-configuration.md @@ -0,0 +1,216 @@ +--- +title: "使用负载均衡器创建高可用集群" +keywords: 'KubeSphere, Kubernetes, HA, 高可用, 安装, 配置' +description: '如何配置一个高可用 Kubernetes 集群。' +linkTitle: "使用负载均衡器创建高可用集群" +weight: 3150 +--- + +您可以根据教程[多节点安装](../../../installing-on-linux/introduction/multioverview/)来创建单主节点 Kubernetes 集群并安装 KubeSphere。大多数情况下,单主节点集群大致足以供开发和测试环境使用。但是,对于生产环境,您需要考虑集群的高可用性。如果关键组件(例如 kube-apiserver、kube-scheduler 和 kube-controller-manager)都在同一个主节点上运行,一旦主节点宕机,Kubernetes 和 KubeSphere 都将不可用。因此,您需要为多个主节点配置负载均衡器,以创建高可用集群。您可以使用任意云负载均衡器或者任意硬件负载均衡器(例如 F5)。此外,也可以使用 Keepalived 和 [HAproxy](https://www.haproxy.com/),或者 Nginx 来创建高可用集群。 + +本教程演示了在 Linux 上安装 KubeSphere 时,高可用集群的大体配置。 + +## 架构 + +在您开始操作前,请确保准备了 6 台 Linux 机器,其中 3 台充当主节点,另外 3 台充当工作节点。下图展示了这些机器的详情,包括它们的私有 IP 地址和角色。有关系统和网络要求的更多信息,请参见[多节点安装](../../../installing-on-linux/introduction/multioverview/#步骤1准备-linux-主机)。 + +![高可用架构](/images/docs/v3.3/zh-cn/installing-on-linux/introduction/ha-configurations/ha-architecture.png) + +## 配置负载均衡器 + +您必须在您的环境中创建一个负载均衡器来监听(在某些云平台也称作监听器)关键端口。建议监听下表中的端口。 + +| 服务 | 协议 | 端口 | +| ---------- | ---- | ----- | +| apiserver | TCP | 6443 | +| ks-console | TCP | 30880 | +| http | TCP | 80 | +| https | TCP | 443 | + +{{< notice note >}} + +- 请确保您的负载均衡器至少监听 apiserver 端口。 + +- 根据集群的部署位置,您可能需要在安全组中打开端口以确保外部流量不被屏蔽。有关更多信息,请参见[端口要求](../../../installing-on-linux/introduction/port-firewall/)。 +- 在一些云平台上,您可以同时配置内置负载均衡器和外置负载均衡器。为外置负载均衡器分配公共 IP 地址后,您可以使用该 IP 地址来访问集群。 +- 有关如何配置负载均衡器的更多信息,请参见“在公有云上安装”中对在主要公有云平台上具体操作步骤的说明。 + +{{}} + +## 下载 KubeKey + +[Kubekey](https://github.com/kubesphere/kubekey) 是新一代安装程序,可以简单、快速和灵活地安装 Kubernetes 和 KubeSphere。请按照以下步骤下载 KubeKey。 + +{{< tabs >}} + +{{< tab "如果您能正常访问 GitHub 和 Googleapis" >}} + +从 [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) 下载 KubeKey 或直接使用以下命令。 + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "如果您访问 GitHub 和 Googleapis 受限" >}} + +先执行以下命令以确保您从正确的区域下载 KubeKey。 + +```bash +export KKZONE=cn +``` + +执行以下命令下载 KubeKey: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +在您下载 KubeKey 后,如果您将其传至新的机器,且访问 Googleapis 同样受限,在您执行以下步骤之前请务必再次执行 `export KKZONE=cn` 命令。 + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +执行以上命令会下载最新版 KubeKey,您可以修改命令中的版本号下载指定版本。 + +{{}} + +为 `kk` 添加可执行权限: + +```bash +chmod +x kk +``` + +创建包含默认配置的示例配置文件。这里使用 Kubernetes 
v1.22.12 作为示例。 + +```bash +./kk create config --with-kubesphere v3.3.2 --with-kubernetes v1.22.12 +``` + +{{< notice note >}} + +- 安装 KubeSphere 3.3 的建议 Kubernetes 版本:v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 v1.21.x。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.23.10。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 + +- 如果您在这一步的命令中不添加标志 `--with-kubesphere`,则不会部署 KubeSphere,只能使用配置文件中的 `addons` 字段安装,或者在您后续使用 `./kk create cluster` 命令时再次添加这个标志。 + +- 如果您添加标志 `--with-kubesphere` 时不指定 KubeSphere 版本,则会安装最新版本的 KubeSphere。 + +{{}} + +## 部署 KubeSphere 和 Kubernetes + +运行以上命令后,会创建一个配置文件 `config-sample.yaml`。编辑该文件以添加机器信息、配置负载均衡器和其他内容。 + +{{< notice note >}} + +如果您自定义文件名,文件名称可能会不同。 + +{{}} + +### config-sample.yaml 示例 + +```yaml +spec: + hosts: + - {name: master1, address: 192.168.0.2, internalAddress: 192.168.0.2, user: ubuntu, password: Testing123} + - {name: master2, address: 192.168.0.3, internalAddress: 192.168.0.3, user: ubuntu, password: Testing123} + - {name: master3, address: 192.168.0.4, internalAddress: 192.168.0.4, user: ubuntu, password: Testing123} + - {name: node1, address: 192.168.0.5, internalAddress: 192.168.0.5, user: ubuntu, password: Testing123} + - {name: node2, address: 192.168.0.6, internalAddress: 192.168.0.6, user: ubuntu, password: Testing123} + - {name: node3, address: 192.168.0.7, internalAddress: 192.168.0.7, user: ubuntu, password: Testing123} + roleGroups: + etcd: + - master1 + - master2 + - master3 + control-plane: + - master1 + - master2 + - master3 + worker: + - node1 + - node2 + - node3 +``` + +有关该配置文件中不同字段的更多信息,请参见 [Kubernetes 集群配置](../../../installing-on-linux/introduction/vars/)和[多节点安装](../../../installing-on-linux/introduction/multioverview/#2-编辑配置文件)。 + +### 配置负载均衡器 + +```yaml +spec: + controlPlaneEndpoint: + ##Internal loadbalancer for apiservers + #internalLoadbalancer: haproxy + + domain: lb.kubesphere.local + address: "192.168.0.xx" + port: 6443 +``` + +{{< notice note >}} + +- `config-sample.yaml` 文件中的 `address` 和 `port` 应缩进两个空格。 +- 大多数情况下,您需要在负载均衡器的 `address` 字段中提供**私有 IP 地址**。但是,不同的云厂商可能对负载均衡器有不同的配置。例如,如果您在阿里云上配置服务器负载均衡器 (SLB),平台会为 SLB 分配一个公共 IP 地址,所以您需要在 `address` 字段中指定公共 IP 地址。 +- 负载均衡器默认的内部访问域名是 `lb.kubesphere.local`。 +- 若要使用内置负载均衡器,请将 `internalLoadbalancer` 字段取消注释。 + +{{}} + +### 持久化存储插件配置 + +在生产环境中,您需要准备持久化存储并在 `config-sample.yaml` 中配置存储插件(例如 CSI),以明确您想使用哪一种存储服务。有关更多信息,请参见[持久化存储配置](../../../installing-on-linux/persistent-storage-configurations/understand-persistent-storage/)。 + +### 启用可插拔组件(可选) + +自 v2.1.0 起,KubeSphere 解耦了一些核心功能组件。您可以在安装之前或者之后启用这些可插拔组件。如果您不启用这些组件,KubeSphere 将默认以最小化安装。 + +您可以根据您的需求来启用任意可插拔组件。强烈建议您安装这些可插拔组件,以便体验 KubeSphere 提供的全栈特性和功能。启用前,请确保您的机器有足够的 CPU 和内存。有关详情请参见[启用可插拔组件](../../../pluggable-components/)。 + +### 开始安装 + +配置完成后,您可以执行以下命令来开始安装: + +```bash +./kk create cluster -f config-sample.yaml +``` + +### 验证安装 + +1. 运行以下命令查看安装日志。 + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + +2. 若您看到以下信息,您的高可用集群便已创建成功。 + + ```bash + ##################################################### + ### Welcome to KubeSphere! ### + ##################################################### + + Console: http://192.168.0.3:30880 + Account: admin + Password: P@88w0rd + + NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + the "Cluster Management". 
If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. + + ##################################################### + https://kubesphere.io 2020-xx-xx xx:xx:xx + ##################################################### + ``` diff --git a/content/zh/docs/v3.4/installing-on-linux/high-availability-configurations/internal-ha-configuration.md b/content/zh/docs/v3.4/installing-on-linux/high-availability-configurations/internal-ha-configuration.md new file mode 100644 index 000000000..04a7b3e73 --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/high-availability-configurations/internal-ha-configuration.md @@ -0,0 +1,200 @@ +--- +title: "使用 KubeKey 内置 HAproxy 创建高可用集群" +keywords: 'KubeSphere, Kubernetes, KubeKey, 高可用, 安装' +description: '如何使用 KubeKey 内置的 HAproxy 安装一个高可用的 KubeSphere 与 Kubernetes 集群。' +linkTitle: "使用 KubeKey 内置 HAproxy 创建高可用集群" +weight: 3150 +--- + +[KubeKey](https://github.com/kubesphere/kubekey) 作为一种集群安装工具,从版本 v1.2.1 开始,提供了内置高可用模式,支持一键部署高可用集群环境。KubeKey 的高可用模式实现方式称作本地负载均衡模式。具体表现为 KubeKey 会在每一个工作节点上部署一个负载均衡器(HAproxy),所有主节点的 Kubernetes 组件连接其本地的 kube-apiserver ,而所有工作节点的 Kubernetes 组件通过由 KubeKey 部署的负载均衡器反向代理到多个主节点的 kube-apiserver 。这种模式相较于专用到负载均衡器来说效率有所降低,因为会引入额外的健康检查机制,但是如果当前环境无法提供外部负载均衡器或者虚拟 IP(VIP)时这将是一种更实用、更有效、更方便的高可用部署模式。 + +本教程演示了在 Linux 上安装 KubeSphere 时,使用 KubeKey 内置高可用模式部署的大体配置。 + +## 架构 + +下图举例展示了内置高可用模式的架构图。有关系统和网络要求的更多信息,请参见[多节点安装](../../../installing-on-linux/introduction/multioverview/#步骤1准备-linux-主机)。 + +![高可用架构](/images/docs/v3.3/zh-cn/installing-on-linux/introduction/internal-ha-configuration/internalLoadBalancer.png) + +{{< notice note >}} + +在生产环境中,请确保准备了 6 台 Linux 机器,其中 3 台充当主节点,另外 3 台充当工作节点。 + +{{}} + +## 下载 KubeKey + +请按照以下步骤下载 KubeKey。 + +{{< tabs >}} + +{{< tab "如果您能正常访问 GitHub 和 Googleapis" >}} + +从 [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) 下载 KubeKey 或直接使用以下命令。 + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "如果您访问 GitHub 和 Googleapis 受限" >}} + +先执行以下命令以确保您从正确的区域下载 KubeKey。 + +```bash +export KKZONE=cn +``` + +执行以下命令下载 KubeKey: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +在您下载 KubeKey 后,如果您将其传至新的机器,且访问 Googleapis 同样受限,在您执行以下步骤之前请务必再次执行 `export KKZONE=cn` 命令。 + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +执行以上命令会下载最新版 KubeKey,您可以修改命令中的版本号下载指定版本。 + +{{}} + +为 `kk` 添加可执行权限: + +```bash +chmod +x kk +``` + +创建包含默认配置的示例配置文件。这里使用 Kubernetes v1.22.12 作为示例。 + +```bash +./kk create config --with-kubesphere v3.3.2 --with-kubernetes v1.22.12 +``` + +{{< notice note >}} + +- 安装 KubeSphere 3.3 的建议 Kubernetes 版本:v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 v1.21.x。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.23.10。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 + +- 如果您在这一步的命令中不添加标志 `--with-kubesphere`,则不会部署 KubeSphere,只能使用配置文件中的 `addons` 字段安装,或者在您后续使用 `./kk create cluster` 命令时再次添加这个标志。 + +- 如果您添加标志 `--with-kubesphere` 时不指定 KubeSphere 版本,则会安装最新版本的 KubeSphere。 + +{{}} + +## 部署 KubeSphere 和 Kubernetes + +运行以上命令后,会创建一个配置文件 `config-sample.yaml`。编辑该文件以添加机器信息、配置负载均衡器和其他内容。 + +{{< notice note >}} + +如果您自定义文件名,文件名称可能会不同。 + +{{}} + +### config-sample.yaml 示例 + +```yaml +spec: + hosts: + - {name: master1, address: 192.168.0.2, internalAddress: 192.168.0.2, user: ubuntu, password: Testing123} + - {name: master2, address: 192.168.0.3, internalAddress: 192.168.0.3, 
user: ubuntu, password: Testing123} + - {name: master3, address: 192.168.0.4, internalAddress: 192.168.0.4, user: ubuntu, password: Testing123} + - {name: node1, address: 192.168.0.5, internalAddress: 192.168.0.5, user: ubuntu, password: Testing123} + - {name: node2, address: 192.168.0.6, internalAddress: 192.168.0.6, user: ubuntu, password: Testing123} + - {name: node3, address: 192.168.0.7, internalAddress: 192.168.0.7, user: ubuntu, password: Testing123} + roleGroups: + etcd: + - master1 + - master2 + - master3 + control-plane: + - master1 + - master2 + - master3 + worker: + - node1 + - node2 + - node3 +``` + +有关该配置文件中不同字段的更多信息,请参见 [Kubernetes 集群配置](../../../installing-on-linux/introduction/vars/)和[多节点安装](../../../installing-on-linux/introduction/multioverview/#2-编辑配置文件)。 + +### 开启内置高可用模式 + +```yaml +spec: + controlPlaneEndpoint: + ##Internal loadbalancer for apiservers + #internalLoadbalancer: haproxy + + domain: lb.kubesphere.local + address: "" + port: 6443 +``` + +{{< notice note >}} + +- 开启内置高可用模式,需要将 `internalLoadbalancer` 字段取消注释。 +- `config-sample.yaml` 文件中的 `address` 和 `port` 应缩进两个空格。 +- 负载均衡器默认的内部访问域名是 `lb.kubesphere.local`。 + +{{}} + +### 持久化存储插件配置 + +在生产环境中,您需要准备持久化存储并在 `config-sample.yaml` 中配置存储插件(例如 CSI),以明确您想使用哪一种存储服务。有关更多信息,请参见[持久化存储配置](../../../installing-on-linux/persistent-storage-configurations/understand-persistent-storage/)。 + +### 启用可插拔组件(可选) + +自 v2.1.0 起,KubeSphere 解耦了一些核心功能组件。您可以在安装之前或者之后启用这些可插拔组件。如果您不启用这些组件,KubeSphere 将默认以最小化安装。 + +您可以根据您的需求来启用任意可插拔组件。强烈建议您安装这些可插拔组件,以便体验 KubeSphere 提供的全栈特性和功能。启用前,请确保您的机器有足够的 CPU 和内存。有关详情请参见[启用可插拔组件](../../../pluggable-components/)。 + +### 开始安装 + +配置完成后,您可以执行以下命令来开始安装: + +```bash +./kk create cluster -f config-sample.yaml +``` + +### 验证安装 + +1. 运行以下命令查看安装日志。 + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + +2. 若您看到以下信息,您的高可用集群便已创建成功。 + + ```bash + ##################################################### + ### Welcome to KubeSphere! ### + ##################################################### + + Console: http://192.168.0.3:30880 + Account: admin + Password: P@88w0rd + + NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. + + ##################################################### + https://kubesphere.io 2020-xx-xx xx:xx:xx + ##################################################### + ``` diff --git a/content/zh/docs/v3.4/installing-on-linux/high-availability-configurations/set-up-ha-cluster-using-keepalived-haproxy.md b/content/zh/docs/v3.4/installing-on-linux/high-availability-configurations/set-up-ha-cluster-using-keepalived-haproxy.md new file mode 100644 index 000000000..c62a8ee5e --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/high-availability-configurations/set-up-ha-cluster-using-keepalived-haproxy.md @@ -0,0 +1,413 @@ +--- +title: "使用 Keepalived 和 HAproxy 创建高可用 Kubernetes 集群" +keywords: 'Kubernetes, KubeSphere, HA, 高可用, 安装, 配置, Keepalived, HAproxy' +description: '如何使用 Keepalived 和 HAproxy 配置高可用 Kubernetes 集群。' +linkTitle: "使用 Keepalived 和 HAproxy 创建高可用集群" +weight: 3220 +--- + +高可用 Kubernetes 集群能够确保应用程序在运行时不会出现服务中断,这也是生产的需求之一。为此,有很多方法可供选择以实现高可用。 + +本教程演示了如何配置 Keepalived 和 HAproxy 使负载均衡、实现高可用。步骤如下: + +1. 准备主机。 +2. 配置 Keepalived 和 HAproxy。 +3. 
使用 KubeKey 创建 Kubernetes 集群,并安装 KubeSphere。 + +## 集群架构 + +示例集群有三个主节点,三个工作节点,两个用于负载均衡的节点,以及一个虚拟 IP 地址。本示例中的虚拟 IP 地址也可称为“浮动 IP 地址”。这意味着在节点故障的情况下,该 IP 地址可在节点之间漂移,从而实现高可用。 + +![architecture-ha-k8s-cluster](/images/docs/v3.3/installing-on-linux/high-availability-configurations/set-up-ha-cluster-using-keepalived-haproxy/architecture-ha-k8s-cluster.png) + +请注意,在本示例中,Keepalived 和 HAproxy 没有安装在任何主节点上。但您也可以这样做,并同时实现高可用。然而,配置两个用于负载均衡的特定节点(您可以按需增加更多此类节点)会更加安全。这两个节点上只安装 Keepalived 和 HAproxy,以避免与任何 Kubernetes 组件和服务的潜在冲突。 + +## 准备主机 + +| IP 地址 | 主机名 | 角色 | +| ----------- | ------- | -------------------- | +| 172.16.0.2 | lb1 | Keepalived & HAproxy | +| 172.16.0.3 | lb2 | Keepalived & HAproxy | +| 172.16.0.4 | master1 | master, etcd | +| 172.16.0.5 | master2 | master, etcd | +| 172.16.0.6 | master3 | master, etcd | +| 172.16.0.7 | worker1 | worker | +| 172.16.0.8 | worker2 | worker | +| 172.16.0.9 | worker3 | worker | +| 172.16.0.10 | | 虚拟 IP 地址 | + +有关更多节点、网络、依赖项等要求的信息,请参见[多节点安装](../../../installing-on-linux/introduction/multioverview/#step-1-prepare-linux-hosts)。 + +## 配置负载均衡 + +[Keepalived](https://www.keepalived.org/) 提供 VRRP 实现,并允许您配置 Linux 机器使负载均衡,预防单点故障。[HAProxy](http://www.haproxy.org/) 提供可靠、高性能的负载均衡,能与 Keepalived 完美配合。 + +由于 `lb1` 和 `lb2` 上安装了 Keepalived 和 HAproxy,如果其中一个节点故障,虚拟 IP 地址(即浮动 IP 地址)将自动与另一个节点关联,使集群仍然可以正常运行,从而实现高可用。若有需要,也可以此为目的,添加更多安装 Keepalived 和 HAproxy 的节点。 + +先运行以下命令安装 Keepalived 和 HAproxy。 + +```bash +yum install keepalived haproxy psmisc -y +``` + +### HAproxy + +1. 在两台用于负载均衡的机器上运行以下命令以配置 Proxy(两台机器的 Proxy 配置相同): + + ```bash + vi /etc/haproxy/haproxy.cfg + ``` + +2. 以下是示例配置,供您参考(请注意 `server` 字段。请记住 `6443` 是 `apiserver` 端口): + + ```bash + global + log /dev/log local0 warning + chroot /var/lib/haproxy + pidfile /var/run/haproxy.pid + maxconn 4000 + user haproxy + group haproxy + daemon + + stats socket /var/lib/haproxy/stats + + defaults + log global + option httplog + option dontlognull + timeout connect 5000 + timeout client 50000 + timeout server 50000 + + frontend kube-apiserver + bind *:6443 + mode tcp + option tcplog + default_backend kube-apiserver + + backend kube-apiserver + mode tcp + option tcplog + option tcp-check + balance roundrobin + default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100 + server kube-apiserver-1 172.16.0.4:6443 check # Replace the IP address with your own. + server kube-apiserver-2 172.16.0.5:6443 check # Replace the IP address with your own. + server kube-apiserver-3 172.16.0.6:6443 check # Replace the IP address with your own. + ``` + +3. 保存文件并运行以下命令以重启 HAproxy。 + + ```bash + systemctl restart haproxy + ``` + +4. 使 HAproxy 在开机后自动运行: + + ```bash + systemctl enable haproxy + ``` + +5. 确保您在另一台机器 (`lb2`) 上也配置了 HAproxy。 + +### Keepalived + +两台机器上必须都安装 Keepalived,但在配置上略有不同。 + +1. 运行以下命令以配置 Keepalived。 + + ```bash + vi /etc/keepalived/keepalived.conf + ``` + +2. 
以下是示例配置 (`lb1`),供您参考: + + ```bash + global_defs { + notification_email { + } + router_id LVS_DEVEL + vrrp_skip_check_adv_addr + vrrp_garp_interval 0 + vrrp_gna_interval 0 + } + + vrrp_script chk_haproxy { + script "killall -0 haproxy" + interval 2 + weight 2 + } + + vrrp_instance haproxy-vip { + state BACKUP + priority 100 + interface eth0 # Network card + virtual_router_id 60 + advert_int 1 + authentication { + auth_type PASS + auth_pass 1111 + } + unicast_src_ip 172.16.0.2 # The IP address of this machine + unicast_peer { + 172.16.0.3 # The IP address of peer machines + } + + virtual_ipaddress { + 172.16.0.10/24 # The VIP address + } + + track_script { + chk_haproxy + } + } + ``` + + {{< notice note >}} + + - 对于 `interface` 字段,您必须提供自己的网卡信息。您可以在机器上运行 `ifconfig` 以获取该值。 + + - 为 `unicast_src_ip` 提供的 IP 地址是您当前机器的 IP 地址。对于也安装了 HAproxy 和 Keepalived 进行负载均衡的其他机器,必须在字段 `unicast_peer` 中输入其 IP 地址。 + + {{}} + +3. 保存文件并运行以下命令以重启 Keepalived。 + + ```bash + systemctl restart keepalived + ``` + +4. 使 Keepalived 在开机后自动运行: + + ```bash + systemctl enable keepalived + ``` + +5. 确保您在另一台机器 (`lb2`) 上也配置了 Keepalived。 + +## 验证高可用 + +在开始创建 Kubernetes 集群之前,请确保已经测试了高可用。 + +1. 在机器 `lb1` 上,运行以下命令: + + ```bash + [root@lb1 ~]# ip a s + 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever + 2: eth0: mtu 1500 qdisc mq state UP group default qlen 1000 + link/ether 52:54:9e:27:38:c8 brd ff:ff:ff:ff:ff:ff + inet 172.16.0.2/24 brd 172.16.0.255 scope global noprefixroute dynamic eth0 + valid_lft 73334sec preferred_lft 73334sec + inet 172.16.0.10/24 scope global secondary eth0 # The VIP address + valid_lft forever preferred_lft forever + inet6 fe80::510e:f96:98b2:af40/64 scope link noprefixroute + valid_lft forever preferred_lft forever + ``` + +2. 如上图所示,虚拟 IP 地址已经成功添加。模拟此节点上的故障: + + ```bash + systemctl stop haproxy + ``` + +3. 再次检查浮动 IP 地址,您可以看到该地址在 `lb1` 上消失了。 + + ```bash + [root@lb1 ~]# ip a s + 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever + 2: eth0: mtu 1500 qdisc mq state UP group default qlen 1000 + link/ether 52:54:9e:27:38:c8 brd ff:ff:ff:ff:ff:ff + inet 172.16.0.2/24 brd 172.16.0.255 scope global noprefixroute dynamic eth0 + valid_lft 72802sec preferred_lft 72802sec + inet6 fe80::510e:f96:98b2:af40/64 scope link noprefixroute + valid_lft forever preferred_lft forever + ``` + +4. 
理论上讲,若配置成功,该虚拟 IP 会漂移到另一台机器 (`lb2`) 上。在 `lb2` 上运行以下命令,这是预期的输出: + + ```bash + [root@lb2 ~]# ip a s + 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever + 2: eth0: mtu 1500 qdisc mq state UP group default qlen 1000 + link/ether 52:54:9e:3f:51:ba brd ff:ff:ff:ff:ff:ff + inet 172.16.0.3/24 brd 172.16.0.255 scope global noprefixroute dynamic eth0 + valid_lft 72690sec preferred_lft 72690sec + inet 172.16.0.10/24 scope global secondary eth0 # The VIP address + valid_lft forever preferred_lft forever + inet6 fe80::f67c:bd4f:d6d5:1d9b/64 scope link noprefixroute + valid_lft forever preferred_lft forever + ``` + +5. 如上所示,高可用已经配置成功。 + +## 使用 KubeKey 创建 Kubernetes 集群 + +[KubeKey](https://github.com/kubesphere/kubekey) 是一款用来创建 Kubernetes 集群的工具,高效而便捷。请按照以下步骤下载 KubeKey。 + +{{< tabs >}} + +{{< tab "如果您能正常访问 GitHub/Googleapis" >}} + +从 [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) 下载 KubeKey 或者直接使用以下命令。 + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "如果您访问 GitHub/Googleapis 受限" >}} + +首先运行以下命令,以确保您从正确的区域下载 KubeKey。 + +```bash +export KKZONE=cn +``` + +运行以下命令来下载 KubeKey: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +下载 KubeKey 之后,如果您将其转移到访问 Googleapis 受限的新机器上,请务必再次运行 `export KKZONE=cn`,然后继续执行以下步骤。 + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +通过以上命令,可以下载 KubeKey 的最新版本。您可以更改命令中的版本号来下载特定的版本。 + +{{}} + +使 `kk` 成为可执行文件: + +```bash +chmod +x kk +``` + +使用默认配置创建一个示例配置文件。此处以 Kubernetes v1.22.12 作为示例。 + +```bash +./kk create config --with-kubesphere v3.3.2 --with-kubernetes v1.22.12 +``` + +{{< notice note >}} + +- 安装 KubeSphere 3.3 的建议 Kubernetes 版本:v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 v1.21.x。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.23.10。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 + +- 如果您没有在本步骤的命令中添加标志 `--with-kubesphere`,那么除非您使用配置文件中的 `addons` 字段进行安装,或者稍后使用 `./kk create cluster` 时再添加该标志,否则 KubeSphere 将不会被部署。 +- 如果您添加标志 `--with-kubesphere` 时未指定 KubeSphere 版本,则会安装最新版本的 KubeSphere。 + +{{}} + +## 部署 KubeSphere 和 Kubernetes + +运行上述命令后,将创建配置文件 `config-sample.yaml`。编辑文件以添加机器信息、配置负载均衡器等。 + +{{< notice note >}} + +如果自定义文件名,那么文件名可能会有所不同。 + +{{}} + +### config-sample.yaml 示例 + +```yaml +... +spec: + hosts: + - {name: master1, address: 172.16.0.4, internalAddress: 172.16.0.4, user: root, password: Testing123} + - {name: master2, address: 172.16.0.5, internalAddress: 172.16.0.5, user: root, password: Testing123} + - {name: master3, address: 172.16.0.6, internalAddress: 172.16.0.6, user: root, password: Testing123} + - {name: worker1, address: 172.16.0.7, internalAddress: 172.16.0.7, user: root, password: Testing123} + - {name: worker2, address: 172.16.0.8, internalAddress: 172.16.0.8, user: root, password: Testing123} + - {name: worker3, address: 172.16.0.9, internalAddress: 172.16.0.9, user: root, password: Testing123} + roleGroups: + etcd: + - master1 + - master2 + - master3 + control-plane: + - master1 + - master2 + - master3 + worker: + - worker1 + - worker2 + - worker3 + controlPlaneEndpoint: + domain: lb.kubesphere.local + address: 172.16.0.10 # The VIP address + port: 6443 +... 
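+  # 注:以上仅展示需要修改的 hosts、roleGroups 和 controlPlaneEndpoint 部分;省略的其余字段(kubernetes、network、registry 等)可沿用 ./kk create config 生成的默认配置。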
+``` + +{{< notice note >}} + +- 请使用您自己的 VIP 地址来替换 `controlPlaneEndpoint.address` 的值。 +- 有关更多本配置文件中不同参数的信息,请参见[多节点安装](../../../installing-on-linux/introduction/multioverview/#2-edit-the-configuration-file)。 + +{{}} + +### 开始安装 + +完成配置之后,可以执行以下命令开始安装: + +```bash +./kk create cluster -f config-sample.yaml +``` + +### 验证安装 + +1. 运行以下命令以检查安装日志。 + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + +2. 看到以下信息时,表明高可用集群已成功创建。 + + ```bash + ##################################################### + ### Welcome to KubeSphere! ### + ##################################################### + + Console: http://172.16.0.4:30880 + Account: admin + Password: P@88w0rd + + NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. + + ##################################################### + https://kubesphere.io 2020-xx-xx xx:xx:xx + ##################################################### + ``` diff --git a/content/zh/docs/v3.4/installing-on-linux/introduction/_index.md b/content/zh/docs/v3.4/installing-on-linux/introduction/_index.md new file mode 100644 index 000000000..bd4e0a63d --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/introduction/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "简介" +weight: 3100 + +_build: + render: false +--- diff --git a/content/zh/docs/v3.4/installing-on-linux/introduction/air-gapped-installation.md b/content/zh/docs/v3.4/installing-on-linux/introduction/air-gapped-installation.md new file mode 100644 index 000000000..50e967fce --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/introduction/air-gapped-installation.md @@ -0,0 +1,587 @@ +--- +title: "离线安装" +keywords: '离线, 安装, KubeSphere' +description: '了解如何在离线环境下安装 KubeSphere 和 Kubernetes。' + +linkTitle: "离线安装" +weight: 3130 +--- + +KubeKey 是一个用于部署 Kubernetes 集群的开源轻量级工具。它提供了一种灵活、快速、便捷的方式来仅安装 Kubernetes/K3s,或同时安装 Kubernetes/K3s 和 KubeSphere,以及其他云原生插件。除此之外,它也是扩展和升级集群的有效工具。 + +KubeKey v2.1.0 版本新增了清单(manifest)和制品(artifact)的概念,为用户离线部署 Kubernetes 集群提供了一种解决方案。manifest 是一个描述当前 Kubernetes 集群信息和定义 artifact 制品中需要包含哪些内容的文本文件。在过去,用户需要准备部署工具,镜像 tar 包和其他相关的二进制文件,每位用户需要部署的 Kubernetes 版本和需要部署的镜像都是不同的。现在使用 KubeKey,用户只需使用清单 manifest 文件来定义将要离线部署的集群环境需要的内容,再通过该 manifest 来导出制品 artifact 文件即可完成准备工作。离线部署时只需要 KubeKey 和 artifact 就可快速、简单的在环境中部署镜像仓库和 Kubernetes 集群。 + +## 前提条件 + +要开始进行多节点安装,您需要参考如下示例准备至少三台主机。 + +| 主机 IP | 主机名称 | 角色 | +| ---------------- | ---- | ---------------- | +| 192.168.0.2 | node1 | 联网主机用于制作离线包 | +| 192.168.0.3 | node2 | 离线环境主节点 | +| 192.168.0.4 | node3 | 离线环境镜像仓库节点 | + +## 部署准备 + +1. 执行以下命令下载 KubeKey 并解压: + + {{< tabs >}} + + {{< tab "如果您能正常访问 GitHub/Googleapis" >}} + + 从 [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) 下载 KubeKey 或者直接运行以下命令。 + + ```bash + curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - + ``` + + {{}} + + {{< tab "如果您访问 GitHub/Googleapis 受限" >}} + + 首先运行以下命令,以确保您从正确的区域下载 KubeKey。 + + ```bash + export KKZONE=cn + ``` + + 运行以下命令来下载 KubeKey: + + ```bash + curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - + ``` + {{}} + + {{}} + +2. 
在联网主机上执行以下命令,并复制示例中的 manifest 内容。关于更多信息,请参阅 [manifest-example](https://github.com/kubesphere/kubekey/blob/master/docs/manifest-example.md)。 + + ```bash + vim manifest.yaml + ``` + + ```yaml + --- + apiVersion: kubekey.kubesphere.io/v1alpha2 + kind: Manifest + metadata: + name: sample + spec: + arches: + - amd64 + operatingSystems: + - arch: amd64 + type: linux + id: centos + version: "7" + repository: + iso: + localPath: + url: https://github.com/kubesphere/kubekey/releases/download/v3.0.7/centos7-rpms-amd64.iso + - arch: amd64 + type: linux + id: ubuntu + version: "20.04" + repository: + iso: + localPath: + url: https://github.com/kubesphere/kubekey/releases/download/v3.0.7/ubuntu-20.04-debs-amd64.iso + kubernetesDistributions: + - type: kubernetes + version: v1.22.12 + components: + helm: + version: v3.9.0 + cni: + version: v0.9.1 + etcd: + version: v3.4.13 + ## For now, if your cluster container runtime is containerd, KubeKey will add a docker 20.10.8 container runtime in the below list. + ## The reason is KubeKey creates a cluster with containerd by installing a docker first and making kubelet connect the socket file of containerd which docker contained. + containerRuntimes: + - type: docker + version: 20.10.8 + crictl: + version: v1.24.0 + docker-registry: + version: "2" + harbor: + version: v2.5.3 + docker-compose: + version: v2.2.2 + images: + - registry.cn-beijing.aliyuncs.com/kubesphereio/kube-apiserver:v1.22.12 + - registry.cn-beijing.aliyuncs.com/kubesphereio/kube-controller-manager:v1.22.12 + - registry.cn-beijing.aliyuncs.com/kubesphereio/kube-proxy:v1.22.12 + - registry.cn-beijing.aliyuncs.com/kubesphereio/kube-scheduler:v1.22.12 + - registry.cn-beijing.aliyuncs.com/kubesphereio/pause:3.5 + - registry.cn-beijing.aliyuncs.com/kubesphereio/coredns:1.8.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/cni:v3.23.2 + - registry.cn-beijing.aliyuncs.com/kubesphereio/kube-controllers:v3.23.2 + - registry.cn-beijing.aliyuncs.com/kubesphereio/node:v3.23.2 + - registry.cn-beijing.aliyuncs.com/kubesphereio/pod2daemon-flexvol:v3.23.2 + - registry.cn-beijing.aliyuncs.com/kubesphereio/typha:v3.23.2 + - registry.cn-beijing.aliyuncs.com/kubesphereio/flannel:v0.12.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/provisioner-localpv:3.3.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/linux-utils:3.3.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/haproxy:2.3 + - registry.cn-beijing.aliyuncs.com/kubesphereio/nfs-subdir-external-provisioner:v4.0.2 + - registry.cn-beijing.aliyuncs.com/kubesphereio/k8s-dns-node-cache:1.15.12 + - registry.cn-beijing.aliyuncs.com/kubesphereio/ks-installer:v3.3.2 + - registry.cn-beijing.aliyuncs.com/kubesphereio/ks-apiserver:v3.3.2 + - registry.cn-beijing.aliyuncs.com/kubesphereio/ks-console:v3.3.2 + - registry.cn-beijing.aliyuncs.com/kubesphereio/ks-controller-manager:v3.3.2 + - registry.cn-beijing.aliyuncs.com/kubesphereio/ks-upgrade:v3.3.2 + - registry.cn-beijing.aliyuncs.com/kubesphereio/kubectl:v1.22.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/kubectl:v1.21.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/kubectl:v1.20.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/kubefed:v0.8.1 + - registry.cn-beijing.aliyuncs.com/kubesphereio/tower:v0.2.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/minio:RELEASE.2019-08-07T01-59-21Z + - registry.cn-beijing.aliyuncs.com/kubesphereio/mc:RELEASE.2019-08-07T23-14-43Z + - registry.cn-beijing.aliyuncs.com/kubesphereio/snapshot-controller:v4.0.0 + - 
registry.cn-beijing.aliyuncs.com/kubesphereio/nginx-ingress-controller:v1.1.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/defaultbackend-amd64:1.4 + - registry.cn-beijing.aliyuncs.com/kubesphereio/metrics-server:v0.4.2 + - registry.cn-beijing.aliyuncs.com/kubesphereio/redis:5.0.14-alpine + - registry.cn-beijing.aliyuncs.com/kubesphereio/haproxy:2.0.25-alpine + - registry.cn-beijing.aliyuncs.com/kubesphereio/alpine:3.14 + - registry.cn-beijing.aliyuncs.com/kubesphereio/openldap:1.3.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/netshoot:v1.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/cloudcore:v1.9.2 + - registry.cn-beijing.aliyuncs.com/kubesphereio/iptables-manager:v1.9.2 + - registry.cn-beijing.aliyuncs.com/kubesphereio/edgeservice:v0.2.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/gatekeeper:v3.5.2 + - registry.cn-beijing.aliyuncs.com/kubesphereio/openpitrix-jobs:v3.3.2 + - registry.cn-beijing.aliyuncs.com/kubesphereio/devops-apiserver:ks-v3.3.2 + - registry.cn-beijing.aliyuncs.com/kubesphereio/devops-controller:ks-v3.3.2 + - registry.cn-beijing.aliyuncs.com/kubesphereio/devops-tools:ks-v3.3.2 + - registry.cn-beijing.aliyuncs.com/kubesphereio/ks-jenkins:v3.3.0-2.319.1 + - registry.cn-beijing.aliyuncs.com/kubesphereio/inbound-agent:4.10-2 + - registry.cn-beijing.aliyuncs.com/kubesphereio/builder-base:v3.2.2 + - registry.cn-beijing.aliyuncs.com/kubesphereio/builder-nodejs:v3.2.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/builder-maven:v3.2.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/builder-maven:v3.2.1-jdk11 + - registry.cn-beijing.aliyuncs.com/kubesphereio/builder-python:v3.2.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/builder-go:v3.2.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/builder-go:v3.2.2-1.16 + - registry.cn-beijing.aliyuncs.com/kubesphereio/builder-go:v3.2.2-1.17 + - registry.cn-beijing.aliyuncs.com/kubesphereio/builder-go:v3.2.2-1.18 + - registry.cn-beijing.aliyuncs.com/kubesphereio/builder-base:v3.2.2-podman + - registry.cn-beijing.aliyuncs.com/kubesphereio/builder-nodejs:v3.2.0-podman + - registry.cn-beijing.aliyuncs.com/kubesphereio/builder-maven:v3.2.0-podman + - registry.cn-beijing.aliyuncs.com/kubesphereio/builder-maven:v3.2.1-jdk11-podman + - registry.cn-beijing.aliyuncs.com/kubesphereio/builder-python:v3.2.0-podman + - registry.cn-beijing.aliyuncs.com/kubesphereio/builder-go:v3.2.0-podman + - registry.cn-beijing.aliyuncs.com/kubesphereio/builder-go:v3.2.2-1.16-podman + - registry.cn-beijing.aliyuncs.com/kubesphereio/builder-go:v3.2.2-1.17-podman + - registry.cn-beijing.aliyuncs.com/kubesphereio/builder-go:v3.2.2-1.18-podman + - registry.cn-beijing.aliyuncs.com/kubesphereio/s2ioperator:v3.2.1 + - registry.cn-beijing.aliyuncs.com/kubesphereio/s2irun:v3.2.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/s2i-binary:v3.2.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/tomcat85-java11-centos7:v3.2.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/tomcat85-java11-runtime:v3.2.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/tomcat85-java8-centos7:v3.2.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/tomcat85-java8-runtime:v3.2.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/java-11-centos7:v3.2.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/java-8-centos7:v3.2.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/java-8-runtime:v3.2.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/java-11-runtime:v3.2.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/nodejs-8-centos7:v3.2.0 + - 
registry.cn-beijing.aliyuncs.com/kubesphereio/nodejs-6-centos7:v3.2.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/nodejs-4-centos7:v3.2.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/python-36-centos7:v3.2.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/python-35-centos7:v3.2.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/python-34-centos7:v3.2.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/python-27-centos7:v3.2.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/argocd:v2.3.3 + - registry.cn-beijing.aliyuncs.com/kubesphereio/argocd-applicationset:v0.4.1 + - registry.cn-beijing.aliyuncs.com/kubesphereio/dex:v2.30.2 + - registry.cn-beijing.aliyuncs.com/kubesphereio/redis:6.2.6-alpine + - registry.cn-beijing.aliyuncs.com/kubesphereio/configmap-reload:v0.5.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/prometheus:v2.34.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/prometheus-config-reloader:v0.55.1 + - registry.cn-beijing.aliyuncs.com/kubesphereio/prometheus-operator:v0.55.1 + - registry.cn-beijing.aliyuncs.com/kubesphereio/kube-rbac-proxy:v0.11.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/kube-state-metrics:v2.5.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/node-exporter:v1.3.1 + - registry.cn-beijing.aliyuncs.com/kubesphereio/alertmanager:v0.23.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/thanos:v0.25.2 + - registry.cn-beijing.aliyuncs.com/kubesphereio/grafana:8.3.3 + - registry.cn-beijing.aliyuncs.com/kubesphereio/kube-rbac-proxy:v0.8.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/notification-manager-operator:v1.4.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/notification-manager:v1.4.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/notification-tenant-sidecar:v3.2.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/elasticsearch-curator:v5.7.6 + - registry.cn-beijing.aliyuncs.com/kubesphereio/elasticsearch-oss:6.8.22 + - registry.cn-beijing.aliyuncs.com/kubesphereio/fluentbit-operator:v0.13.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/docker:19.03 + - registry.cn-beijing.aliyuncs.com/kubesphereio/fluent-bit:v1.8.11 + - registry.cn-beijing.aliyuncs.com/kubesphereio/log-sidecar-injector:1.1 + - registry.cn-beijing.aliyuncs.com/kubesphereio/filebeat:6.7.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/kube-events-operator:v0.4.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/kube-events-exporter:v0.4.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/kube-events-ruler:v0.4.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/kube-auditing-operator:v0.2.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/kube-auditing-webhook:v0.2.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/pilot:1.11.1 + - registry.cn-beijing.aliyuncs.com/kubesphereio/proxyv2:1.11.1 + - registry.cn-beijing.aliyuncs.com/kubesphereio/jaeger-operator:1.27 + - registry.cn-beijing.aliyuncs.com/kubesphereio/jaeger-agent:1.27 + - registry.cn-beijing.aliyuncs.com/kubesphereio/jaeger-collector:1.27 + - registry.cn-beijing.aliyuncs.com/kubesphereio/jaeger-query:1.27 + - registry.cn-beijing.aliyuncs.com/kubesphereio/jaeger-es-index-cleaner:1.27 + - registry.cn-beijing.aliyuncs.com/kubesphereio/kiali-operator:v1.38.1 + - registry.cn-beijing.aliyuncs.com/kubesphereio/kiali:v1.38 + - registry.cn-beijing.aliyuncs.com/kubesphereio/busybox:1.31.1 + - registry.cn-beijing.aliyuncs.com/kubesphereio/nginx:1.14-alpine + - registry.cn-beijing.aliyuncs.com/kubesphereio/wget:1.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/hello:plain-text + - 
registry.cn-beijing.aliyuncs.com/kubesphereio/wordpress:4.8-apache + - registry.cn-beijing.aliyuncs.com/kubesphereio/hpa-example:latest + - registry.cn-beijing.aliyuncs.com/kubesphereio/fluentd:v1.4.2-2.0 + - registry.cn-beijing.aliyuncs.com/kubesphereio/perl:latest + - registry.cn-beijing.aliyuncs.com/kubesphereio/examples-bookinfo-productpage-v1:1.16.2 + - registry.cn-beijing.aliyuncs.com/kubesphereio/examples-bookinfo-reviews-v1:1.16.2 + - registry.cn-beijing.aliyuncs.com/kubesphereio/examples-bookinfo-reviews-v2:1.16.2 + - registry.cn-beijing.aliyuncs.com/kubesphereio/examples-bookinfo-details-v1:1.16.2 + - registry.cn-beijing.aliyuncs.com/kubesphereio/examples-bookinfo-ratings-v1:1.16.3 + - registry.cn-beijing.aliyuncs.com/kubesphereio/scope:1.13.0 + ``` + + {{< notice note >}} + + - 若需要导出的 artifact 文件中包含操作系统依赖文件(如:conntarck、chrony 等),可在 **operationSystem** 元素中的 **.repostiory.iso.url** 中配置相应的 ISO 依赖文件下载地址或者提前下载 ISO 包到本地在 **localPath** 里填写本地存放路径并删除 **url** 配置项。 + + - 开启 **harbor** 和 **docker-compose** 配置项,为后面通过 KubeKey 自建 harbor 仓库推送镜像使用。 + + - 默认创建的 manifest 里面的镜像列表从 **docker.io** 获取。 + + - 可根据实际情况修改 **manifest-sample.yaml** 文件的内容,用于之后导出期望的 artifact 文件。 + + - 您可以访问 https://github.com/kubesphere/kubekey/releases/tag/v3.0.7 下载 ISO 文件。 + + {{}} + +3. (可选)如果您已经拥有集群,那么可以在已有集群中执行 KubeKey 命令生成 manifest 文件,并参照步骤 2 中的示例配置 manifest 文件内容。 + ```bash + ./kk create manifest + ``` +4. 导出制品 artifact。 + + {{< tabs >}} + + {{< tab "如果您能正常访问 GitHub/Googleapis" >}} + + 执行以下命令: + + ```bash + ./kk artifact export -m manifest-sample.yaml -o kubesphere.tar.gz + ``` + + {{}} + + {{< tab "如果您访问 GitHub/Googleapis 受限" >}} + + 依次运行以下命令: + + ```bash + export KKZONE=cn + + ./kk artifact export -m manifest-sample.yaml -o kubesphere.tar.gz + ``` + + {{}} + + {{}} + + {{< notice note >}} + + 制品(artifact)是一个根据指定的 manifest 文件内容导出的包含镜像 tar 包和相关二进制文件的 tgz 包。在 KubeKey 初始化镜像仓库、创建集群、添加节点和升级集群的命令中均可指定一个 artifact,KubeKey 将自动解包该 artifact 并在执行命令时直接使用解包出来的文件。 + + - 导出时请确保网络连接正常。 + + - KubeKey 会解析镜像列表中的镜像名,若镜像名中的镜像仓库需要鉴权信息,可在 manifest 文件中的 **.registry.auths** 字段中进行配置。 + + {{}} + +## 离线安装集群 + +1. 将下载的 KubeKey 和制品 artifact 通过 U 盘等介质拷贝至离线环境安装节点。 + +2. 执行以下命令创建离线集群配置文件: + + ```bash + ./kk create config --with-kubesphere v3.3.2 --with-kubernetes v1.22.12 -f config-sample.yaml + ``` + +3. 执行以下命令修改配置文件: + + ```bash + vim config-sample.yaml + ``` + + {{< notice note >}} + + - 按照实际离线环境配置修改节点信息。 + - 必须指定 `registry` 仓库部署节点(用于 KubeKey 部署自建 Harbor 仓库)。 + - `registry` 里必须指定 `type` 类型为 `harbor`,否则默认安装 docker registry。 + + {{}} + + ```yaml + apiVersion: kubekey.kubesphere.io/v1alpha2 + kind: Cluster + metadata: + name: sample + spec: + hosts: + - {name: master, address: 192.168.0.3, internalAddress: 192.168.0.3, user: root, password: ""} + - {name: node1, address: 192.168.0.4, internalAddress: 192.168.0.4, user: root, password: ""} + + roleGroups: + etcd: + - master + control-plane: + - master + worker: + - node1 + # 如需使用 kk 自动部署镜像仓库,请设置该主机组 (建议仓库与集群分离部署,减少相互影响) + registry: + - node1 + controlPlaneEndpoint: + ## Internal loadbalancer for apiservers + # internalLoadbalancer: haproxy + + domain: lb.kubesphere.local + address: "" + port: 6443 + kubernetes: + version: v1.22.12 + clusterName: cluster.local + network: + plugin: calico + kubePodsCIDR: 10.233.64.0/18 + kubeServiceCIDR: 10.233.0.0/18 + ## multus support. 
https://github.com/k8snetworkplumbingwg/multus-cni + multusCNI: + enabled: false + registry: + # 如需使用 kk 部署 harbor, 可将该参数设置为 harbor,不设置该参数且需使用 kk 创建容器镜像仓库,将默认使用docker registry。 + type: harbor + # 如使用 kk 部署的 harbor 或其他需要登录的仓库,可设置对应仓库的auths,如使用 kk 创建的 docker registry 仓库,则无需配置该参数。 + # 注意:如使用 kk 部署 harbor,该参数请于 harbor 启动后设置。 + #auths: + # "dockerhub.kubekey.local": + # username: admin + # password: Harbor12345 + # 设置集群部署时使用的私有仓库 + privateRegistry: "" + namespaceOverride: "" + registryMirrors: [] + insecureRegistries: [] + addons: [] + ``` + +4. 执行以下命令安装镜像仓库: + + ```bash + ./kk init registry -f config-sample.yaml -a kubesphere.tar.gz + ``` + + {{< notice note >}} + + 命令中的参数解释如下: + + - **config-sample.yaml** 指离线环境集群的配置文件。 + + - **kubesphere.tar.gz** 指源集群打包出来的 tar 包镜像。 + + {{}} + +5. 创建 Harbor 项目。 + + {{< notice note >}} + + 由于 Harbor 项目存在访问控制(RBAC)的限制,即只有指定角色的用户才能执行某些操作。如果您未创建项目,则镜像不能被推送到 Harbor。Harbor 中有两种类型的项目: + + - 公共项目(Public):任何用户都可以从这个项目中拉取镜像。 + - 私有项目(Private):只有作为项目成员的用户可以拉取镜像。 + + Harbor 管理员账号:**admin**,密码:**Harbor12345**。Harbor 安装文件在 **/opt/harbor** , 如需运维 Harbor,可至该目录下。 + + {{}} + + 方法 1:执行脚本创建 Harbor 项目。 + + a. 执行以下命令下载指定脚本初始化 Harbor 仓库: + + ```bash + curl -O https://raw.githubusercontent.com/kubesphere/ks-installer/master/scripts/create_project_harbor.sh + ``` + + b. 执行以下命令修改脚本配置文件: + + ```bash + vim create_project_harbor.sh + ``` + + ```yaml + #!/usr/bin/env bash + + # Copyright 2018 The KubeSphere Authors. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + url="https://dockerhub.kubekey.local" #修改url的值为https://dockerhub.kubekey.local + user="admin" + passwd="Harbor12345" + + harbor_projects=(library + kubesphereio + kubesphere + calico + coredns + openebs + csiplugin + minio + mirrorgooglecontainers + osixia + prom + thanosio + jimmidyson + grafana + elastic + istio + jaegertracing + jenkins + weaveworks + openpitrix + joosthofman + nginxdemos + fluent + kubeedge + ) + + for project in "${harbor_projects[@]}"; do + echo "creating $project" + curl -u "${user}:${passwd}" -X POST -H "Content-Type: application/json" "${url}/api/v2.0/projects" -d "{ \"project_name\": \"${project}\", \"public\": true}" -k #curl命令末尾加上 -k + done + + ``` + + {{< notice note >}} + + - 修改 **url** 的值为 **https://dockerhub.kubekey.local**。 + + - 需要指定仓库项目名称和镜像列表的项目名称保持一致。 + + - 脚本末尾 `curl` 命令末尾加上 `-k`。 + + {{}} + + c. 执行以下命令创建 Harbor 项目: + + ```bash + chmod +x create_project_harbor.sh + ``` + + ```bash + ./create_project_harbor.sh + ``` + + 方法 2:登录 Harbor 仓库创建项目。将项目设置为**公开**以便所有用户都能够拉取镜像。关于如何创建项目,请参阅[创建项目](https://goharbor.io/docs/1.10/working-with-projects/create-projects/)。 + + ![harbor-login-7](/images/docs/v3.3/zh-cn/appstore/built-in-apps/deploy-harbor-on-ks/harbor-login-7.PNG) + +6. 再次执行以下命令修改集群配置文件: + + ```bash + vim config-sample.yaml + ``` + + ```yaml + ... 
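+    # 注:以下 registry 配置为本步骤需要补充的内容,config-sample.yaml 中的其他字段保持不变。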
+ registry: + type: harbor + auths: + "dockerhub.kubekey.local": + username: admin + password: Harbor12345 + privateRegistry: "dockerhub.kubekey.local" + namespaceOverride: "kubesphereio" + registryMirrors: [] + insecureRegistries: [] + addons: [] + ``` + + {{< notice note >}} + + - 新增 **auths** 配置增加 **dockerhub.kubekey.local** 和账号密码。 + - **privateRegistry** 增加 **dockerhub.kubekey.local**。 + - **namespaceOverride** 增加 **kubesphereio**。 + + {{}} + +7. 执行以下命令安装 KubeSphere 集群: + + ```bash + ./kk create cluster -f config-sample.yaml -a kubesphere.tar.gz --with-packages + ``` + + 参数解释如下: + + - **config-sample.yaml**:离线环境集群的配置文件。 + - **kubesphere.tar.gz**:源集群打包出来的 tar 包镜像。 + - **--with-packages**:若需要安装操作系统依赖,需指定该选项。 + +8. 执行以下命令查看集群状态: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + 安装完成后,您会看到以下内容: + + ```bash + ************************************************** + ##################################################### + ### Welcome to KubeSphere! ### + ##################################################### + + Console: http://192.168.0.3:30880 + Account: admin + Password: P@88w0rd + + NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 1. Please change the default password after login. + + ##################################################### + https://kubesphere.io 2022-02-28 23:30:06 + ##################################################### + ``` + +9. 通过 `http://{IP}:30880` 使用默认帐户和密码 `admin/P@88w0rd` 访问 KubeSphere 的 Web 控制台。 + + ![kubesphere-login](/images/docs/v3.3/zh-cn/upgrade/air-gapped-upgrade-with-ks-installer/kubesphere-login.PNG) + + + {{< notice note >}} + + 要访问控制台,请确保在您的安全组中打开端口 30880。 + + {{}} diff --git a/content/zh/docs/v3.4/installing-on-linux/introduction/intro.md b/content/zh/docs/v3.4/installing-on-linux/introduction/intro.md new file mode 100644 index 000000000..4ff35ff15 --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/introduction/intro.md @@ -0,0 +1,70 @@ +--- +title: "概述" +keywords: 'Kubernetes, KubeSphere, Linux, 安装' +description: '浏览本章的概述,包括安装准备,安装工具和方法以及存储设置。' +linkTitle: "概述" +weight: 3110 +--- + +KubeSphere 是 [GitHub](https://github.com/kubesphere) 上的一个开源项目,是成千上万名社区用户的聚集地。很多用户都在使用 KubeSphere 运行工作负载。对于在 Linux 上的安装,KubeSphere 既可以部署在云端,也可以部署在本地环境中,例如 AWS EC2、Azure VM 和裸机等。 + +KubeSphere 为用户提供轻量级安装程序 [KubeKey](https://github.com/kubesphere/kubekey)(该程序支持安装 Kubernetes、KubeSphere 及相关插件),安装过程简单而友好。KubeKey 不仅能帮助用户在线创建集群,还能作为离线安装解决方案。 + +以下是可用的安装选项: + +- [All-in-One](../../../quick-start/all-in-one-on-linux/):在单个节点上安装 KubeSphere(仅为让用户快速熟悉 KubeSphere)。 +- [多节点安装](../multioverview/):在多个节点上安装 KubeSphere(用于测试或开发)。 +- [在 Linux 上离线安装](../air-gapped-installation/):将 KubeSphere 的所有镜像打包(便于在 Linux 上进行离线安装)。 +- [高可用安装](../../../installing-on-linux/high-availability-configurations/ha-configuration/):安装具有多个节点的高可用 KubeSphere 集群,该集群用于生产环境。 +- 最小化安装:仅安装 KubeSphere 所需的最少系统组件。以下是最低资源要求: + - 2 个 CPU + - 4 GB 运行内存 + - 40 GB 存储空间 +- [全家桶安装](../../../pluggable-components/):安装 KubeSphere 的所有可用系统组件,例如 DevOps、服务网格、告警等。 + +{{< notice note >}} + +并非所有选项都相互排斥,例如,您可以在离线环境中使用最小化安装将 KubeSphere 部署在多个节点上。 + +{{}} + +如果您已有 Kubernetes 集群,请参阅[在 Kubernetes 上安装 KubeSphere 概述](../../../installing-on-kubernetes/introduction/overview/)。 + +## 安装前 + +- 由于需要从互联网上拉取镜像,因此必须在联网环境下进行。否则,需要改用[离线环境安装 
KubeSphere](../air-gapped-installation/)。 +- 对于 All-in-One,唯一的节点既是主节点,也是工作节点。 +- 对于多节点安装,需要在配置文件中提供主机信息。 +- 在安装之前,请参见[端口要求](../port-firewall/)。 + +## KubeKey + +[KubeKey](https://github.com/kubesphere/kubekey) 为集群的安装和配置提供了一种有效的方法。您可以使用它来创建、扩缩和升级 Kubernetes 集群。您也可以在设置集群时使用 KubeKey 安装云原生组件(YAML 或 Chart)。有关更多信息,请参见 [KubeKey](../kubekey/)。 + +## 快速安装用于开发和测试 + +自 v2.1.0 以来,KubeSphere 已经解耦了一些组件。默认情况下,KubeKey 仅安装必要的组件,这样安装速度快,资源消耗也最少。如果要启用增强的可插拔功能,请参见[启用可插拔组件](../../../pluggable-components/)了解详细信息。 + +快速安装 KubeSphere 仅用于开发或测试,因为默认情况下它使用了基于 [openEBS](https://openebs.io/) 的 [Local Volume](https://kubernetes.io/zh/docs/concepts/storage/volumes/#local) 提供储存服务。如果需要在生产环境安装,请参见[高可用配置安装](../../../installing-on-linux/high-availability-configurations/ha-configuration/)。 + +## 存储配置 + +您可以在 KubeSphere 安装前或安装后配置持久化储存服务。同时,KubeSphere 支持各种开源存储解决方案(例如 Ceph 和 GlusterFS)以及商业存储产品。有关在安装 KubeSphere 之前配置存储类型的详细说明,请参考[持久化存储配置](../../../installing-on-linux/persistent-storage-configurations/understand-persistent-storage/)。 + +有关如何在安装 KubeSphere 之后配置存储类型,请参考[存储类](../../../cluster-administration/storageclass/)。 + +## 集群运维 + +### 添加新节点 + +通过 KubeKey,您可以在安装后增加节点数量,以满足更高的资源需求,尤其是在生产环境中。有关更多信息,请参见[添加新节点](../../../installing-on-linux/cluster-operation/add-new-nodes/)。 + +### 删除节点 + +您需要清空节点负载,然后再删除节点。有关更多信息,请参见[删除节点](../../cluster-operation/remove-nodes/)。 + +## 卸载 + +卸载 KubeSphere 意味着将其从您的机器上移除,该操作不可逆,请谨慎操作。 + +有关更多信息,请参见[卸载 KubeSphere 和 Kubernetes](../../../installing-on-linux/uninstall-kubesphere-and-kubernetes/)。 diff --git a/content/zh/docs/v3.4/installing-on-linux/introduction/kubekey.md b/content/zh/docs/v3.4/installing-on-linux/introduction/kubekey.md new file mode 100644 index 000000000..b9a8a710b --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/introduction/kubekey.md @@ -0,0 +1,90 @@ +--- +title: "KubeKey" +keywords: 'KubeKey,安装,KubeSphere' +description: '了解 KubeKey 概念以及 KubeKey 如何帮您创建、扩缩和升级 Kubernetes 集群。' +linkTitle: "KubeKey" +weight: 3120 +--- + +[KubeKey](https://github.com/kubesphere/kubekey)(由 Go 语言开发)是一种全新的安装工具,替代了以前使用的基于 ansible 的安装程序。KubeKey 为您提供灵活的安装选择,您可以仅安装 Kubernetes,也可以同时安装 Kubernetes 和 KubeSphere。 + +KubeKey 的几种使用场景: + +- 仅安装 Kubernetes; +- 使用一个命令同时安装 Kubernetes 和 KubeSphere; +- 扩缩集群; +- 升级集群; +- 安装 Kubernetes 相关的插件(Chart 或 YAML)。 + +## KubeKey 如何运作 + +下载 KubeKey 之后,您可以使用可执行文件 `kk` 来进行不同的操作。无论您是使用它来创建,扩缩还是升级集群,都必须事先使用 `kk` 准备配置文件。此配置文件包含集群的基本参数,例如主机信息、网络配置(CNI 插件以及 Pod 和 Service CIDR)、仓库镜像、插件(YAML 或 Chart)和可插拔组件选项(如果您安装 KubeSphere)。有关更多信息,请参见[示例配置文件](https://github.com/kubesphere/kubekey/blob/release-2.2/docs/config-example.md)。 + +准备好配置文件后,您需要使用 `./kk` 命令以及不同的标志来进行不同的操作。这之后,KubeKey 会自动安装 Docker,并拉取所有必要的镜像以进行安装。安装完成后,您还可以检查安装日志。 + +## 为什么选择 KubeKey + +- 以前基于 ansible 的安装程序依赖于许多软件,例如 Python。KubeKey 由 Go 语言开发,可以消除在多种环境中出现的问题,确保成功安装。 +- KubeKey 支持多种安装选项,例如 [All-in-One](../../../quick-start/all-in-one-on-linux/)、[多节点安装](../multioverview/)以及[离线安装](../air-gapped-installation/)。 +- KubeKey 使用 Kubeadm 在节点上尽可能多地并行安装 Kubernetes 集群,使安装更简便,提高效率。与旧版的安装程序相比,它极大地节省了安装时间。 +- KubeKey 提供[内置高可用模式](../../high-availability-configurations/internal-ha-configuration/),支持一键安装高可用 Kubernetes 集群。 +- KubeKey 旨在将集群作为对象来进行安装,即 CaaO。 + +## 下载 KubeKey + +{{< tabs >}} + +{{< tab "如果您能正常访问 GitHub/Googleapis" >}} + +从 [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) 下载 KubeKey 或者直接运行以下命令。 + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "如果您访问 GitHub/Googleapis 受限" >}} + +首先运行以下命令,以确保您从正确的区域下载 KubeKey。 + +```bash +export KKZONE=cn +``` + +运行以下命令来下载 KubeKey: 
+ +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +下载 KubeKey 之后,如果您将其转移到访问 Googleapis 受限的新机器上,请务必再次运行 `export KKZONE=cn`,然后继续执行以下步骤。 + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +通过以上的命令,可以下载 KubeKey 的最新版本。您可以更改命令中的版本号来下载特定的版本。 + +{{}} + +## 支持矩阵 + +若需使用 KubeKey 来安装 Kubernetes 和 KubeSphere 3.3,请参见下表以查看所有受支持的 Kubernetes 版本。 + +| KubeSphere 版本 | 受支持的 Kubernetes 版本 | +| ------------------ | ------------------------------------------------------------ | +| v3.3 | v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x | + +{{< notice note >}} + +- 您也可以运行 `./kk version --show-supported-k8s`,查看能使用 KubeKey 安装的所有受支持的 Kubernetes 版本。 +- 能使用 KubeKey 安装的 Kubernetes 版本与 KubeSphere 3.3 支持的 Kubernetes 版本不同。如需[在现有 Kubernetes 集群上安装 KubeSphere 3.3](../../../installing-on-kubernetes/introduction/overview/),您的 Kubernetes 版本必须为 v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。 +- 带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如果您需要使用 KubeEdge,为了避免兼容性问题,建议安装 v1.21.x 版本的 Kubernetes。 +{{}} \ No newline at end of file diff --git a/content/zh/docs/v3.4/installing-on-linux/introduction/multioverview.md b/content/zh/docs/v3.4/installing-on-linux/introduction/multioverview.md new file mode 100644 index 000000000..147ebf274 --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/introduction/multioverview.md @@ -0,0 +1,354 @@ +--- +title: "多节点安装" +keywords: '多节点, 安装, KubeSphere' +description: '了解在多节点集群上安装 KubeSphere 和 Kubernetes 的一般步骤。' +linkTitle: "多节点安装" +weight: 3120 +--- + +在生产环境中,由于单节点集群资源有限、计算能力不足,无法满足大部分需求,因此不建议在处理大规模数据时使用单节点集群。此外,单节点集群只有一个节点,因此也不具有高可用性。相比之下,在应用程序部署和分发方面,多节点架构是最常见的首选架构。 + +本节概述了多节点安装,包括概念、[KubeKey](https://github.com/kubesphere/kubekey/) 和操作步骤。有关高可用安装的信息,请参考[高可用配置](../../../installing-on-linux/high-availability-configurations/ha-configuration/)、[在公有云上安装](../../../installing-on-linux/public-cloud/install-kubesphere-on-azure-vms/)和[在本地环境中安装](../../../installing-on-linux/on-premises/install-kubesphere-on-bare-metal/)。 + +## 视频演示 + + + +## 概念 + +多节点集群由至少一个主节点和一个工作节点组成。您可以使用任何节点作为**任务机**来执行安装任务,也可以在安装之前或之后根据需要新增节点(例如,为了实现高可用性)。 + +- **Control plane node**:主节点,通常托管控制平面,控制和管理整个系统。 + +- **Worker node**:工作节点,运行部署在工作节点上的实际应用程序。 + +## 步骤 1:准备 Linux 主机 + +请参见下表列出的硬件和操作系统要求。在本教程所演示多节点安装示例中,您需要按照下列要求准备至少三台主机。如果您节点的资源充足,也可以将 [KubeSphere 容器平台](https://kubesphere.com.cn/)安装在两个节点上。 + +### 系统要求 + +| 系统 | 最低要求(每个节点) | +| ------------------------------------------------------------ | -------------------------------- | +| **Ubuntu** *16.04,18.04,20.04* | CPU:2 核,内存:4 G,硬盘:40 G | +| **Debian** *Buster,Stretch* | CPU:2 核,内存:4 G,硬盘:40 G | +| **CentOS** *7*.x | CPU:2 核,内存:4 G,硬盘:40 G | +| **Red Hat Enterprise Linux** *7* | CPU:2 核,内存:4 G,硬盘:40 G | +| **SUSE Linux Enterprise Server** *15* **/openSUSE Leap** *15.2* | CPU:2 核,内存:4 G,硬盘:40 G | + +{{< notice note >}} + +- `/var/lib/docker` 路径主要用于存储容器数据,在使用和操作过程中数据量会逐渐增加。因此,在生产环境中,建议为 `/var/lib/docker` 单独挂载一个硬盘。 + + +- CPU 必须为 x86_64,暂时不支持 Arm 架构的 CPU。 + +{{}} + +### 节点要求 + +- 所有节点必须都能通过 `SSH` 访问。 +- 所有节点时间同步。 +- 所有节点都应使用 `sudo`/`curl`/`openssl`/`tar`。 + +### 容器运行时 + +您的集群必须有一个可用的容器运行时。如果您使用 KubeKey 搭建集群,KubeKey 会默认安装最新版本的 Docker。或者,您也可以在创建集群前手动安装 Docker 或其他容器运行时。 + +{{< content "common/container-runtime-requirements.md" >}} + + +### 依赖项要求 + +KubeKey 可以一同安装 Kubernetes 和 KubeSphere。根据要安装的 Kubernetes 版本,需要安装的依赖项可能会不同。您可以参考下表,查看是否需要提前在节点上安装相关依赖项。 + +| 依赖项 | Kubernetes 版本 ≥ 1.18 | Kubernetes 版本 < 1.18 | +| ----------- | ---------------------- | ---------------------- | +| `socat` | 必须 | 可选,但建议安装 | +| `conntrack` | 必须 | 可选,但建议安装 | +| `ebtables` | 
可选,但建议安装 | 可选,但建议安装 | +| `ipset` | 可选,但建议安装 | 可选,但建议安装 | + +### 网络和 DNS 要求 + +{{< content "common/network-requirements.md" >}} + +{{< notice tip >}} + +- 建议您使用干净的操作系统(即不安装任何其他软件)。否则,可能会产生冲突。 +- 如果您从 `dockerhub.io` 下载镜像时遇到问题,建议提前准备仓库的镜像地址(即加速器)。请参见[为安装配置加速器](../../../faq/installation/configure-booster/)或[为 Docker Daemon 配置仓库镜像](https://docs.docker.com/registry/recipes/mirror/#configure-the-docker-daemon)。 + +{{}} + +本示例包括以下三台主机,其中主节点充当任务机。 + +| 主机 IP | 主机名 | 角色 | +| ----------- | ------ | ------------ | +| 192.168.0.2 | control plane | control plane, etcd | +| 192.168.0.3 | node1 | worker | +| 192.168.0.4 | node2 | worker | + +## 步骤 2:下载 KubeKey + +请按照以下步骤下载 [KubeKey](../kubekey)。 + +{{< tabs >}} + +{{< tab "如果您能正常访问 GitHub/Googleapis" >}} + +从 [GitHub 发布页面](https://github.com/kubesphere/kubekey/releases)下载 KubeKey 或直接使用以下命令。 + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "如果您访问 GitHub/Googleapis 受限" >}} + +先执行以下命令以确保您从正确的区域下载 KubeKey。 + +```bash +export KKZONE=cn +``` + +执行以下命令下载 KubeKey: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +下载 KubeKey 后,如果您将其传输至访问 Googleapis 同样受限的新机器,请您在执行以下步骤之前务必再次执行 `export KKZONE=cn` 命令。 + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +执行以上命令会下载最新版 KubeKey,您可以修改命令中的版本号下载指定版本。 + +{{}} + +为 `kk` 添加可执行权限: + +```bash +chmod +x kk +``` + +## 步骤 3:创建集群 + +对于多节点安装,您需要通过指定配置文件来创建集群。 + +### 1. 创建示例配置文件 + +命令如下: + +```bash +./kk create config [--with-kubernetes version] [--with-kubesphere version] [(-f | --file) path] +``` + +{{< notice note >}} + +- 安装 KubeSphere 3.3 的建议 Kubernetes 版本:v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 v1.21.x。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.23.10。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 + +- 如果您在此步骤的命令中不添加标志 `--with-kubesphere`,则不会部署 KubeSphere,只能使用配置文件中的 `addons` 字段安装,或者在您后续使用 `./kk create cluster` 命令时再次添加这个标志。 + +- 如果您添加标志 `--with-kubesphere` 时不指定 KubeSphere 版本,则会安装最新版本的 KubeSphere。 + +{{}} + +以下是一些示例,供您参考: + +- 您可以使用默认配置创建示例配置文件,也可以为该文件指定其他文件名或其他文件夹。 + + ```bash + ./kk create config [-f ~/myfolder/abc.yaml] + ``` + +- 您可以指定要安装的 KubeSphere 版本(例如 `--with-kubesphere v3.3.2`)。 + + ```bash + ./kk create config --with-kubesphere [version] + ``` + +### 2. 
编辑配置文件 + +如果您不更改名称,那么将创建默认文件 `config-sample.yaml`。编辑文件,以下是多节点集群(具有一个主节点)配置文件的示例。 + +{{< notice note >}} + +若要自定义 Kubernetes 相关参数,请参考 [Kubernetes 集群配置](../../../installing-on-linux/introduction/vars/)。 + +{{}} + +```yaml +spec: + hosts: + - {name: master, address: 192.168.0.2, internalAddress: 192.168.0.2, user: ubuntu, password: Testing123} + - {name: node1, address: 192.168.0.3, internalAddress: 192.168.0.3, user: ubuntu, password: Testing123} + - {name: node2, address: 192.168.0.4, internalAddress: 192.168.0.4, user: ubuntu, password: Testing123} + roleGroups: + etcd: + - master + control-plane: + - master + worker: + - node1 + - node2 + controlPlaneEndpoint: + domain: lb.kubesphere.local + address: "" + port: 6443 +``` + +#### 主机 + +请参照上方示例在 `hosts` 下列出您的所有机器并添加详细信息。 + +`name`:实例的主机名。 + +`address`:任务机和其他实例通过 SSH 相互连接所使用的 IP 地址。根据您的环境,可以是公有 IP 地址或私有 IP 地址。例如,一些云平台为每个实例提供一个公有 IP 地址,用于通过 SSH 访问。在这种情况下,您可以在该字段填入这个公有 IP 地址。 + +`internalAddress`:实例的私有 IP 地址。 + +此外,您必须提供用于连接至每台实例的登录信息,以下示例供您参考: + +- 使用密码登录示例: + + ```yaml + hosts: + - {name: master, address: 192.168.0.2, internalAddress: 192.168.0.2, port: 8022, user: ubuntu, password: Testing123} + ``` + + {{< notice note >}} + + 在本教程中,端口 `22` 是 SSH 的默认端口,因此您无需将它添加至该 YAML 文件中。否则,您需要在 IP 地址后添加对应端口号,如上所示。 + + {{}} + +- 默认 root 用户示例: + + ```yaml + hosts: + - {name: master, address: 192.168.0.2, internalAddress: 192.168.0.2, password: Testing123} + ``` + +- 使用 SSH 密钥的无密码登录示例: + + ```yaml + hosts: + - {name: master, address: 192.168.0.2, internalAddress: 192.168.0.2, privateKeyPath: "~/.ssh/id_rsa"} + ``` + +{{< notice tip >}} + +- 在安装 KubeSphere 之前,您可以使用 `hosts` 下提供的信息(例如 IP 地址和密码)通过 SSH 的方式测试任务机和其他实例之间的网络连接。 +- 在安装前,请确保端口 `6443` 没有被其他服务占用,否则在安装时会产生冲突(`6443` 为 API 服务器的默认端口)。 + +{{}} + +#### roleGroups + +- `etcd`:etcd 节点名称 +- `control-plane`:主节点名称 +- `worker`:工作节点名称 + +#### controlPlaneEndpoint(仅适用于高可用安装) + +您需要在 `controlPlaneEndpoint` 部分为高可用集群提供外部负载均衡器信息。当且仅当您安装多个主节点时,才需要准备和配置外部负载均衡器。请注意,`config-sample.yaml` 中的地址和端口应缩进两个空格,`address` 应为您的负载均衡器地址。有关详细信息,请参见[高可用配置](../../../installing-on-linux/high-availability-configurations/ha-configuration/)。 + +#### addons + +您可以在 `config-sample.yaml` 的 `addons` 字段下指定存储,从而自定义持久化存储插件,例如 NFS 客户端、Ceph RBD、GlusterFS 等。有关更多信息,请参见[持久化存储配置](../../../installing-on-linux/persistent-storage-configurations/understand-persistent-storage/)。 + +KubeSphere 会默认安装 [OpenEBS](https://openebs.io/),为开发和测试环境配置 [LocalPV](https://kubernetes.io/docs/concepts/storage/volumes/#local),方便新用户使用。在本多节点安装示例中,使用了默认存储类型(本地存储卷)。对于生产环境,您可以使用 Ceph/GlusterFS/CSI 或者商业存储产品作为持久化存储解决方案。 + +{{< notice tip >}} + +- 您可以编辑配置文件,启用多集群功能。有关更多信息,请参见[多集群管理](../../../multicluster-management/)。 +- 您也可以选择要安装的组件。有关更多信息,请参见[启用可插拔组件](../../../pluggable-components/)。有关完整的 `config-sample.yaml` 文件的示例,请参见[此文件](https://github.com/kubesphere/kubekey/blob/release-2.2/docs/config-example.md)。 + +{{}} + +完成编辑后,请保存文件。 + +### 3. 使用配置文件创建集群 + +```bash +./kk create cluster -f config-sample.yaml +``` + +{{< notice note >}} + +如果使用其他名称,则需要将上面的 `config-sample.yaml` 更改为您自己的文件。 + +{{}} + +整个安装过程可能需要 10 到 20 分钟,具体取决于您的计算机和网络环境。 + +### 4. 验证安装 + +安装完成后,您会看到如下内容: + +```bash +##################################################### +### Welcome to KubeSphere! ### +##################################################### + +Console: http://192.168.0.2:30880 +Account: admin +Password: P@88w0rd + +NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + the "Cluster Management". 
If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. + +##################################################### +https://kubesphere.io 20xx-xx-xx xx:xx:xx +##################################################### +``` + +现在,您可以通过 `}} + +若要访问控制台,您可能需要根据您的环境配置端口转发规则。还请确保在您的安全组中打开了端口 `30880`。 + +{{}} + +## 启用 kubectl 自动补全 + +KubeKey 不会启用 kubectl 自动补全功能,请参见以下内容并将其打开: + +{{< notice note >}} + +请确保已安装 bash-autocompletion 并可以正常工作。 + +{{}} + +```bash +# Install bash-completion +apt-get install bash-completion + +# Source the completion script in your ~/.bashrc file +echo 'source <(kubectl completion bash)' >>~/.bashrc + +# Add the completion script to the /etc/bash_completion.d directory +kubectl completion bash >/etc/bash_completion.d/kubectl +``` + +详细信息[见此](https://kubernetes.io/docs/tasks/tools/install-kubectl/#enabling-shell-autocompletion)。 + +## 代码演示 + \ No newline at end of file diff --git a/content/zh/docs/v3.4/installing-on-linux/introduction/port-firewall.md b/content/zh/docs/v3.4/installing-on-linux/introduction/port-firewall.md new file mode 100644 index 000000000..86bee0f00 --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/introduction/port-firewall.md @@ -0,0 +1,31 @@ +--- +title: "端口要求" +keywords: 'Kubernetes, KubeSphere, 端口要求, 防火墙规则' +description: '了解 KubeSphere 中不同服务的特定端口要求。' + +linkTitle: "端口要求" +weight: 3150 +--- + +KubeSphere 需要某些端口用于服务之间的通信。如果您的网络配置有防火墙规则,则需要确保基础设施组件可以通过特定端口相互通信。这些端口用作某些进程或服务的通信端点。 + +|服务|协议|行为|起始端口|结束端口|备注 +|---|---|---|---|---|---| +|ssh|TCP|allow|22| +|etcd|TCP|allow|2379|2380| +|apiserver|TCP|allow|6443| +|calico|TCP|allow|9099|9100| +|bgp|TCP|allow|179|| +|nodeport|TCP|allow|30000|32767| +|master|TCP|allow|10250|10258| +|dns|TCP|allow|53| +|dns|UDP|allow|53| +|local-registry|TCP|allow|5000||离线环境需要| +|local-apt|TCP|allow|5080||离线环境需要| +|rpcbind|TCP|allow|111|| 使用 NFS 时需要| +|ipip| IPENCAP / IPIP|allow| | |Calico 需要使用 IPIP 协议 | +|metrics-server| TCP|allow|8443| + +{{< notice note >}} +当您使用 Calico 网络插件并且在云平台上使用经典网络运行集群时,您需要对源地址启用 IPENCAP 和 IPIP 协议。 +{{}} diff --git a/content/zh/docs/v3.4/installing-on-linux/introduction/vars.md b/content/zh/docs/v3.4/installing-on-linux/introduction/vars.md new file mode 100644 index 000000000..438d5bc58 --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/introduction/vars.md @@ -0,0 +1,130 @@ +--- +title: "Kubernetes 集群配置" +keywords: 'Kubernetes, 集群, 配置, KubeKey' +description: '在集群的配置文件中设置 Kubernetes 自定义配置。' +linkTitle: "Kubernetes 集群配置" +weight: 3160 +--- + +当创建 Kubernetes 集群时,您可以使用 [KubeKey](../kubekey/) 去生成含有集群基本信息的配置文件 (`config-sample.yaml`)。有关配置文件中的 Kubernetes 相关参数,请参阅以下示例。 + +```yaml + kubernetes: + version: v1.22.12 + imageRepo: kubesphere + clusterName: cluster.local + masqueradeAll: false + maxPods: 110 + nodeCidrMaskSize: 24 + proxyMode: ipvs + network: + plugin: calico + calico: + ipipMode: Always + vxlanMode: Never + vethMTU: 1440 + kubePodsCIDR: 10.233.64.0/18 + kubeServiceCIDR: 10.233.0.0/18 + registry: + registryMirrors: [] + insecureRegistries: [] + privateRegistry: "" + addons: [] +``` + +以下表格会详细描述上面的参数。 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| 参数 | 描述 |
+| --- | --- |
+| **kubernetes** | |
+| version | Kubernetes 安装版本。如未指定 Kubernetes 版本,{{< contentLink "docs/installing-on-linux/introduction/kubekey" "KubeKey" >}} v3.0.7 默认安装 Kubernetes v1.23.10。有关更多信息,请参阅{{< contentLink "docs/installing-on-linux/introduction/kubekey/#support-matrix" "支持矩阵" >}}。 |
+| imageRepo | 用于下载镜像的 Docker Hub 仓库。 |
+| clusterName | Kubernetes 集群名称。 |
+| masqueradeAll* | 如果使用纯 iptables 代理模式,masqueradeAll 会让 kube-proxy 对所有流量进行源地址转换 (SNAT)。默认值为 false。 |
+| maxPods* | Kubelet 可运行 Pod 的最大数量,默认值为 110。 |
+| nodeCidrMaskSize* | 集群中节点 CIDR 的掩码大小,默认值为 24。 |
+| proxyMode* | 使用的代理模式,默认为 ipvs。 |
+| **network** | |
+| plugin | 要使用的 CNI 插件。KubeKey 默认安装 Calico,您也可以指定为 Flannel。请注意,只有使用 Calico 作为 CNI 插件时,才能使用某些功能,例如 Pod IP 池。 |
+| calico.ipipMode* | 用于集群启动时创建 IPv4 池的 IPIP 模式。如果设置为 Never 以外的值,则参数 vxlanMode 应设置为 Never。可选值为 Always、CrossSubnet 和 Never。默认值为 Always。 |
+| calico.vxlanMode* | 用于集群启动时创建 IPv4 池的 VXLAN 模式。如果设置为 Never 以外的值,则参数 ipipMode 应设置为 Never。可选值为 Always、CrossSubnet 和 Never。默认值为 Never。 |
+| calico.vethMTU* | 最大传输单元(maximum transmission unit,简称 MTU),即可以通过网络传输的最大数据包大小。默认值为 1440。 |
+| kubePodsCIDR | Kubernetes Pod 子网的有效 CIDR 块。CIDR 块不应与您的节点子网和 Kubernetes 服务子网重叠。 |
+| kubeServiceCIDR | Kubernetes 服务的有效 CIDR 块。CIDR 块不应与您的节点子网和 Kubernetes Pod 子网重叠。 |
+| **registry** | |
+| registryMirrors | 配置 Docker 仓库镜像以加速下载。有关详细信息,请参阅{{< contentLink "https://docs.docker.com/registry/recipes/mirror/#configure-the-docker-daemon" "配置 Docker 守护进程" >}}。 |
+| insecureRegistries | 设置不安全镜像仓库的地址。有关详细信息,请参阅{{< contentLink "https://docs.docker.com/registry/insecure/" "测试不安全仓库" >}}。 |
+| privateRegistry* | 配置私有镜像仓库,用于离线安装(例如 Docker 本地仓库或 Harbor)。有关详细信息,请参阅{{< contentLink "docs/v3.3/installing-on-linux/introduction/air-gapped-installation/" "离线安装" >}}。 |
+ + + + +{{< notice note >}} + +- \*默认情况下,KubeKey 不会在配置文件中定义这些参数,您可以手动添加这些参数并自定义其值。 +- `addons` 用于安装云原生扩展 (Addon)(YAML 或 Chart)。有关详细信息,请参阅此[文件](https://github.com/kubesphere/kubekey/blob/release-2.2/docs/addons.md)。 +- 此页面仅列出 KubeKey 创建的配置文件中的部分参数。有关其他参数的详细信息,请参阅此[示例文件](https://github.com/kubesphere/kubekey/blob/release-2.2/docs/config-example.md)。 + +{{}} + diff --git a/content/zh/docs/v3.4/installing-on-linux/on-premises/_index.md b/content/zh/docs/v3.4/installing-on-linux/on-premises/_index.md new file mode 100644 index 000000000..78a591e99 --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/on-premises/_index.md @@ -0,0 +1,9 @@ +--- +linkTitle: "在本地环境中安装" +weight: 3500 + +_build: + render: false +--- + +在本章中,我们将演示如何使用KubeKey或Kubeadm在某些本地环境(例如VMware vSphere,OpenStack,Bare Metal等)上配置新的Kubernetes和KubeSphere集群。在开始安装前,您只需要准备支持的操作系统的机器即可。 离线安装指南也包括在本章中。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/installing-on-linux/on-premises/install-kubesphere-and-k3s.md b/content/zh/docs/v3.4/installing-on-linux/on-premises/install-kubesphere-and-k3s.md new file mode 100644 index 000000000..6906cff1f --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/on-premises/install-kubesphere-and-k3s.md @@ -0,0 +1,184 @@ +--- +title: "部署 K3s 和 KubeSphere" +keywords: 'Kubernetes, KubeSphere, K3s' +description: '了解如何使用 KubeKey 安装 K3s 和 KubeSphere。' +linkTitle: "部署 K3s 和 KubeSphere" +weight: 3530 +--- + +[K3s](https://www.rancher.cn/k3s/) 是专为物联网和边缘计算打造的轻量级 Kubernetes 发行版,最大程度上剔除了外部依赖项。它打包为单个二进制文件,减少了搭建 Kubernetes 集群所需的依赖项和步骤。 + +您可以使用 KubeKey 同时安装 K3s 和 KubeSphere,也可以将 KubeSphere 部署在现有的 K3s 集群上。 + +{{< notice note >}} + +目前,由于功能尚未充分测试,在 K3s 上部署 KubeSphere 仅用于测试和开发。 + +{{}} + +## 准备工作 + +- 有关安装 K3s 的准备工作的更多信息,请参阅 [K3s 文档](https://docs.rancher.cn/docs/k3s/installation/installation-requirements/_index)。 +- 取决于您的网络环境,您可能需要配置防火墙规则和端口转发规则。有关更多信息,请参见[端口要求](../../../installing-on-linux/introduction/port-firewall/)。 + +## 步骤 1:下载 KubeKey + +执行以下步骤下载 [KubeKey](../../../installing-on-linux/introduction/kubekey/)。 + +{{< tabs >}} + +{{< tab "如果您能正常访问 GitHub/Googleapis" >}} + +从 [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) 下载 KubeKey 或直接运行以下命令: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "如果您访问 GitHub/Googleapis 受限" >}} + +首先运行以下命令,以确保您从正确的区域下载 KubeKey。 + +```bash +export KKZONE=cn +``` + +运行以下命令来下载 KubeKey: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +下载 KubeKey 之后,如果您将其转移到访问 Googleapis 受限的新机器上,请务必再次运行 `export KKZONE=cn`,然后继续执行以下步骤。 + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +通过以上的命令可以下载 KubeKey 的最新版本。请注意,更早版本的 KubeKey 无法下载 K3s。 + +{{}} + +执行以下命令为 `kk` 文件增加执行权限: + +```bash +chmod +x kk +``` + +## 步骤 2:创建集群 + +1. 执行以下命令为集群创建一个配置文件: + + ```bash + ./kk create config --with-kubernetes v1.21.4-k3s --with-kubesphere v3.3.2 + ``` + + {{< notice note >}} + + - KubeKey v3.0.7 支持安装 K3s v1.21.4。 + + - 您可以在以上命令中使用 `-f` 或 `--file` 参数指定配置文件的路径和名称。如未指定路径和名称,KubeKey 将默认在当前目录下创建 `config-sample.yaml` 配置文件。 + + {{}} + +2. 执行以下命令编辑配置文件(以下以默认配置文件名为例): + + ```bash + vi config-sample.yaml + ``` + + ```yaml + ... 
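+  # 说明:hosts 与 roleGroups 中的节点信息需替换为您自己的环境;
+  # kubernetes.version 需使用带 -k3s 后缀的版本号(例如 v1.21.4-k3s)。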
+ metadata: + name: sample + spec: + hosts: + - {name: master, address: 192.168.0.2, internalAddress: 192.168.0.2, user: ubuntu, password: Testing123} + - {name: node1, address: 192.168.0.3, internalAddress: 192.168.0.3, user: ubuntu, password: Testing123} + - {name: node2, address: 192.168.0.4, internalAddress: 192.168.0.4, user: ubuntu, password: Testing123} + roleGroups: + etcd: + - master + control-plane: + - master + worker: + - node1 + - node2 + controlPlaneEndpoint: + domain: lb.kubesphere.local + address: "" + port: 6443 + kubernetes: + version: v1.21.4-k3s + imageRepo: kubesphere + clusterName: cluster.local + network: + plugin: calico + kubePodsCIDR: 10.233.64.0/18 + kubeServiceCIDR: 10.233.0.0/18 + registry: + registryMirrors: [] + insecureRegistries: [] + addons: [] + ... + ``` + + {{< notice note >}} + + 有关配置文件中每个字段的更多信息,请参阅[示例文件](https://github.com/kubesphere/kubekey/blob/release-2.2/docs/config-example.md)。 + + {{}} + +3. 保存文件并执行以下命令安装 K3s 和 KubeSphere: + + ``` + ./kk create cluster -f config-sample.yaml + ``` + +4. 安装完成后,可运行以下命令查看安装日志: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + 如果显示如下信息则安装成功: + + ```bash + ##################################################### + ### Welcome to KubeSphere! ### + ##################################################### + + Console: http://192.168.0.2:30880 + Account: admin + Password: P@88w0rd + + NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. + + ##################################################### + https://kubesphere.io 20xx-xx-xx xx:xx:xx + ##################################################### + ``` + + +5. 
从安装日志的 `Console`、`Account` 和 `Password` 参数分别获取 KubeSphere Web 控制台的地址、系统管理员用户名和系统管理员密码,并使用 Web 浏览器登录 KubeSphere Web 控制台。 + + {{< notice note >}} + + 您可以在安装后启用 KubeSphere 的可插拔组件,但由于在 KubeSphere 上部署 K3s 目前处于测试阶段,某些功能可能不兼容。 + + {{}} + diff --git a/content/zh/docs/v3.4/installing-on-linux/on-premises/install-kubesphere-on-bare-metal.md b/content/zh/docs/v3.4/installing-on-linux/on-premises/install-kubesphere-on-bare-metal.md new file mode 100644 index 000000000..b549f9a61 --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/on-premises/install-kubesphere-on-bare-metal.md @@ -0,0 +1,398 @@ +--- +title: "在裸机上安装 KubeSphere" +keywords: 'Kubernetes, KubeSphere, 裸机' +description: '了解如何在裸机上部署一个单 master 的多节点 KubeSohere 集群。' +linkTitle: "在裸机上安装 KubeSphere" +weight: 3520 +--- + +## 介绍 + +KubeSphere 除了可以在云上安装,还可以在裸机上安装。由于在裸机上没有虚拟层,基础设施的开销大大降低,从而可以给部署的应用提供更多的计算和存储资源,硬件效率得到提高。以下示例介绍如何在裸机上安装 KubeSphere。 + +## 准备工作 + +- 您需要了解如何在多节点集群中安装 KubeSphere。有关详情,请参见[多节点安装](../../../installing-on-linux/introduction/multioverview/)。 +- 您的环境中需要有足够的服务器和网络冗余。 +- 如果搭建生产环境,建议您提前准备持久化存储并创建 StorageClass。如果搭建开发测试环境,您可以直接使用集成的 OpenEBS 配置 LocalPV 存储服务。 + +## 准备 Linux 主机 + +本教程使用 3 台物理机,硬件配置为 **DELL 620 Intel (R) Xeon (R) CPU E5-2640 v2 @ 2.00GHz (32G memory)**。在这 3 台物理机上将安装 **CentOS Linux release 7.6.1810 (Core)** 操作系统,用于 KubeSphere 最小化安装。 + +### 安装 CentOS + +请提前下载并安装[ CentOS 镜像](https://www.centos.org/download/),推荐版本为 CentOS Linux release 7.6.1810 (Core)。请确保根目录已至少分配 200 GB 空间用于存储 Docker 镜像(如果 KubeSphere 仅用于测试,您可以跳过这一步)。 + +有关系统要求的更多信息,请参见[系统要求](../../../installing-on-linux/introduction/multioverview/)。 + +三台主机的角色分配如下,供参考。 + + +| 主机 IP 地址 | 主机名 | 角色 | +| --- | --- | --- | +|192.168.60.152|master1|master1, etcd| +|192.168.60.153|worker1|worker| +|192.168.60.154|worker2|worker| + +### 设置网卡 + +1. 清空网卡配置。 + + ```bash + ifdown em1 + ``` + + ```bash + ifdown em2 + ``` + + ```bash + rm -rf /etc/sysconfig/network-scripts/ifcfg-em1 + ``` + + ```bash + rm -rf /etc/sysconfig/network-scripts/ifcfg-em2 + ``` + +2. 创建 bond 网卡。 + + ```bash + nmcli con add type bond con-name bond0 ifname bond0 mode 802.3ad ip4 192.168.60.152/24 gw4 192.168.60.254 + ``` + +3. 设置 bond 模式。 + + ```bash + nmcli con mod id bond0 bond.options mode=802.3ad,miimon=100,lacp_rate=fast,xmit_hash_policy=layer2+3 + ``` + +4. 将物理网卡绑定至 bond。 + + ```bash + nmcli con add type bond-slave ifname em1 con-name em1 master bond0 + ``` + + ```bash + nmcli con add type bond-slave ifname em2 con-name em2 master bond0 + ``` + +5. 修改网卡模式。 + + ```bash + vi /etc/sysconfig/network-scripts/ifcfg-bond0 + BOOTPROTO=static + ``` + +6. 重启 Network Manager。 + + ```bash + systemctl restart NetworkManager + ``` + + ```bash + nmcli con # Display NIC information + ``` + +7. 修改主机名和 DNS。 + + ```bash + hostnamectl set-hostname worker-1 + ``` + + ```bash + vim /etc/resolv.conf + ``` + +### 设置时间 + +1. 开启时间同步。 + + ```bash + yum install -y chrony + ``` + + ```bash + systemctl enable chronyd + ``` + + ```bash + systemctl start chronyd + ``` + + ```bash + timedatectl set-ntp true + ``` + +2. 设置时区。 + + ```bash + timedatectl set-timezone Asia/Shanghai + ``` + +3. 
检查 ntp-server 是否可用。 + + ```bash + chronyc activity -v + ``` + +### 设置防火墙 + +执行以下命令停止并禁用 firewalld 服务: + +```bash +iptables -F +``` + +```bash +systemctl status firewalld +``` + +```bash +systemctl stop firewalld +``` + +```bash +systemctl disable firewalld +``` + +### 更新系统包和依赖项 + +执行以下命令更新系统包并安装依赖项: + +```bash +yum update +``` + +```bash +yum install openssl openssl-devel +``` + +```bash +yum install socat +``` + +```bash +yum install epel-release +``` + +```bash +yum install conntrack-tools +``` + + +{{< notice note >}} + +取决于将要安装的 Kubernetes 版本,您可能不需要安装所有依赖项。有关更多信息,请参见[依赖项要求](../../../installing-on-linux/introduction/multioverview/)。 + +{{}} + +## 下载 KubeKey + +[KubeKey](https://github.com/kubesphere/kubekey) 是新一代 Kubernetes 和 KubeSphere 安装器,可帮助您以简单、快速、灵活的方式安装 Kubernetes 和 KubeSphere。 + +请按照以下步骤下载 KubeKey。 + +{{< tabs >}} + +{{< tab "如果您能正常访问 GitHub/Googleapis" >}} + +从 [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) 下载 KubeKey 或使用以下命令: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "如果您访问 GitHub/Googleapis 受限" >}} + +先执行以下命令以确保您从正确的区域下载 KubeKey: + +```bash +export KKZONE=cn +``` + +执行以下命令下载 KubeKey: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +在您下载 KubeKey 后,如果您将其传至新的机器,且访问 Googleapis 同样受限,在您执行以下步骤之前请务必再次执行 `export KKZONE=cn` 命令。 + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +执行以上命令会下载最新版 KubeKey,您可以修改命令中的版本号下载指定版本。 + +{{}} + +为 `kk` 文件添加可执行权限。 + +```bash +chmod +x kk +``` + +## 创建多节点集群 + +您可用使用 KubeKey 同时安装 Kubernetes 和 KubeSphere,通过自定义配置文件中的参数创建多节点集群。 + +创建安装有 KubeSphere 的 Kubernetes 集群(例如使用 `--with-kubesphere v3.3.2`): + +```bash +./kk create config --with-kubernetes v1.22.12 --with-kubesphere v3.3.2 +``` + +{{< notice note >}} + +- 安装 KubeSphere 3.3 的建议 Kubernetes 版本:v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 v1.21.x。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.23.10。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 + +- 如果您在这一步的命令中不添加标志 `--with-kubesphere`,则不会部署 KubeSphere,只能使用配置文件中的 `addons` 字段安装 KubeSphere,或者在您后续使用 `./kk create cluster` 命令时再次添加该标志。 +- 如果您添加标志 `--with-kubesphere` 时不指定 KubeSphere 版本,则会安装最新版本的 KubeSphere。 + +{{}} + +系统将创建默认的 `config-sample.yaml` 文件。您可以根据您的环境修改此文件。 + +```bash +vi config-sample.yaml +``` + +```yaml +apiVersion: kubekey.kubesphere.io/v1alpha1 +kind: Cluster +metadata: + name: config-sample +spec: + hosts: + - {name: master1, address: 192.168.60.152, internalAddress: 192.168.60.152, user: root, password: P@ssw0rd} + - {name: worker1, address: 192.168.60.153, internalAddress: 192.168.60.153, user: root, password: P@ssw0rd} + - {name: worker2, address: 192.168.60.154, internalAddress: 192.168.60.154, user: root, password: P@ssw0rd} + roleGroups: + etcd: + - master1 + control-plane: + - master1 + worker: + - worker1 + - worker2 + controlPlaneEndpoint: + domain: lb.kubesphere.local + address: "" + port: 6443 +``` +执行以下命令使用自定义的配置文件创建集群: + +```bash +./kk create cluster -f config-sample.yaml +``` + +#### 验证安装 + +安装结束后,您可以执行以下命令查看安装日志: + +```bash +kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f +``` + +如果返回欢迎日志,则安装成功。 + +```bash +************************************************** +##################################################### +### Welcome to KubeSphere! 
### +##################################################### +Console: http://192.168.60.152:30880 +Account: admin +Password: P@88w0rd +NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. +##################################################### +https://kubesphere.io 20xx-xx-xx xx:xx:xx +##################################################### +``` + +#### 登录控制台 + +您可以使用默认的帐户和密码 `admin/P@88w0rd` 登录 KubeSphere 控制台并开始使用 KubeSphere。请在登录后修改默认密码。 + +#### 启用可插拔组件(可选) +以上示例演示了默认的最小化安装流程。如需启用 KubeSphere 的其他组件,请参考[启用可插拔组件](../../../pluggable-components/)。 + +## 优化系统 + +- 更新系统。 + + ```bash + yum update + ``` + +- 添加所需的内核引导参数。 + + ```bash + sudo /sbin/grubby --update-kernel=ALL --args='cgroup_enable=memory cgroup.memory=nokmem swapaccount=1' + ``` + +- 启用 `overlay2` 内核模块。 + + ```bash + echo "overlay2" | sudo tee -a /etc/modules-load.d/overlay.conf + ``` + +- 刷新动态生成的 grub2 配置。 + + ```bash + sudo grub2-set-default 0 + ``` + +- 调整内核参数并使修改生效。 + + ```bash + cat <}} +vip 所在的是虚拟 IP,并不需要创建主机,所以只需要创建 8 台虚拟机。 +{{}} + +1. 选择可创建的资源池,点击右键,选择**新建虚拟机**(创建虚拟机入口有好几个,请自己选择) + + ![0-1-新创](/images/docs/v3.3/vsphere/kubesphereOnVsphere-zh-0-1-1-create-type.png) + +2. 选择创建类型,创建新虚拟机。 + + ![0-1-1创建类型](/images/docs/v3.3/vsphere/kubesphereOnVsphere-zh-0-1-create.png) + +3. 填写虚拟机名称和存放文件夹。 + + ![0-1-2-name](/images/docs/v3.3/vsphere/kubesphereOnVsphere-zh-0-1-2-name.png) + +4. 选择计算资源。 + + ![0-1-3-资源](/images/docs/v3.3/vsphere/kubesphereOnVsphere-zh-0-1-3-resource.png) + +5. 选择存储。 + + ![0-1-4-存储](/images/docs/v3.3/vsphere/kubesphereOnVsphere-zh-0-1-4-storage.png) + +6. 选择兼容性,这里是 ESXi 7.0 及更高版本。 + + ![0-1-5-兼容性](/images/docs/v3.3/vsphere/kubesphereOnVsphere-zh-0-1-5-compatibility.png) + +7. 选择客户机操作系统,Linux CentOS 7 (64 位)。 + + ![0-1-6-系统](/images/docs/v3.3/vsphere/kubesphereOnVsphere-zh-0-1-6-system.png) + +8. 自定义硬件,这里操作系统是挂载的 ISO 文件(打开电源时连接),网络是 VLAN71(勾选)。 + + ![0-1-7-硬件](/images/docs/v3.3/vsphere/kubesphereOnVsphere-zh-0-1-7-hardware.png) + +9. 
在**即将完成**页面上可查看为虚拟机选择的配置。 + + ![0-1-8](/images/docs/v3.3/vsphere/kubesphereOnVsphere-zh-0-1-8.png) + +## 部署 keepalived 和 HAproxy + +生产环境需要单独准备负载均衡器,例如 NGINX、F5、Keepalived + HAproxy 这样的私有化部署负载均衡器方案。如果您是准备搭建开发或测试环境,无需准备负载均衡器,可以跳过此小节。 + +### Yum 安装 + +在主机为 lb-0 和 lb-1 中部署 Keepalived + HAProxy 即 IP 为`10.10.71.77`与`10.10.71.66`的服务器上安装部署 HAProxy 和 psmisc。 + +```bash +yum install keepalived haproxy psmisc -y +``` + +### 配置 HAProxy + +在 IP 为 `10.10.71.77` 与 `10.10.71.66` 的服务器上按如下参数配置 HAProxy (两台 lb 机器配置一致即可,注意后端服务地址)。 + +```yaml +# HAProxy Configure /etc/haproxy/haproxy.cfg +global + log 127.0.0.1 local2 + chroot /var/lib/haproxy + pidfile /var/run/haproxy.pid + maxconn 4000 + user haproxy + group haproxy + daemon + # turn on stats unix socket + stats socket /var/lib/haproxy/stats +#--------------------------------------------------------------------- +# common defaults that all the 'listen' and 'backend' sections will +# use if not designated in their block +#--------------------------------------------------------------------- +defaults + log global + option httplog + option dontlognull + timeout connect 5000 + timeout client 5000 + timeout server 5000 +#--------------------------------------------------------------------- +# main frontend which proxys to the backends +#--------------------------------------------------------------------- +frontend kube-apiserver + bind *:6443 + mode tcp + option tcplog + default_backend kube-apiserver +#--------------------------------------------------------------------- +# round robin balancing between the various backends +#--------------------------------------------------------------------- +backend kube-apiserver + mode tcp + option tcplog + balance roundrobin + default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100 + server kube-apiserver-1 10.10.71.214:6443 check + server kube-apiserver-2 10.10.71.73:6443 check + server kube-apiserver-3 10.10.71.62:6443 check +``` + +启动之前检查语法是否有问题 + +```bash +haproxy -f /etc/haproxy/haproxy.cfg -c +``` + +启动 Haproxy,并设置开机自启动 + +```bash +systemctl restart haproxy && systemctl enable haproxy +``` + +停止 Haproxy + +```bash +systemctl stop haproxy +``` + +### 配置 Keepalived + +主 HAProxy 77 lb-0-10.10.71.77 (/etc/keepalived/keepalived.conf) + +```yaml +global_defs { +notification_email { +} +smtp_connect_timeout 30 #连接超时时间 +router_id LVS_DEVEL01 ##相当于给这个服务器起个昵称 +vrrp_skip_check_adv_addr +vrrp_garp_interval 0 +vrrp_gna_interval 0 +} +vrrp_script chk_haproxy { +script "killall -0 haproxy" +interval 2 +weight 20 +} +vrrp_instance haproxy-vip { +state MASTER #主服务器 是MASTER +priority 100 #主服务器优先级要比备服务器高 +interface ens192 #实例绑定的网卡 +virtual_router_id 60 #定义一个热备组,可以认为这是60号热备组 +advert_int 1 #1秒互相通告一次,检查对方死了没。 +authentication { + auth_type PASS #认证类型 + auth_pass 1111 #认证密码 这些相当于暗号 +} +unicast_src_ip 10.10.71.77 #当前机器地址 +unicast_peer { + 10.10.71.66 #peer中其它机器地址 +} +virtual_ipaddress { + #vip地址 + 10.10.71.67/24 +} +track_script { + chk_haproxy +} +} +``` + +备 HAProxy 66 lb-1-10.10.71.66 (/etc/keepalived/keepalived.conf) + +```yaml +global_defs { +notification_email { +} +router_id LVS_DEVEL02 ##相当于给这个服务器起个昵称 +vrrp_skip_check_adv_addr +vrrp_garp_interval 0 +vrrp_gna_interval 0 +} +vrrp_script chk_haproxy { +script "killall -0 haproxy" +interval 2 +weight 20 +} +vrrp_instance haproxy-vip { +state BACKUP #备份服务器 是 backup +priority 90 #优先级要低(把备份的90修改为100) +interface ens192 #实例绑定的网卡 +virtual_router_id 60 +advert_int 1 +authentication { + auth_type PASS + auth_pass 1111 +} +unicast_src_ip 
10.10.71.66 #当前机器地址 +unicast_peer { + 10.10.71.77 #peer 中其它机器地址 +} +virtual_ipaddress { + #加/24 + 10.10.71.67/24 +} +track_script { + chk_haproxy +} +} +``` + +启动 keepalived,设置开机自启动 + +```bash +systemctl restart keepalived && systemctl enable keepalived +systemctl stop keepalived + +``` + +开启 keepalived服务 + +```bash +systemctl start keepalived +``` + +### 验证可用性 + +使用`ip a s`查看各 lb 节点 vip 绑定情况 + +```bash +ip a s +``` + +暂停 vip 所在节点 HAProxy + +```bash +systemctl stop haproxy +``` + +再次使用`ip a s`查看各 lb 节点 vip 绑定情况,查看 vip 是否发生漂移 + +```bash +ip a s +``` + +或者使用下面命令查看 + +```bash +systemctl status -l keepalived +``` + +## 下载 KubeKey 安装程序 + +下载可执行安装程序 `kk` 至一台目标机器: + +{{< tabs >}} + +{{< tab "如果您能正常访问 GitHub/Googleapis" >}} + +从 [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) 下载 KubeKey 或直接使用以下命令。 + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "如果您访问 GitHub/Googleapis 受限" >}} + +先执行以下命令以确保您从正确的区域下载 KubeKey。 + +```bash +export KKZONE=cn +``` + +执行以下命令下载 KubeKey。 + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +在您下载 KubeKey 后,如果您将其传至新的机器,且访问 Googleapis 同样受限,在您执行以下步骤之前请务必再次执行 `export KKZONE=cn` 命令。 + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +执行以上命令会下载最新版 KubeKey,您可以修改命令中的版本号下载指定版本。 + +{{}} + +为 `kk` 添加可执行权限: + +```bash +chmod +x kk +``` + +## 创建多节点集群 + +您可以使用高级安装来控制自定义参数或创建多节点集群。具体来说,通过指定配置文件来创建集群。 + +### KubeKey 部署集群 + +创建配置文件(一个示例配置文件)。 + +```bash +./kk create config --with-kubernetes v1.22.12 --with-kubesphere v3.3.2 +``` + +{{< notice note >}} + +- 安装 KubeSphere 3.3 的建议 Kubernetes 版本:v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 v1.21.x。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.23.10。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 + +- 如果您在这一步的命令中不添加标志 `--with-kubesphere`,则不会部署 KubeSphere,只能使用配置文件中的 `addons` 字段安装,或者在您后续使用 `./kk create cluster` 命令时再次添加这个标志。 + +- 如果您添加标志 `--with-kubesphere` 时不指定 KubeSphere 版本,则会安装最新版本的 KubeSphere。 + +{{}} + +默认文件 `config-sample.yaml` 创建后,根据您的环境修改该文件。 + +```bash +vi ~/config-sample.yaml +``` + +```yaml +apiVersion: kubekey.kubesphere.io/v1alpha1 +kind: Cluster +metadata: + name: config-sample +spec: + hosts: + - {name: master1, address: 10.10.71.214, internalAddress: 10.10.71.214, password: P@ssw0rd!} + - {name: master2, address: 10.10.71.73, internalAddress: 10.10.71.73, password: P@ssw0rd!} + - {name: master3, address: 10.10.71.62, internalAddress: 10.10.71.62, password: P@ssw0rd!} + - {name: node1, address: 10.10.71.75, internalAddress: 10.10.71.75, password: P@ssw0rd!} + - {name: node2, address: 10.10.71.76, internalAddress: 10.10.71.76, password: P@ssw0rd!} + - {name: node3, address: 10.10.71.79, internalAddress: 10.10.71.79, password: P@ssw0rd!} + roleGroups: + etcd: + - master1 + - master2 + - master3 + control-plane: + - master1 + - master2 + - master3 + worker: + - node1 + - node2 + - node3 + controlPlaneEndpoint: + domain: lb.kubesphere.local + # vip + address: "10.10.71.67" + port: 6443 + kubernetes: + version: v1.22.12 + imageRepo: kubesphere + clusterName: cluster.local + masqueradeAll: false # masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode. [Default: false] + maxPods: 110 # maxPods is the number of pods that can run on this Kubelet. [Default: 110] + nodeCidrMaskSize: 24 # internal network node size allocation. This is the size allocated to each node on your network. 
[Default: 24] + proxyMode: ipvs # mode specifies which proxy mode to use. [Default: ipvs] + network: + plugin: calico + calico: + ipipMode: Always # IPIP Mode to use for the IPv4 POOL created at start up. If set to a value other than Never, vxlanMode should be set to "Never". [Always | CrossSubnet | Never] [Default: Always] + vxlanMode: Never # VXLAN Mode to use for the IPv4 POOL created at start up. If set to a value other than Never, ipipMode should be set to "Never". [Always | CrossSubnet | Never] [Default: Never] + vethMTU: 1440 # The maximum transmission unit (MTU) setting determines the largest packet size that can be transmitted through your network. [Default: 1440] + kubePodsCIDR: 10.233.64.0/18 + kubeServiceCIDR: 10.233.0.0/18 + registry: + registryMirrors: [] + insecureRegistries: [] + addons: [] + +··· +# 其它配置可以在安装后之后根据需要进行修改 +``` + +#### 持久化存储配置 + +如本文开头的前提条件所说,对于生产环境,我们建议您准备持久性存储,可参考以下说明进行配置。若搭建开发和测试环境,您可以跳过这小节,直接使用默认集成的 OpenEBS 的 LocalPV 存储。 + +继续编辑上述`config-sample.yaml`文件,找到`[addons]`字段,这里支持定义任何持久化存储的插件或客户端,如 NFS Client、Ceph、GlusterFS、CSI,根据您自己的持久化存储服务类型,并参考 [持久化存储服务](../../../installing-on-linux/persistent-storage-configurations/understand-persistent-storage/) 中对应的示例 YAML 文件进行设置。 + +#### 执行创建集群 + +使用上面自定义的配置文件创建集群: + +```bash +./kk create cluster -f config-sample.yaml +``` + +根据表格的系统依赖的前提条件检查,如果相关依赖都显示 **√**,则可以输入 **yes** 继续执行安装。 + +#### 验证安装结果 + +此时可以看到安装日志自动输出,或者可以再打开一个 SSH 手动检查安装日志,然后等待安装成功。 + +```bash +kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f +``` + +如果最后返回`Welcome to KubeSphere`,则表示已安装成功。 + +```yaml +************************************************** +##################################################### +### Welcome to KubeSphere! ### +##################################################### +Console: http://10.10.71.214:30880 +Account: admin +Password: P@88w0rd +NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. 
+##################################################### +https://kubesphere.io 2020-08-15 23:32:12 +##################################################### +``` + +#### 登录 console 界面 + +使用上述日志中给定的访问地址进行访问,进入到 KubeSphere 的登录界面并使用默认帐户(用户名`admin`,密码`P@88w0rd`)即可登录平台。 + +## 开启可插拔功能组件(可选) + +上面的示例演示了默认的最小安装过程,对于可插拔组件,可以在安装之前或之后启用它们。有关详细信息,请参见[启用可插拔组件](../../../pluggable-components/)。 diff --git a/content/zh/docs/v3.4/installing-on-linux/persistent-storage-configurations/_index.md b/content/zh/docs/v3.4/installing-on-linux/persistent-storage-configurations/_index.md new file mode 100644 index 000000000..887475801 --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/persistent-storage-configurations/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "持久化存储配置" +weight: 3300 + +_build: + render: false +--- diff --git a/content/zh/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-ceph-csi-rbd.md b/content/zh/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-ceph-csi-rbd.md new file mode 100644 index 000000000..1df235901 --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-ceph-csi-rbd.md @@ -0,0 +1,126 @@ +--- +title: "安装 Ceph" +keywords: 'Kubesphere,Kubernetes,Ceph,安装,配置,存储' +description: '如何创建一个使用 Ceph 提供存储服务的 KubeSphere 集群。' +linkTitle: "安装 Ceph" +weight: 3350 +--- + +您可以选择 [Ceph RBD](https://kubernetes.io/zh/docs/concepts/storage/storage-classes/#ceph-rbd) 或 [Ceph CSI ](https://github.com/ceph/ceph-csi) 作为 Ceph 服务器的底层存储插件。Ceph RBD 是 Kubernetes 上的一个树内存储插件,Ceph 容器存储接口(CSI)是一个用于 RBD 和 CephFS 的驱动程序。 + +### Ceph 插件 + +如果你安装的是 Ceph v14.0.0(Nautilus)及以上版本,那么推荐您使用 Ceph CSI RBD。原因如下: + +- 树内存储插件将会被弃用。 +- Ceph RBD 只适用于使用 hyperkube 镜像的 Kubernetes 集群,而 hyperkube 镜像 + [从 Kubernetes 1.17 开始已被弃用](https://github.com/kubernetes/kubernetes/pull/85094)。 +- Ceph CSI 功能更丰富,如克隆,扩容和快照。 + +### Ceph CSI RBD + +您需要安装 Kubernetes(v1.14.0 及以上版本)和 Ceph v14.0.0(Nautilus)及以上版本。有关兼容性的详细信息,请参见 [Ceph CSI 支持矩阵](https://github.com/ceph/ceph-csi#support-matrix)。 + +以下是 Helm Charts 安装的 Ceph CSI RBD 的 KubeKey 插件配置示例。由于 StorageClass 不包含在 chart 中,因此需要在插件中配置 StorageClass。 + +#### Chart 配置 + +```yaml +csiConfig: + - clusterID: "cluster1" + monitors: + - "192.168.0.8:6789" # <--TobeReplaced--> + - "192.168.0.9:6789" # <--TobeReplaced--> + - "192.168.0.10:6789" # <--TobeReplaced--> +``` + +如果你想配置更多的参数,请参见 [ceph-csi-rbd 的 chart 配置](https://github.com/ceph/ceph-csi/tree/master/charts/ceph-csi-rbd)。 + +#### StorageClass 配置(包含保密字典) + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: csi-rbd-secret + namespace: kube-system +stringData: + userID: admin + userKey: "AQDoECFfYD3DGBAAm6CPhFS8TQ0Hn0aslTlovw==" # <--ToBeReplaced--> +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: csi-rbd-sc + annotations: + storageclass.beta.kubernetes.io/is-default-class: "true" + storageclass.kubesphere.io/supported-access-modes: '["ReadWriteOnce","ReadOnlyMany","ReadWriteMany"]' +provisioner: rbd.csi.ceph.com +parameters: + clusterID: "cluster1" + pool: "rbd" # <--ToBeReplaced--> + imageFeatures: layering + csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret + csi.storage.k8s.io/provisioner-secret-namespace: kube-system + csi.storage.k8s.io/controller-expand-secret-name: csi-rbd-secret + csi.storage.k8s.io/controller-expand-secret-namespace: kube-system + csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret + csi.storage.k8s.io/node-stage-secret-namespace: kube-system + csi.storage.k8s.io/fstype: ext4 +reclaimPolicy: Delete 
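+# 说明:reclaimPolicy 为 Delete 时,删除 PVC 会同时删除动态创建的 PV 及其底层 RBD 镜像;
+# 如需保留数据,可改为 Retain。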
+allowVolumeExpansion: true +mountOptions: + - discard +``` + +#### 插件配置 + +将上面的 chart 配置和 StorageClass 保存到本地(例如 `/root/ceph-csi-rbd.yaml` 和 `/root/ceph-csi-rbd-sc.yaml`)。插件配置如下所示: + +```yaml +addons: +- name: ceph-csi-rbd + namespace: kube-system + sources: + chart: + name: ceph-csi-rbd + repo: https://ceph.github.io/csi-charts + valuesFile: /root/ceph-csi-rbd.yaml +- name: ceph-csi-rbd-sc + sources: + yaml: + path: + - /root/ceph-csi-rbd-sc.yaml +``` + +### Ceph RBD + +Kubekey 没有使用 hyperkube 镜像。因此,树内 Ceph RBD 可能无法在使用 KubeKey 安装的 Kubernetes 上工作。如果你的 Ceph 集群版本低于 14.0.0,Ceph CSI 将不能使用,但是由于 [RBD](https://github.com/kubernetes-incubator/external-storage/tree/master/ceph/rbd) 格式和 Ceph RBD 相同,可以作为 Ceph RBD 的替代选项。下面是由 Helm Charts 安装的 RBD Provisioner 的 KubeKey 插件配置示例,其中包括 StorageClass。 + +#### Chart 配置 + +```yaml +ceph: + mon: "192.168.0.12:6789" # <--ToBeReplaced--> + adminKey: "QVFBS1JkdGRvV0lySUJBQW5LaVpSKzBRY2tjWmd6UzRJdndmQ2c9PQ==" # <--ToBeReplaced--> + userKey: "QVFBS1JkdGRvV0lySUJBQW5LaVpSKzBRY2tjWmd6UzRJdndmQ2c9PQ==" # <--ToBeReplaced--> +sc: + isDefault: false +``` + +如果你想配置更多的参数,请参见 [RBD-Provisioner 的 chart 配置](https://github.com/kubesphere/helm-charts/tree/master/src/test/rbd-provisioner#configuration)。 + +#### 插件配置 + +将上面的 chart 配置保存到本地(例如 `/root/rbd-provisioner.yaml`)。RBD Provisioner Cloud 的插件配置如下所示: + +```yaml +- name: rbd-provisioner + namespace: kube-system + sources: + chart: + name: rbd-provisioner + repo: https://charts.kubesphere.io/test + valuesFile: /root/rbd-provisioner.yaml +``` + diff --git a/content/zh/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-glusterfs.md b/content/zh/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-glusterfs.md new file mode 100644 index 000000000..04c44d6b3 --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-glusterfs.md @@ -0,0 +1,298 @@ +--- +title: "安装 GlusterFS" +keywords: 'KubeSphere, Kubernetes, GlusterFS, 安装, 配置, 存储' +description: '使用 KubeKey 搭建 KubeSphere 集群并配置 GlusterFS 存储。' +linkTitle: "安装 GlusterFS" +weight: 3340 +--- + +[GlusterFS](https://kubernetes.io/docs/concepts/storage/storage-classes/#glusterfs) 是 Kubernetes 中的树内 (in-tree) 存储插件。因此,您只需要安装存储类型。 + +本教程演示了如何使用 KubeKey 搭建 KubeSphere 集群并配置 GlusterFS 以提供存储服务。 + +{{< notice note >}} + +本教程以 Ubuntu 16.04 为例。 + +{{}} + +## 准备工作 + +您需要搭建 GlusterFS 集群并配置 Heketi。有关更多信息,请参见[搭建 GlusterFS 服务器](../../../reference/storage-system-installation/glusterfs-server/)。 + +## 步骤 1:配置客户端机器 + +您需要在全部客户端机器上安装 GlusterFS 客户端安装包。 + +1. 安装 `software-properties-common`。 + + ```bash + apt-get install software-properties-common + ``` + +2. 添加社区 GlusterFS PPA。 + + ```bash + add-apt-repository ppa:gluster/glusterfs-7 + ``` + +3. 请确保使用的是最新安装包。 + + ```bash + apt-get update + ``` + +4. 安装 GlusterFS 服务器。 + + ```bash + apt-get install glusterfs-server -y + ``` + +5. 验证 GlusterFS 版本。 + + ```bash + glusterfs -V + ``` + +## 步骤 2:为 GlusterFS 创建配置文件 + +单独的配置文件包含 GlusterFS 存储的全部参数,KubeKey 在安装过程中会使用这些参数。 + +1. 
访问稍后想要在其上下载 KubeKey 的节点(任务机),运行以下命令创建配置文件。 + + ``` + vi glusterfs-sc.yaml + ``` + + 示例配置文件(包括 Heketi 密钥): + + ```yaml + apiVersion: v1 + kind: Secret + metadata: + name: heketi-secret + namespace: kube-system + type: kubernetes.io/glusterfs + data: + key: "MTIzNDU2" #请替换为您自己的密钥。Base64 编码。 + --- + apiVersion: storage.k8s.io/v1 + kind: StorageClass + metadata: + annotations: + storageclass.beta.kubernetes.io/is-default-class: "true" + storageclass.kubesphere.io/supported-access-modes: '["ReadWriteOnce","ReadOnlyMany","ReadWriteMany"]' + name: glusterfs + parameters: + clusterid: "21240a91145aee4d801661689383dcd1" #请替换为您自己的 GlusterFS 集群 ID。 + gidMax: "50000" + gidMin: "40000" + restauthenabled: "true" + resturl: "http://192.168.0.2:8080" #Gluster REST 服务/Heketi 服务 URL 可按需供应 gluster 存储卷。请替换为您自己的 URL。 + restuser: admin + secretName: heketi-secret + secretNamespace: kube-system + volumetype: "replicate:3" #请替换为您自己的存储卷类型。 + provisioner: kubernetes.io/glusterfs + reclaimPolicy: Delete + volumeBindingMode: Immediate + allowVolumeExpansion: true + ``` + + {{< notice note >}} + + - 请使用字段 `storageclass.beta.kubernetes.io/is-default-class` 将 `glusterfs` 设置为默认存储类型。如果选择 `false`,KubeKey 将会安装 OpenEBS 作为默认存储类型。 + - 有关存储类型清单中参数的更多信息,请参见 [Kubernetes 文档](https://kubernetes.io/zh/docs/concepts/storage/storage-classes/#glusterfs)。 + + {{}} + +2. 保存文件。 + +## 步骤 3:下载 KubeKey + +根据以下步骤在任务机上下载 [KubeKey](../../../installing-on-linux/introduction/kubekey/)。 + +{{< tabs >}} + +{{< tab "如果您能正常访问 GitHub/Googleapis" >}} + +从 [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) 下载 KubeKey 或者直接运行以下命令。 + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "如果您访问 GitHub/Googleapis 受限" >}} + +首先运行以下命令,以确保您从正确的区域下载 KubeKey。 + +```bash +export KKZONE=cn +``` + +运行以下命令来下载 KubeKey: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +下载 KubeKey 之后,如果您将其转移到访问 Googleapis 受限的新机器上,请务必再次运行 `export KKZONE=cn`,然后继续执行以下步骤。 + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +通过以上的命令,可以下载 KubeKey 的最新版本。您可以更改命令中的版本号来下载特定的版本。 + +{{}} + +使 `kk` 可执行: + +```bash +chmod +x kk +``` + +## 步骤 4:创建集群 + +1. 指定想要安装的 Kubernetes 版本和 KubeSphere 版本,例如: + + ```bash + ./kk create config --with-kubernetes v1.22.12 --with-kubesphere v3.3.2 + ``` + + {{< notice note >}} + + - 安装 KubeSphere 3.3 的建议 Kubernetes 版本:v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 v1.21.x。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.23.10。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 + + - 如果您在此步骤的命令中不添加标志 `--with-kubesphere`,则不会部署 KubeSphere,只能使用配置文件中的 `addons` 字段安装,或者在您后续使用 `./kk create cluster` 命令时再次添加这个标志。 + - 如果您添加标志 `--with-kubesphere` 时不指定 KubeSphere 版本,则会安装最新版本的 KubeSphere。 + + {{}} + +2. 如果您不自定义名称,将创建默认文件 `config-sample.yaml`。编辑文件: + + ```bash + vi config-sample.yaml + ``` + + ```yaml + ... 
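+  # 说明:下方 addons 中引用的 /root/glusterfs-sc.yaml 即步骤 2 保存的 GlusterFS 存储类配置文件,
+  # 路径需与实际保存位置一致。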
+ metadata: + name: sample + spec: + hosts: + - {name: client1, address: 192.168.0.5, internalAddress: 192.168.0.5, user: ubuntu, password: Testing123} + - {name: client2, address: 192.168.0.6, internalAddress: 192.168.0.6, user: ubuntu, password: Testing123} + - {name: client3, address: 192.168.0.7, internalAddress: 192.168.0.7, user: ubuntu, password: Testing123} + roleGroups: + etcd: + - client1 + control-plane: + - client1 + worker: + - client2 + - client3 + controlPlaneEndpoint: + domain: lb.kubesphere.local + address: "" + port: 6443 + kubernetes: + version: v1.22.12 + imageRepo: kubesphere + clusterName: cluster.local + network: + plugin: calico + kubePodsCIDR: 10.233.64.0/18 + kubeServiceCIDR: 10.233.0.0/18 + registry: + registryMirrors: [] + insecureRegistries: [] + addons: + - name: glusterfs + namespace: kube-system + sources: + yaml: + path: + - /root/glusterfs-sc.yaml + ... + ``` + +3. 请特别注意 `addons` 字段,您必须在该字段下提供要创建的存储类型以及 Heketi 密钥的信息。有关文件中每个参数的更多信息,请参见[多节点安装](../../../installing-on-linux/introduction/multioverview/#2-编辑配置文件)。 + +4. 保存文件,执行以下命令安装 Kubernetes 和 KubeSphere: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +5. 安装完成后,可以运行以下命令检查安装日志: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + 预期输出: + + ```bash + ##################################################### + ### Welcome to KubeSphere! ### + ##################################################### + + Console: http://192.168.0.4:30880 + Account: admin + Password: P@88w0rd + + NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. + + ##################################################### + https://kubesphere.io 20xx-xx-xx xx:xx:xx + ##################################################### + ``` + +## 步骤 5:验证安装 + +您可以使用命令行或者在 KubeSphere 的 Web 控制台验证 GlusterFS 是否成功安装。 + +### 命令行 + +运行以下命令行检查您的存储类型。 + +```bash +kubectl get sc +``` + +预期输出: + +```bash +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +glusterfs (default) kubernetes.io/glusterfs Delete Immediate true 104m +``` + +### KubeSphere 控制台 + +1. 使用默认帐户和密码 (`admin/P@88w0rd`) 通过 `:30880` 登录 Web 控制台。点击左上角的**平台管理**,选择**集群管理**。 + +3. 访问**存储**下的**持久卷声明**,可以看到 PVC 正在使用。 + + {{< notice note >}} + + 有关如何在 KubeSphere 控制台上创建持久卷声明的更多信息,请参见[持久卷声明](../../../project-user-guide/storage/volumes/)。 + + {{}} + +3. 
在**存储类**页面,可以看到集群中可用的存储类型。 + diff --git a/content/zh/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-nfs-client.md b/content/zh/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-nfs-client.md new file mode 100644 index 000000000..405a59c95 --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-nfs-client.md @@ -0,0 +1,270 @@ +--- +title: "安装 NFS Client" +keywords: 'KubeSphere, Kubernetes, 存储, 安装, 配置, NFS' +description: '使用 KubeKey 搭建 KubeSphere 集群并配置 NFS 存储。' +linkTitle: "安装 NFS Client" +weight: 3330 +--- + +本教程演示了如何搭建 KubeSphere 集群并配置 NFS 存储。 + +{{< notice note >}} + +- 本教程以 Ubuntu 16.04 为例。 +- NFS 与部分应用不兼容(例如 Prometheus),可能会导致容器组创建失败。如果确实需要在生产环境中使用 NFS,请确保您了解相关风险或咨询 KubeSphere 技术支持 support@kubesphere.cloud。 + +{{}} + +## 准备工作 + +您必须准备好提供外部存储服务的 NFS 服务器。请确保已在客户端机器允许访问的 NFS 服务器上创建并导出目录。有关更多信息,请参见[搭建 NFS 服务器](../../../reference/storage-system-installation/nfs-server/)。 + +## 步骤 1:配置客户端机器 + +请在所有客户端上安装 `nfs-common`,它提供必要的 NFS 功能,而无需安装其他服务器组件。 + +1. 执行以下命令确保使用最新软件包。 + + ```bash + sudo apt-get update + ``` + +2. 在所有客户端上安装 `nfs-common`。 + + ```bash + sudo apt-get install nfs-common + ``` + +3. 访问稍后想要下载 KubeKey 到其上的一台客户端机器(任务机)。创建一个配置文件,其中包含 NFS 服务器的全部必要参数,KubeKey 将在安装过程中引用该文件。 + + ```bash + vi nfs-client.yaml + ``` + + 示例配置文件: + + ```yaml + nfs: + server: "192.168.0.2" # This is the server IP address. Replace it with your own. + path: "/mnt/demo" # Replace the exported directory with your own. + storageClass: + defaultClass: false + ``` + + {{< notice note >}} + + - 如果想要配置更多的值,请参见 [NFS-client Chart 配置](https://github.com/kubesphere/helm-charts/tree/master/src/main/nfs-client-provisioner#configuration)。 + - `storageClass.defaultClass` 字段决定是否将 NFS-client Provisioner 的存储类型设置为默认存储类型。如果您输入 `false`,KubeKey 将安装 [OpenEBS](https://github.com/openebs/openebs) 来提供本地卷,您在集群上创建工作负载时,不会动态供应本地持久卷。安装 KubeSphere 之后,您可以直接在控制台上更改默认存储类型。 + + {{}} + +4. 保存文件。 + +## 步骤 2:下载 KubeKey + +根据以下步骤在任务机上下载 [KubeKey](../../../installing-on-linux/introduction/kubekey/)。 + +{{< tabs >}} + +{{< tab "如果您能正常访问 GitHub/Googleapis" >}} + +从 [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) 下载 KubeKey 或者直接运行以下命令。 + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "如果您访问 GitHub/Googleapis 受限" >}} + +首先运行以下命令,确保您从正确的区域下载 KubeKey。 + +```bash +export KKZONE=cn +``` + +运行以下命令来下载 KubeKey: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +下载 KubeKey 之后,如果您将其转移到访问 Googleapis 受限的新机器上,请务必再次运行 `export KKZONE=cn`,然后继续执行以下步骤。 + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +通过以上命令,可以下载 KubeKey 的最新版本。您可以更改命令中的版本号来下载特定的版本。 + +{{}} + +使 `kk` 可执行: + +```bash +chmod +x kk +``` + +## 步骤 3:创建集群 + +1. 指定您想要安装的 Kubernetes 版本和 KubeSphere 版本,例如: + + ```bash + ./kk create config --with-kubernetes v1.22.12 --with-kubesphere v3.3.2 + ``` + + {{< notice note >}} + + - 安装 KubeSphere 3.3 的建议 Kubernetes 版本:v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 v1.21.x。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.23.10。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 + + - 如果您在此步骤的命令中不添加标志 `--with-kubesphere`,则不会部署 KubeSphere,只能使用配置文件中的 `addons` 字段安装,或者在您后续使用 `./kk create cluster` 命令时再次添加这个标志。 + - 如果您添加标志 `--with-kubesphere` 时不指定 KubeSphere 版本,则会安装最新版本的 KubeSphere。 + + {{}} + +2. 
如果您不自定义名称,将创建默认文件 `config-sample.yaml`。编辑文件: + + ```bash + vi config-sample.yaml + ``` + + ```yaml + ... + metadata: + name: sample + spec: + hosts: + - {name: client1, address: 192.168.0.3, internalAddress: 192.168.0.3, user: ubuntu, password: Testing123} + - {name: client2, address: 192.168.0.4, internalAddress: 192.168.0.4, user: ubuntu, password: Testing123} + - {name: client3, address: 192.168.0.5, internalAddress: 192.168.0.5, user: ubuntu, password: Testing123} + roleGroups: + etcd: + - client1 + control-plane: + - client1 + worker: + - client2 + - client3 + controlPlaneEndpoint: + domain: lb.kubesphere.local + address: "" + port: 6443 + kubernetes: + version: v1.22.12 + imageRepo: kubesphere + clusterName: cluster.local + network: + plugin: calico + kubePodsCIDR: 10.233.64.0/18 + kubeServiceCIDR: 10.233.0.0/18 + registry: + registryMirrors: [] + insecureRegistries: [] + addons: + - name: nfs-client + namespace: kube-system + sources: + chart: + name: nfs-client-provisioner + repo: https://charts.kubesphere.io/main + valuesFile: /home/ubuntu/nfs-client.yaml # Use the path of your own NFS-client configuration file. + ... + ``` + +3. 请特别注意 `addons` 字段,您必须在该字段下提供 NFS-client 的信息。有关文件中每个参数的更多信息,请参见[多节点安装](../../../installing-on-linux/introduction/multioverview/#2-编辑配置文件)。 + +4. 保存文件,执行以下命令安装 Kubernetes 和 KubeSphere: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +5. 安装完成后,可以使用以下命令检查安装日志: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + 预期输出: + + ```bash + ##################################################### + ### Welcome to KubeSphere! ### + ##################################################### + + Console: http://192.168.0.3:30880 + Account: admin + Password: P@88w0rd + + NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. + + ##################################################### + https://kubesphere.io 20xx-xx-xx xx:xx:xx + ##################################################### + ``` + +## 步骤 4:验证安装 + +您可以使用命令行或者从 KubeSphere 的 Web 控制台来验证 NFS-client 是否安装成功。 + +### 命令行 + +1. 运行以下命令检查存储类型: + + ```bash + kubectl get sc + ``` + + 预期输出: + + ```bash + NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE + local (default) openebs.io/local Delete WaitForFirstConsumer false 16m + nfs-client cluster.local/nfs-client-nfs-client-provisioner Delete Immediate true 16m + ``` + + {{< notice note >}} + + 若将 `nfs-client` 设置为默认存储类型,KubeKey 则不会安装 OpenEBS。 + + {{}} + +2. 运行以下命令检查 Pod 的状态。 + + ```bash + kubectl get pod -n kube-system + ``` + + 请注意,`nfs-client` 安装在命名空间 `kube-system` 中,预期输出(不包括无关 Pod): + + ```bash + NAME READY STATUS RESTARTS AGE + nfs-client-nfs-client-provisioner-6fc95f4f79-92lsh 1/1 Running 0 16m + ``` + +### KubeSphere 控制台 + +1. 使用默认帐户和密码 (`admin/P@88w0rd`) 通过 `:30880` 登录 Web 控制台。点击左上角的**平台管理**,选择**集群管理**。 + +2. 选择**应用负载** > **容器组**,从下拉菜单中选择 `kube-system`,可以看到 `nfs-client` 的 Pod 正常运行。 + +3. 
选择**存储** > **存储类型**,可以看到集群中可用的存储类型。 + + {{< notice note >}} + + 有关如何在 KubeSphere 控制台上创建持久卷声明的更多信息,请参见[持久卷声明](../../../project-user-guide/storage/volumes/)。 + + {{}} diff --git a/content/zh/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-qingcloud-csi.md b/content/zh/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-qingcloud-csi.md new file mode 100644 index 000000000..4733e766a --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/persistent-storage-configurations/install-qingcloud-csi.md @@ -0,0 +1,274 @@ +--- +title: "安装 QingCloud CSI" +keywords: 'KubeSphere, Kubernetes, QingCloud CSI, 安装, 配置, 存储' +description: '使用 KubeKey 搭建 KubeSphere 集群并配置 QingCloud CSI 存储。' +linkTitle: "安装 QingCloud CSI" +weight: 3320 +--- + +如果您打算在[青云QingCloud](https://www.qingcloud.com/) 上安装 KubeSphere,可以选择 [QingCloud CSI](https://github.com/yunify/qingcloud-csi) 作为底层存储插件。 + +本教程演示了如何使用 KubeKey 搭建 KubeSphere 集群及配置 QingCloud CSI 以提供存储服务。 + +## 准备工作 + +您需要在[青云QingCloud 平台](https://intl.qingcloud.com/)上创建集群节点。 + +## 步骤 1:在青云QingCloud 平台上创建 API 密钥 + +若要确保该平台可以为集群创建云磁盘,就需要在单独的 QingCloud CSI 配置文件中提供 API 密钥(`qy_access_key_id` 和 `qy_secret_access_key`)。 + +1. 登录[青云QingCloud](https://console.qingcloud.com/login) 的 Web 控制台,从右上角的下拉菜单中选择 **API 密钥**。 + + ![access-key](/images/docs/v3.3/zh-cn/installing-on-linux/persistent-storage-configurations/qingcloud-csi/access-key.png) + +2. 点击**创建**生成密钥。创建完成后,下载密钥,该密钥存储在一个 csv 文件中。 + +## 步骤 2:为 QingCloud CSI 创建配置文件 + +单独的配置文件中包含 QingCloud CSI 的全部参数,KubeKey 将在安装过程中使用这些参数。 + +1. 访问您稍后想要下载 KubeKey 到其上的节点(任务机),运行以下命令创建配置文件。 + + ``` + vi csi-qingcloud.yaml + ``` + + 示例配置文件: + + ```yaml + config: + qy_access_key_id: "MBKTPXWCIRIEDQYQKXYL" #请替换为您自己的密钥 id。 + qy_secret_access_key: "cqEnHYZhdVCVif9qCUge3LNUXG1Cb9VzKY2RnBdX" #请替换为您自己的 API 密钥。 + zone: "pek3a" #仅支持小写字母。 + sc: + isDefaultClass: true #将其设置为默认存储类型。 + ``` + +2. 字段 `zone` 指定云磁盘创建的可用区。在青云QingCloud 平台,您必须在创建云磁盘之前指定一个可用区。 + + ![storage-zone](/images/docs/v3.3/zh-cn/installing-on-linux/persistent-storage-configurations/qingcloud-csi/storage-zone.jpg) + + 请确保为 `zone` 指定的值与以下区域 ID 匹配: + + | 可用区 | 区域 ID | + | --------------------------------------- | ----------------------- | + | 上海1区-A/上海1区-B | sh1a/sh1b | + | 北京3区-A/北京3区-B/北京3区-C/北京3区-D | pek3a/pek3b/pek3c/pek3d | + | 广东2区-A/广东2区-B | gd2a/gd2b | + | 亚太2区-A | ap2a | + + 如果想要配置更多的值,请参见 [QingCloud CSI Chart 配置](https://github.com/kubesphere/helm-charts/tree/master/src/test/csi-qingcloud#configuration)。 + +3. 保存文件。 + +## 步骤 3:下载 KubeKey + +根据以下步骤在任务机上下载 [KubeKey](../../../installing-on-linux/introduction/kubekey/)。 + +{{< tabs >}} + +{{< tab "如果您能够正常访问 GitHub/Googleapis" >}} + +从 [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) 下载 KubeKey 或者直接运行以下命令。 + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "如果您访问 GitHub/Googleapis 受限" >}} + +首先运行以下命令,确保您从正确的区域下载 KubeKey。 + +```bash +export KKZONE=cn +``` + +运行以下命令下载 KubeKey: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +下载 KubeKey 之后,如果您将其转移到访问 Googleapis 受限的新机器上,请务必再次运行 `export KKZONE=cn`,然后继续执行以下步骤。 + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +通过以上的命令,可以下载 KubeKey 的最新版本。您可以更改命令中的版本号来下载特定的版本。 + +{{}} + +使 `kk` 可执行: + +```bash +chmod +x kk +``` + +## 步骤 4:创建集群 + +1. 
指定您想要安装的 Kubernetes 版本和 KubeSphere 版本,例如: + + ```bash + ./kk create config --with-kubernetes v1.22.12 --with-kubesphere v3.3.2 + ``` + + {{< notice note >}} + + - 安装 KubeSphere 3.3 的建议 Kubernetes 版本:v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 v1.21.x。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.23.10。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 + + - 如果您在此步骤的命令中不添加标志 `--with-kubesphere`,则不会部署 KubeSphere,只能使用配置文件中的 `addons` 字段安装,或者在您后续使用 `./kk create cluster` 命令时再次添加这个标志。 + - 如果您添加标志 `--with-kubesphere` 时不指定 KubeSphere 版本,则会安装最新版本的 KubeSphere。 + + {{}} + +2. 如果您不自定义名称,将创建默认文件 `config-sample.yaml`。编辑文件: + + ```bash + vi config-sample.yaml + ``` + + ```yaml + ... + metadata: + name: sample + spec: + hosts: + - {name: master, address: 192.168.0.2, internalAddress: 192.168.0.2, user: root, password: Testing123} + - {name: node1, address: 192.168.0.3, internalAddress: 192.168.0.3, user: root, password: Testing123} + - {name: node2, address: 192.168.0.4, internalAddress: 192.168.0.4, user: root, password: Testing123} + roleGroups: + etcd: + - master + control-plane: + - master + worker: + - node1 + - node2 + controlPlaneEndpoint: + domain: lb.kubesphere.local + address: "" + port: 6443 + kubernetes: + version: v1.22.12 + imageRepo: kubesphere + clusterName: cluster.local + network: + plugin: calico + kubePodsCIDR: 10.233.64.0/18 + kubeServiceCIDR: 10.233.0.0/18 + registry: + registryMirrors: [] + insecureRegistries: [] + addons: + - name: csi-qingcloud + namespace: kube-system + sources: + chart: + name: csi-qingcloud + repo: https://charts.kubesphere.io/test + valuesFile: /root/csi-qingcloud.yaml + ... + ``` + +3. 请特别注意 `addons` 字段,您必须在该字段下提供 QingCloud CSI 的信息。有关文件中每个参数的更多信息,请参见[多节点安装](../../../installing-on-linux/introduction/multioverview/#2-编辑配置文件)。 + + {{< notice note >}} + + KubeKey 将通过 Helm Chart 安装 QingCloud CSI 及其 StorageClass。 + + {{}} + +4. 保存文件,执行以下命令安装 Kubernetes 和 KubeSphere: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +5. 安装完成后,可以使用以下命令检查安装日志: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + 预期输出: + + ```bash + ##################################################### + ### Welcome to KubeSphere! ### + ##################################################### + + Console: http://192.168.0.3:30880 + Account: admin + Password: P@88w0rd + + NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. + + ##################################################### + https://kubesphere.io 20xx-xx-xx xx:xx:xx + ##################################################### + ``` + +## 步骤 5:验证安装 + +您可以使用命令行或者通过 KubeSphere 的 Web 控制台来验证 QingCloud CSI 是否安装成功。 + +### 命令行 + +1. 运行以下命令检查存储类型。 + + ```bash + kubectl get sc + ``` + + 预期输出: + + ```bash + NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE + csi-qingcloud (default) disk.csi.qingcloud.com Delete WaitForFirstConsumer true 28m + ``` + +2. 
运行以下命令检查 Pod 的状态。 + + ```bash + kubectl get pod -n kube-system + ``` + + 请注意,`csi-qingcloud` 安装在命名空间 `kube-system` 中,预期输出(不包括其他无关 Pod): + + ```bash + NAME READY STATUS RESTARTS AGE + csi-qingcloud-controller-f95dcddfb-2gfck 5/5 Running 0 28m + csi-qingcloud-node-7dzz8 2/2 Running 0 28m + csi-qingcloud-node-k4hsj 2/2 Running 0 28m + csi-qingcloud-node-sptdb 2/2 Running 0 28m + ``` + +### KubeSphere 控制台 + +1. 使用默认帐户和密码 (`admin/P@88w0rd`) 通过 `:30880` 登录 Web 控制台。点击左上角的**平台管理**,选择**集群管理**。 + +2. 选择**应用负载** > **容器组**,从下拉菜单中选择 `kube-system`。可以看到 `csi-qingcloud` 的 Pod 正常运行。 + +3. 选择**存储**下的**存储类型**,可以看到集群中可用的存储类型。 + + {{< notice note >}} + + 有关如何在 KubeSphere 控制台创建持久卷声明的更多信息,请参见[持久卷声明](../../../project-user-guide/storage/volumes/)。 + + {{}} diff --git a/content/zh/docs/v3.4/installing-on-linux/persistent-storage-configurations/understand-persistent-storage.md b/content/zh/docs/v3.4/installing-on-linux/persistent-storage-configurations/understand-persistent-storage.md new file mode 100644 index 000000000..c131a0163 --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/persistent-storage-configurations/understand-persistent-storage.md @@ -0,0 +1,49 @@ +--- +title: "安装持久化存储" +keywords: 'KubeSphere, Kubernetes, 存储, 安装, 配置' +description: '理解持久化存储。' +linkTitle: "安装持久化存储" +weight: 3310 +--- + +持久化存储是安装 KubeSphere 的**必备条件**。使用 [KubeKey](../../../installing-on-linux/introduction/kubekey/) 搭建 KubeSphere 集群时,可以安装不同的存储系统作为[插件](https://github.com/kubesphere/kubekey/blob/master/docs/addons.md)。在 Linux 上通过 KubeKey 安装 KubeSphere 的一般步骤如下: + +1. 安装 Kubernetes。 +2. 安装所提供的任何插件。 +3. 通过 [ks-installer](https://github.com/kubesphere/ks-installer) 安装 KubeSphere。 + +在第 2 步中,**必须**安装可用的 StorageClass,包括: + +- StorageClass 本身 +- 必要情况下,还需为 StorageClass 安装存储插件 + +{{< notice note >}} + +某些存储系统需要您预先准备存储服务器,以提供外部存储服务。 + +{{}} + +## KubeKey 如何安装不同的存储系统 + +KubeKey 会为集群创建[一个配置文件](../../../installing-on-linux/introduction/multioverview/#2-编辑配置文件)(默认为 `config-sample.yaml`),其中包含定义不同资源(包括各种插件)的全部必要参数。QingCloud CSI 等不同的存储系统也能通过 Helm Chart 或 YAML 作为插件进行安装。若要让 KubeKey 以预期的方式来安装这些存储系统,就必须为 KubeKey 提供这些存储系统的必要配置。 + +通常,有两种方法能使 KubeKey 应用即将安装的存储系统的配置。 + +1. 直接在 `config-sample.yaml` 中的 `addons` 字段下输入必要的参数。 +2. 
为插件创建一个单独的配置文件,列出所有必要的参数,并在 `config-sample.yaml` 中提供文件的路径,以便 KubeKey 在安装过程中引用该路径。 + +有关更多信息,请参见[插件](https://github.com/kubesphere/kubekey/blob/master/docs/addons.md)。 + +## 默认存储类型 + +KubeKey 支持安装不同的存储插件和存储类型。无论您要安装哪种存储系统,都可以在其配置文件中指定是否设为默认存储类型。如果 KubeKey 检测到未指定默认存储类型,则将默认安装 [OpenEBS](https://github.com/openebs/openebs)。 + +OpenEBS 本地 PV 动态供应器可以使用节点上的唯一 HostPath(目录)来创建 Kubernetes 本地持久卷,以持久化数据。用户没有特定的存储系统时,可以通过默认的 OpenEBS 快速上手。 + +## 多存储解决方案 + +如果打算安装多个存储插件,那么只能将其中一个设置为默认存储类型。否则,KubeKey 将无法识别使用哪种存储类型。 + +## 支持的 CSI 插件 + +{{< content "common/csi-plugins.md" >}} \ No newline at end of file diff --git a/content/zh/docs/v3.4/installing-on-linux/public-cloud/_index.md b/content/zh/docs/v3.4/installing-on-linux/public-cloud/_index.md new file mode 100644 index 000000000..28eb7b9b7 --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/public-cloud/_index.md @@ -0,0 +1,8 @@ +--- +linkTitle: "在公共云上安装" +weight: 3400 + +_build: + render: false +--- +Translation installing on linux chapter, from en version to zh \ No newline at end of file diff --git a/content/zh/docs/v3.4/installing-on-linux/public-cloud/install-kubesphere-on-ali-ecs.md b/content/zh/docs/v3.4/installing-on-linux/public-cloud/install-kubesphere-on-ali-ecs.md new file mode 100644 index 000000000..1611c9e2d --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/public-cloud/install-kubesphere-on-ali-ecs.md @@ -0,0 +1,273 @@ +--- +title: "KubeSphere 在阿里云 ECS 高可用实例" +keywords: "Kubesphere 安装, 阿里云, ECS, 高可用性, 高可用性, 负载均衡器" +description: "了解如何在阿里云虚拟机上创建高可用的 KubeSphere 集群。" + +Weight: 3240 +--- + +对于生产环境,我们需要考虑集群的高可用性。本文教您部署如何在多台阿里 ECS 实例快速部署一套高可用的生产环境。要满足 Kubernetes 集群服务需要做到高可用,需要保证 kube-apiserver 的 HA ,可使用以下下列两种方式: + +- 阿里云 SLB (推荐) +- keepalived + haproxy [keepalived + haproxy](https://ask.kubesphere.io/forum/d/1566-kubernetes-keepalived-haproxy)对 kube-apiserver 进行负载均衡,实现高可用 kubernetes 集群。 + +## 前提条件 + +- 考虑到数据的持久性,对于生产环境,我们建议您准备持久化存储。若搭建开发和测试,您可以直接使用默认集成的 OpenEBS 准备 LocalPV; +- SSH 可以访问所有节点; +- 所有节点的时间同步; +- Red Hat 在其 Linux 发行版本中包括了 SELinux,建议关闭 SELinux 或者将 SELinux 的模式切换为 Permissive [宽容]工作模式。 + +## 部署架构 + +![部署架构](/images/docs/v3.3/ali-ecs/ali.png) + +## 创建主机 + +本示例创建 SLB + 6 台 **CentOS Linux release 7.6.1810 (Core)** 的虚拟机,每台配置为 **2 Core 4 GB 40 G**,仅用于最小化安装,若资源充足建议使用每台配置 **4 Core 8 GB 100 G** 以上的虚拟机。 + +| 主机IP | 主机名称 | 角色 | +| --- | --- | --- | +|39.104.82.170|Eip|slb| +|172.24.107.72|master1|master1, etcd| +|172.24.107.73|master2|master2, etcd| +|172.24.107.74|master3|master3, etcd| +|172.24.107.75|node1|node| +|172.24.107.76|node2|node| +|172.24.107.77|node3|node| + +> 注意: 由于演示机器有限,所以把 etcd 跟 master 放在同样 3 台机器,在生产环境建议单独部署至少 3 台 etcd,提高稳定性。 + +## 使用阿里 SLB 部署 + +以下创建一个 SLB,设置监听集群的 6443 端口。 + +### 创建 SLB + +进入到阿里云控制, 在左侧列表选择'负载均衡', 选择'实例管理' 进入下图, 选择'创建负载均衡' + +![1-1-创建slb](/images/docs/v3.3/ali-ecs/ali-slb-create.png) + +### 配置 SLB + +配置规格根据自身流量规模创建 + +![2-1-创建slb](/images/docs/v3.3/ali-ecs/ali-slb-config.png) + +注意在后面的 config.yaml 需要配置 slb 分配的地址 + +```yaml +controlPlaneEndpoint: + domain: lb.kubesphere.local + address: "39.104.82.170" + port: 6443 +``` + +### 配置SLB 主机实例 + +需要在服务器组添加需要负载的3台 master 主机后按下图顺序配置监听 TCP 6443 端口 (api-server) + +![3-1-添加主机](/images/docs/v3.3/ali-ecs/ali-slb-add.png) + +![3-2-配置监听端口](/images/docs/v3.3/ali-ecs/ali-slb-listen-conf1.png) + +![3-3-配置监听端口](/images/docs/v3.3/ali-ecs/ali-slb-listen-conf2.png) + +![3-4-配置监听端口](/images/docs/v3.3/ali-ecs/ali-slb-listen-conf3.png) + +{{< notice note >}} +- 现在的健康检查暂时是失败的,因为还没部署 master 的服务,所以端口 telnet 不通的。 +- 完成上述操作后,提交审核即可 +{{}} + +### 获取 Installer + +下载可执行安装程序 
`kk` 至一台目标机器: + +{{< tabs >}} + +{{< tab "如果您能正常访问 GitHub/Googleapis" >}} + +从 [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) 下载 KubeKey 或直接使用以下命令。 + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "如果您访问 GitHub/Googleapis 受限" >}} + +先执行以下命令以确保您从正确的区域下载 KubeKey。 + +```bash +export KKZONE=cn +``` + +执行以下命令下载 KubeKey。 + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +在您下载 KubeKey 后,如果您将其传至新的机器,且访问 Googleapis 同样受限,在您执行以下步骤之前请务必再次执行 `export KKZONE=cn` 命令。 + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +执行以上命令会下载最新版 KubeKey,您可以修改命令中的版本号下载指定版本。 + +{{}} + +为 `kk` 添加可执行权限: + +```bash +chmod +x kk +``` + +{{< notice tip >}} +您可以使用高级安装来控制自定义参数或创建多节点集群。具体来说,通过指定配置文件来创建集群。 +{{}} + +### 使用 KubeKey 部署集群 + +在当前位置创建配置文件 `config-sample.yaml`: + +```bash +./kk create config --with-kubesphere v3.3.2 --with-kubernetes v1.22.12 -f config-sample.yaml +``` + +### 集群配置调整 + +修改配置文件 `config-sample.yaml`: + +``` +vi config-sample.yaml +``` + +参考以下 `config-sample.yaml` 的主机节点配置,替换为您 + +```yaml +#vi ~/config-sample.yaml +apiVersion: kubekey.kubesphere.io/v1alpha1 +kind: Cluster +metadata: + name: config-sample + spec: + hosts: + - {name: master1, address: 172.24.107.72, internalAddress: 172.24.107.72, user: root, password: QWEqwe123} + - {name: master2, address: 172.24.107.73, internalAddress: 172.24.107.73, user: root, password: QWEqwe123} + - {name: master3, address: 172.24.107.74, internalAddress: 172.24.107.74, user: root, password: QWEqwe123} + - {name: node1, address: 172.24.107.75, internalAddress: 172.24.107.75, user: root, password: QWEqwe123} + - {name: node2, address: 172.24.107.76, internalAddress: 172.24.107.76, user: root, password: QWEqwe123} + - {name: node3, address: 172.24.107.77, internalAddress: 172.24.107.77, user: root, password: QWEqwe123} + roleGroups: + etcd: + - master1 + - master2 + - master3 + control-plane: + - master1 + - master2 + - master3 + worker: + - node1 + - node2 + - node3 + controlPlaneEndpoint: + domain: lb.kubesphere.local + address: "39.104.82.170" + port: 6443 + kubernetes: + version: v1.17.9 + imageRepo: kubesphere + clusterName: cluster.local + masqueradeAll: false # masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode. [Default: false] + maxPods: 110 # maxPods is the number of pods that can run on this Kubelet. [Default: 110] + nodeCidrMaskSize: 24 # internal network node size allocation. This is the size allocated to each node on your network. [Default: 24] + proxyMode: ipvs # mode specifies which proxy mode to use. [Default: ipvs] + network: + plugin: calico + calico: + ipipMode: Always # IPIP Mode to use for the IPv4 POOL created at start up. If set to a value other than Never, vxlanMode should be set to "Never". [Always | CrossSubnet | Never] [Default: Always] + vxlanMode: Never # VXLAN Mode to use for the IPv4 POOL created at start up. If set to a value other than Never, ipipMode should be set to "Never". [Always | CrossSubnet | Never] [Default: Never] + vethMTU: 1440 # The maximum transmission unit (MTU) setting determines the largest packet size that can be transmitted through your network. 
[Default: 1440] + kubePodsCIDR: 10.233.64.0/18 + kubeServiceCIDR: 10.233.0.0/18 + registry: + registryMirrors: [] + insecureRegistries: [] + addons: [] + +··· +# 其它配置可以在安装后之后根据需要进行修改 +``` + +#### 持久化存储配置 + +如本文开头的前提条件所说,对于生产环境,我们建议您准备持久性存储,可参考以下说明进行配置。若搭建开发和测试,您可以直接使用默认集成的 OpenEBS 准备 LocalPV,则可以跳过这小节。 + +{{< notice note >}} +- 继续编辑上述 `config-sample.yaml` 文件,找到 `[addons]` 字段,这里支持定义任何持久化存储的插件或客户端,如 CSI ( +alibaba-cloud-csi-driver)、NFS Client、Ceph、GlusterFS,您可以根据您自己的持久化存储服务类型,并参考 [持久化存储服务](../../../installing-on-linux/persistent-storage-configurations/understand-persistent-storage/) 中对应的示例 YAML 文件进行设置。 +- 只需要将 CSI 存储插件安装时需要 apply 的所有 yaml 文件在 `[addons]` 中列出即可,注意预先参考 [Alibaba Cloud Kubernetes CSI Plugin](https://github.com/kubernetes-sigs/alibaba-cloud-csi-driver#alibaba-cloud-kubernetes-csi-plugin),选择您需要的存储类型的 CSI 插件,如 Cloud Disk CSI Plugin、NAS CSI Plugin、NAS CSI Plugin、OSS CSI Plugin,然后在 CSI 的相关 yaml 中配置对接阿里云的相关信息。 +{{}} + +### 执行命令创建集群 + +完成上述配置后,通过配置文件创建集群。 + +```bash +./kk create cluster -f config-sample.yaml + +# 查看 KubeSphere 安装日志 -- 直到出现控制台的访问地址和登录帐户 +kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f +``` + +``` +************************************************** +##################################################### +### Welcome to KubeSphere! ### +##################################################### + +Console: http://172.24.107.72:30880 +Account: admin +Password: P@88w0rd + +NOTES: +1. After you log into the console, please check the +monitoring status of service components in +the "Cluster Management". If any service is not +ready, please wait patiently until all components +are up and running. +2. Please change the default password after login. + +##################################################### +https://kubesphere.io 2020-08-24 23:30:06 +##################################################### +``` + +- 访问公网 IP + Port 为部署后的使用情况,使用默认帐户密码 (`admin/P@88w0rd`),文章安装为最小化,登录点击`工作台` 可看到下图安装组件列表和机器情况。 + +![面板图](/images/docs/v3.3/ali-ecs/succes.png) + +## 如何自定义开启可插拔组件 + +- 点击**集群管理** > **定制资源定义**,在过滤条件框输入 `ClusterConfiguration`。 + +- 点击 `ClusterConfiguration` 详情,对 `ks-installer` 编辑保存退出即可,组件描述介绍:[文档说明](https://github.com/kubesphere/ks-installer/blob/master/deploy/cluster-configuration.yaml)。 + +## FAQ + +> 提示: 如果安装过程中碰到 `Failed to add worker to cluster: Failed to exec command...` +>
+``` bash 处理方式 +kubeadm reset +``` diff --git a/content/zh/docs/v3.4/installing-on-linux/public-cloud/install-kubesphere-on-azure-vms.md b/content/zh/docs/v3.4/installing-on-linux/public-cloud/install-kubesphere-on-azure-vms.md new file mode 100644 index 000000000..4496fdfe0 --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/public-cloud/install-kubesphere-on-azure-vms.md @@ -0,0 +1,273 @@ +--- +title: "在 Azure VM 实例上部署 KubeSphere" +keywords: "KubeSphere, Installation, HA, high availability, load balancer, Azure" +description: "了解如何在 Azure 虚拟机上创建高可用 KubeSphere 集群。" +linkTitle: "在 Azure VM 实例上部署 KubeSphere" +Weight: 3410 + +--- + +您可以使用 [Azure 云平台](https://azure.microsoft.com/zh-cn/overview/what-is-azure/)自行安装和管理 Kubernetes,或采用托管 Kubernetes 解决方案。如果要使用完全托管平台解决方案,请参阅 [在 AKS 上部署 KubeSphere](../../../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks/)。 + +此外,您也可以在 Azure 实例上搭建高可用集群。本指南演示如何创建生产就绪的 Kubernetes 和 KubeSphere 集群。 + +## 简介 + +本教程使用 Azure 虚拟机的两个主要功能: + +- [虚拟机规模集](https://docs.microsoft.com/zh-cn/azure/virtual-machine-scale-sets/overview)(Virtual Machine Scale Sets 简称 VMSS):使用 Azure VMSS 可以创建和管理一组负载均衡的虚拟机。虚拟机实例的数量可以根据需求或者定义的计划自动增加或减少(支持 Kubernetes Autoscaler,本教程未介绍。更多信息请参考 [autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/azure)),非常适合工作节点。 +- [可用性集](https://docs.microsoft.com/zh-cn/azure/virtual-machines/availability-set-overview):可用性集是数据中心内自动分布在容错域中的虚拟机的逻辑分组。这种方法限制了潜在的硬件故障、网络中断或电源中断的影响。所有充当主节点和 etcd 节点的虚拟机将被置于一个可用性集中,以实现高可用性。 + +除这些虚拟机外,还将使用负载均衡器、虚拟网络和网络安全组等其他资源。 + +## 准备工作 + +- 需要一个 [Azure](https://portal.azure.com) 帐户来创建所有资源。 +- 了解 [Azure 资源管理器](https://docs.microsoft.com/zh-cn/azure/azure-resource-manager/templates/)(Azure Resource Manager 简称 ARM)模板的基本知识,这些模板文件定义您项目的基础结构和配置。 +- 对于生产环境,建议准备持久化存储并创建 StorageClass。对于开发和测试环境,可以使用 [OpenEBS](https://openebs.io/)(由 KubeKey 默认安装)提供 LocalPV。 + +## 架构 + +六台 **Ubuntu 18.04** 的机器会被部署至 Azure 资源组中。其中三台机器会分至同一个可用性集,同时充当主节点和 etcd 节点。其他三个虚拟机会被定义为 VMSS,工作节点将在其中运行。 + +![Architecture](/images/docs/v3.3/aks/Azure-architecture.png) + +这些虚拟机将连接至负载均衡器,其中两个包含预定义规则: + +- **入站 NAT**:为每台机器映射 SSH 端口,以便管理虚拟机。 +- **负载均衡**:默认情况下,http 和 https 端口将映射至节点池。后续可根据需求添加其他端口。 + +| 服务 | 协议 | 规则 | 后端端口 | 前端端口 | 节点池 | +| ---------- | ---- | -------- | -------- | -------------------------------- | ---------------- | +| ssh | TCP | 入站 NAT | 22 | 50200, 50201, 50202, 50100~50199 | 主节点, 普通节点 | +| api 服务器 | TCP | 负载均衡 | 6443 | 6443 | 主节点 | +| ks 控制台 | TCP | 负载均衡 | 30880 | 30880 | 主节点 | +| http | TCP | 负载均衡 | 80 | 80 | 普通节点 | +| https | TCP | 负载均衡 | 443 | 443 | 普通节点 | + +## 创建高可用集群基础设施 + +您不必逐个创建这些资源。基于在 Azure 上**基础设施即代码**的概念,在这个架构下所有资源已经被定义成 ARM 模板。 + +### 准备机器 + +1. 点击 **Deploy** 按钮,页面将会被重定向至 Azure 并被要求填写部署参数。 + + Deploy to Azure Visualize + +2. 在显示页面上,只需更改几个参数。点击 **Resource group** 下方的 **Create new**,输入名称,例如:`KubeSphereVMRG`。 + +3. 在 **Admin Username** 中输入管理员用户名。 + +4. 复制您的 SSH 公钥至 **Admin Key** 中。或者,使用 `ssh-keygen` 创建一个新的密钥。 + + ![azure-template-parameters](/images/docs/v3.3/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-azure-vms/azure-template-parameters.png) + + {{< notice note >}} + +Linux 只接受 SSH 验证,密码身份验证在其配置中受限。 + +{{}} + +5. 
点击底部的 **Purchase** 继续。 + +### 查看门户中的 Azure 资源 + +创建成功后,所有资源会显示在 `KubeSphereVMRG` 资源组中。记录负载均衡器的公用 IP 和虚拟机的私有 IP 地址,以备后续使用。 + +![New Created Resources](/images/docs/v3.3/aks/azure-vm-all-resources.png) + +## 部署 Kubernetes 和 KubeSphere + +在设备上执行以下命令,或者通过 SSH 连接其中一台主节点虚拟机。在安装过程中,文件会被下载并分配到每个虚拟机中。 + +```bash +# copy your private ssh to master-0 +scp -P 50200 ~/.ssh/id_rsa kubesphere@40.81.5.xx:/home/kubesphere/.ssh/ + +# ssh to the master-0 +ssh -i .ssh/id_rsa2 -p50200 kubesphere@40.81.5.xx +``` + +### 下载 KubeKey + +[Kubekey](../../../installing-on-linux/introduction/kubekey/) 是一个全新下载工具,提供简单、快速和灵活的方式来安装 Kubernetes 和 KubeSphere。 + +1. 下载 KubeKey,便于下一步生成配置文件。 + + {{< tabs >}} + + {{< tab "如果您能正常访问 GitHub/Googleapis">}} + +从 KubeKey 的 [Github 发布页面](https://github.com/kubesphere/kubekey/releases)下载,或执行以下命令: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "如果您访问 GitHub/Googleapis 受限" >}} + +运行以下命令,确保从正确区域下载 KubeKey。 + +```bash +export KKZONE=cn +``` + +运行以下命令下载 KubeKey: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +下载 KubeKey 之后,如果在与 Googleapis 网络连接不良的新机器上,则必须再次运行 `export KKZONE=cn`,然后继续执行一下步骤。 + +{{}} + +{{}} + +{{}} + + {{< notice note >}} + +上面的命令会下载 KubeKey 最新版本。您可以在命令中更改版本号以下载特定版本。 + +{{}} + + 给予 `kk` 执行权限: + + ```bash + chmod +x kk + ``` + + + +1. 使用默认配置创建示例配置文件,这里以 Kubernetes v1.22.12 为例。 + + ```bash + ./kk create config --with-kubesphere v3.3.2 --with-kubernetes v1.22.12 + ``` + + {{< notice note >}} + +- KubeSphere 3.3 对应 Kubernetes 版本推荐:v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 v1.21.x。如果未指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.23.10。有关支持的 Kubernetes 版本请参阅[支持矩阵](../../../installing-on-linux/introduction/kubekey/#support-matrix)。 +- 如果在此步骤中的命令中未添加标志 `--with-kubesphere`,则不会部署 KubeSphere,除非您使用配置文件中的 `addons` 字段进行安装,或稍后使用 `./kk create cluster` 时再次添加此标志。 + +- 如果在未指定 KubeSphere 版本的情况下添加标志 --with kubesphere`,将安装 KubeSphere 的最新版本。 + +{{}} + +### 配置文件示例 + +```yaml +spec: + hosts: + - {name: master-0, address: 40.81.5.xx, port: 50200, internalAddress: 10.0.1.4, user: kubesphere, privateKeyPath: "~/.ssh/id_rsa"} + - {name: master-1, address: 40.81.5.xx, port: 50201, internalAddress: 10.0.1.5, user: kubesphere, privateKeyPath: "~/.ssh/id_rsa"} + - {name: master-2, address: 40.81.5.xx, port: 50202, internalAddress: 10.0.1.6, user: kubesphere, privateKeyPath: "~/.ssh/id_rsa"} + - {name: node000000, address: 40.81.5.xx, port: 50100, internalAddress: 10.0.0.4, user: kubesphere, privateKeyPath: "~/.ssh/id_rsa"} + - {name: node000001, address: 40.81.5.xx, port: 50101, internalAddress: 10.0.0.5, user: kubesphere, privateKeyPath: "~/.ssh/id_rsa"} + - {name: node000002, address: 40.81.5.xx, port: 50102, internalAddress: 10.0.0.6, user: kubesphere, privateKeyPath: "~/.ssh/id_rsa"} + roleGroups: + etcd: + - master-0 + - master-1 + - master-2 + control-plane: + - master-0 + - master-1 + - master-2 + worker: + - node000000 + - node000001 + - node000002 +``` + +有关更多信息,请参阅[文件](https://github.com/kubesphere/kubekey/blob/release-2.2/docs/config-example.md)。 + +### 配置负载均衡器 + +除了节点信息外,还需要在同一 YAML 文件中配置负载均衡器。对于 IP 地址,您可以在 **Azure > KubeSphereVMRG > PublicLB** 中找到它。假设负载均衡器的 IP 地址和监听端口分别为 `40.81.5.xx` 和 `6443`,您可以参考以下示例。 + +```yaml +## Public LB config example +## apiserver_loadbalancer_domain_name: "lb.kubesphere.local" + controlPlaneEndpoint: + domain: lb.kubesphere.local + address: "40.81.5.xx" + port: 6443 +``` + +{{< notice note >}} + +由于 Azure 
[负载均衡器限制](https://docs.microsoft.com/zh-cn/azure/load-balancer/load-balancer-troubleshoot#cause-4-accessing-the-internal-load-balancer-frontend-from-the-participating-load-balancer-backend-pool-vm),直接使用公有的负载均衡器而不是内置的负载均衡器。 + +{{}} + +### 持久化存储插件配置 + +有关详细信息,请参阅[持久化存储配置](../../../installing-on-linux/persistent-storage-configurations/understand-persistent-storage/)。 + +### 配置网络插件 + +Azure 虚拟网络不支持 [Calico](https://docs.projectcalico.org/reference/public-cloud/azure#about-calico-on-azure) 使用 IPIP 模式,需要将网络插件更改为 `flannel`。 + +```yaml + network: + plugin: flannel + kubePodsCIDR: 10.233.64.0/18 + kubeServiceCIDR: 10.233.0.0/18 +``` + +### 创建集群 + +1. 在完成配置之后,执行以下命令开始安装: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +2. 检查安装日志: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + +3. 当安装完成,会出现如下信息: + + ```bash + ##################################################### + ### Welcome to KubeSphere! ### + ##################################################### + Console: http://10.128.0.44:30880 + Account: admin + Password: P@88w0rd + NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. + ##################################################### + https://kubesphere.io 2020-xx-xx xx:xx:xx + ``` + +4. 使用 `:30880` 和默认的帐户和密码 (`admin/p@88w0rd`) 访问 KubeShpere 控制台。 + + +## 添加额外端口 + +由于 Kubernetes 集群直接搭建在 Azure 实例上,因此负载均衡器未与 [Kubernetes 服务](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer)集成。但是,您仍然可以手动将 NodePort 映射到负载均衡器。这需要两个步骤: + +1. 在负载均衡器中创建新的负载均衡规则。 + + ![Load Balancer](/images/docs/v3.3/aks/azure-vm-loadbalancer-rule.png) + +2. 在网络安全组中创建入站安全规则以允许外网访问。 + + ![Firewall](/images/docs/v3.3/aks/azure-vm-firewall.png) + diff --git a/content/zh/docs/v3.4/installing-on-linux/public-cloud/install-kubesphere-on-huaweicloud-ecs.md b/content/zh/docs/v3.4/installing-on-linux/public-cloud/install-kubesphere-on-huaweicloud-ecs.md new file mode 100644 index 000000000..04beabc3c --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/public-cloud/install-kubesphere-on-huaweicloud-ecs.md @@ -0,0 +1,315 @@ +--- +title: "KubeSphere 在华为云 ECS 高可用实例" +keywords: "Kubesphere 安装, 华为云, ECS, 高可用性, 高可用性, 负载均衡器" +description: "了解如何在华为云虚拟机上创建高可用的 KubeSphere 集群。" + +Weight: 3230 +--- + +由于对于生产环境,我们需要考虑集群的高可用性。教您部署如何在华为云 ECS 实例服务快速部署一套高可用的生产环境 +Kubernetes 服务需要做到高可用,需要保证 kube-apiserver 的 HA ,推荐华为云负载均衡器服务. 
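+以下给出一个可选的验证示例(仅为示意,假设内网负载均衡器的 VIP 为下文示例中的 `192.168.1.8`、监听端口为 6443,请替换为您自己的实际地址):集群部署完成后,可直接通过负载均衡器访问 kube-apiserver 的健康检查接口,确认监听器转发正常。
+
+```bash
+# 通过内网负载均衡器访问 kube-apiserver 的健康检查接口(VIP 192.168.1.8 仅为示例)
+curl -k https://192.168.1.8:6443/healthz
+# 转发正常时通常返回 ok
+```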
+ +## 前提条件 + +- 请遵循该[指南](https://github.com/kubesphere/kubekey),确保您已经知道如何将 KubeSphere 与多节点集群一起安装。有关用于安装的 config.yaml 文件的详细信息。本教程重点介绍配置华为云负载均衡器服务高可用安装。 +- 考虑到数据的持久性,对于生产环境,我们不建议您使用存储OpenEBS,建议 NFS、GlusterFS、Ceph 等存储(需要提前准备)。文章为了进行开发和测试,集成了 OpenEBS 将 LocalPV 设置为默认的存储服务。 +- SSH 可以互相访问所有节点。 +- 所有节点的时间同步。 + +## 创建主机 + +本示例创建 6 台 Ubuntu 18.04 server 64bit 的云服务器,每台配置为 4 核 8 GB + +| 主机IP | 主机名称 | 角色 | +| --- | --- | --- | +|192.168.1.10|master1|master1, etcd| +|192.168.1.11|master2|master2, etcd| +|192.168.1.12|master3|master3, etcd| +|192.168.1.13|node1|node| +|192.168.1.14|node2|node| +|192.168.1.15|node3|node| + +> 注意:机器有限,所以把 etcd 放入 master,在生产环境建议单独部署 etcd,提高稳定性 + +## 华为云负载均衡器部署 +### 创建 VPC + +进入到华为云控制, 在左侧列表选择'虚拟私有云', 选择'创建虚拟私有云' 创建VPC,配置如下图 + +![1-1-创建VPC](/images/docs/v3.3/huawei-ecs/huawei-VPC-create.png) + +### 创建安全组 + +在 `访问控制→ 安全组`下,创建一个安全组,设置入方向的规则参考如下: + +![2-1-创建安全组](/images/docs/v3.3/huawei-ecs/huawei-rules-create.png) +> 提示:后端服务器的安全组规则必须放行 100.125.0.0/16 网段,否则会导致健康检查异常,详见 后端服务器配置安全组 。此外,还应放行 192.168.1.0/24 (主机之间的网络需全放行)。 + +### 创建主机 +![3-1-选择主机配置](/images/docs/v3.3/huawei-ecs/huawei-ECS-basic-settings.png) +在网络配置中,网络选择第一步创建的 VPC 和子网。在安全组中,选择上一步创建的安全组。 +![3-2-选择网络配置](/images/docs/v3.3/huawei-ecs/huawei-ECS-network-settings.png) + +### 创建负载均衡器 +在左侧栏选择 '弹性负载均衡器',进入后选择 购买弹性负载均衡器 +> 以下健康检查结果在部署后才会显示正常,目前状态为异常 +#### 内网LB 配置 +为所有master 节点 添加后端监听器 ,监听端口为 6443 + +![4-1-配置内网LB](/images/docs/v3.3/huawei-ecs/huawei-master-lb-basic-config.png) + +![4-2-配置内网LB](/images/docs/v3.3/huawei-ecs/huawei-master-lb-listeners-config.png) +#### 外网LB 配置 +若集群需要配置公网访问,则需要为外网负载均衡器配置一个公网 IP为 所有节点 添加后端监听器,监听端口为 80(测试使用 30880 端口,此处 80 端口也需要在安全组中开放)。 + +![4-3-配置外网LB](/images/docs/v3.3/huawei-ecs/huawei-public-lb-basic-config.png) + +![4-4-配置外网LB](/images/docs/v3.3/huawei-ecs/huawei-public-lb-listeners-config.png) + +后面配置文件 config.yaml 需要配置在前面创建的 SLB 分配的地址(VIP) + +```yaml + controlPlaneEndpoint: + domain: lb.kubesphere.local + address: "192.168.1.8" + port: 6443 +``` +### 获取安装程序可执行文件 + +下载可执行安装程序 `kk` 至一台目标机器: + +{{< tabs >}} + +{{< tab "如果您能正常访问 GitHub/Googleapis" >}} + +从 [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) 下载 KubeKey 或直接使用以下命令。 + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "如果您访问 GitHub/Googleapis 受限" >}} + +先执行以下命令以确保您从正确的区域下载 KubeKey。 + +```bash +export KKZONE=cn +``` + +执行以下命令下载 KubeKey。 + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +在您下载 KubeKey 后,如果您将其传至新的机器,且访问 Googleapis 同样受限,在您执行以下步骤之前请务必再次执行 `export KKZONE=cn` 命令。 + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +执行以上命令会下载最新版 KubeKey,您可以修改命令中的版本号下载指定版本。 + +{{}} + +为 `kk` 添加可执行权限: + +```bash +chmod +x kk +``` + +{{< notice tip >}} + + 您可以使用高级安装来控制自定义参数或创建多节点集群。具体来说,通过指定配置文件来创建集群。 + +{{}} + +### 使用 kubekey 部署 + +在当前位置创建配置文件 `master-HA.yaml`: + +```bash +./kk create config --with-kubesphere v3.3.2 --with-kubernetes v1.22.12 -f master-HA.yaml +``` + +### 集群配置调整 + +目前当前集群开启了全量的组件,文末也提供了自定义的方法.可默认为 false: + +```yaml +apiVersion: kubekey.kubesphere.io/v1alpha1 +kind: Cluster +metadata: + name: master-HA +spec: + hosts: + - {name: master1, address: 192.168.1.10, internalAddress: 192.168.1.10, password: yourpassword} # Assume that the default port for SSH is 22, otherwise add the port number after the IP address as above + - {name: master2, address: 192.168.1.11, internalAddress: 192.168.1.11, password: yourpassword} # Assume that the default port for SSH is 22, otherwise add the port number after the IP address as above 
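+  # (示例说明)若某节点的 SSH 端口不是默认的 22,可参照下行注释、在 address 之后加入 port 字段(nodeX 与 2222 仅为假设值):
+  # - {name: nodeX, address: <节点IP>, port: 2222, internalAddress: <节点IP>, password: yourpassword}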
+ - {name: master3, address: 192.168.1.12, internalAddress: 192.168.1.12, password: yourpassword} # Assume that the default port for SSH is 22, otherwise add the port number after the IP address as above + - {name: node1, address: 192.168.1.13, internalAddress: 192.168.1.13, password: yourpassword} # Assume that the default port for SSH is 22, otherwise add the port number after the IP address as above + - {name: node2, address: 192.168.1.14, internalAddress: 192.168.1.14, password: yourpassword} # Assume that the default port for SSH is 22SSH is 22, otherwise add the port number after the IP address as above + - {name: node3, address: 192.168.1.15, internalAddress: 192.168.1.15, password: yourpassword} # Assume that the default port for SSH is 22, otherwise add the port number after the IP address as above + roleGroups: + etcd: + - master[1:3] + control-plane: + - master[1:3] + worker: + - node[1:3] + controlPlaneEndpoint: + domain: lb.kubesphere.local + address: "192.168.1.8" + port: 6443 + kubernetes: + version: v1.17.9 + imageRepo: kubesphere + clusterName: cluster.local + masqueradeAll: false # masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode. [Default: false] + maxPods: 110 # maxPods is the number of pods that can run on this Kubelet. [Default: 110] + nodeCidrMaskSize: 24 # internal network node size allocation. This is the size allocated to each node on your network. [Default: 24] + proxyMode: ipvs # mode specifies which proxy mode to use. [Default: ipvs] + network: + plugin: calico + calico: + ipipMode: Always # IPIP Mode to use for the IPv4 POOL created at start up. If set to a value other than Never, vxlanMode should be set to "Never". [Always | CrossSubnet | Never] [Default: Always] + vxlanMode: Never # VXLAN Mode to use for the IPv4 POOL created at start up. If set to a value other than Never, ipipMode should be set to "Never". [Always | CrossSubnet | Never] [Default: Never] + vethMTU: 1440 # The maximum transmission unit (MTU) setting determines the largest packet size that can be transmitted through your network. [Default: 1440] + kubePodsCIDR: 10.233.64.0/18 + kubeServiceCIDR: 10.233.0.0/18 + registry: + registryMirrors: ["https://*.mirror.aliyuncs.com"] # # input your registryMirrors + insecureRegistries: [] + privateRegistry: "" + storage: + defaultStorageClass: localVolume + localVolume: + storageClassName: local + +--- +apiVersion: installer.kubesphere.io/v1alpha1 +kind: ClusterConfiguration +metadata: + name: ks-installer + namespace: kubesphere-system + labels: + version: v3.3.2 +spec: + local_registry: "" + persistence: + storageClass: "" + authentication: + jwtSecret: "" + etcd: + monitoring: true # Whether to install etcd monitoring dashboard + endpointIps: 192.168.1.10,192.168.1.11,192.168.1.12 # etcd cluster endpointIps + port: 2379 # etcd port + tlsEnable: true + common: + mysqlVolumeSize: 20Gi # MySQL PVC size + minioVolumeSize: 20Gi # Minio PVC size + etcdVolumeSize: 20Gi # etcd PVC size + openldapVolumeSize: 2Gi # openldap PVC size + redisVolumSize: 2Gi # Redis PVC size + es: # Storage backend for logging, tracing, events and auditing. 
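+      # (示例说明)如已有外部 Elasticsearch 服务,可不使用内置实例,改为取消下方 externalElasticsearchHost / externalElasticsearchPort 字段的注释并填写对应地址与端口。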
+ elasticsearchMasterReplicas: 1 # total number of master nodes, it's not allowed to use even number + elasticsearchDataReplicas: 1 # total number of data nodes + elasticsearchMasterVolumeSize: 4Gi # Volume size of Elasticsearch master nodes + elasticsearchDataVolumeSize: 20Gi # Volume size of Elasticsearch data nodes + logMaxAge: 7 # Log retention time in built-in Elasticsearch, it is 7 days by default. + elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log + # externalElasticsearchHost: + # externalElasticsearchPort: + console: + enableMultiLogin: false # enable/disable multiple sing on, it allows a user can be used by different users at the same time. + port: 30880 + alerting: # Whether to install KubeSphere alerting system. It enables Users to customize alerting policies to send messages to receivers in time with different time intervals and alerting levels to choose from. + enabled: true + auditing: # Whether to install KubeSphere audit log system. It provides a security-relevant chronological set of records,recording the sequence of activities happened in platform, initiated by different tenants. + enabled: true + devops: # Whether to install KubeSphere DevOps System. It provides out-of-box CI/CD system based on Jenkins, and automated workflow tools including Source-to-Image & Binary-to-Image + enabled: true + jenkinsMemoryLim: 2Gi # Jenkins memory limit + jenkinsMemoryReq: 1500Mi # Jenkins memory request + jenkinsVolumeSize: 8Gi # Jenkins volume size + jenkinsJavaOpts_Xms: 512m # The following three fields are JVM parameters + jenkinsJavaOpts_Xmx: 512m + jenkinsJavaOpts_MaxRAM: 2g + events: # Whether to install KubeSphere events system. It provides a graphical web console for Kubernetes Events exporting, filtering and alerting in multi-tenant Kubernetes clusters. + enabled: true + logging: # Whether to install KubeSphere logging system. Flexible logging functions are provided for log query, collection and management in a unified console. Additional log collectors can be added, such as Elasticsearch, Kafka and Fluentd. + enabled: true + logsidecarReplicas: 2 + metrics_server: # Whether to install metrics-server. IT enables HPA (Horizontal Pod Autoscaler). + enabled: true + monitoring: # + prometheusReplicas: 1 # Prometheus replicas are responsible for monitoring different segments of data source and provide high availability as well. + prometheusMemoryRequest: 400Mi # Prometheus request memory + prometheusVolumeSize: 20Gi # Prometheus PVC size + alertmanagerReplicas: 1 # AlertManager Replicas + multicluster: + clusterRole: none # host | member | none # You can install a solo cluster, or specify it as the role of host or member cluster + networkpolicy: # Network policies allow network isolation within the same cluster, which means firewalls can be set up between certain instances (Pods). + enabled: true + notification: # It supports notification management in multi-tenant Kubernetes clusters. It allows you to set AlertManager as its sender, and receivers include Email, Wechat Work, and Slack. + enabled: true + openpitrix: # Whether to install KubeSphere App Store. It provides an application store for Helm-based applications, and offer application lifecycle management + enabled: true + servicemesh: # Whether to install KubeSphere Service Mesh (Istio-based). 
It provides fine-grained traffic management, observability and tracing, and offer visualization for traffic topology + enabled: true +``` + +#### 持久化存储配置 + +如本文开头的前提条件所说,对于生产环境,我们建议您准备持久性存储,可参考以下说明进行配置。若搭建开发和测试,您可以直接使用默认集成的 OpenEBS 准备 LocalPV,则可以跳过这小节。 + +{{< notice note >}} +如果您有已有存储服务端,例如华为云可使用 [弹性文件存储(SFS)](https://support.huaweicloud.com/productdesc-sfs/zh-cn_topic_0034428718.html) 来作为存储服务。继续编辑上述 `config-sample.yaml` 文件,找到 `[addons]` 字段,这里支持定义任何持久化存储的插件或客户端,如 CSI、NFS Client、Ceph、GlusterFS,您可以根据您自己的持久化存储服务类型,并参考 [持久化存储服务](../../../installing-on-linux/persistent-storage-configurations/understand-persistent-storage/) 中对应的示例 YAML 文件进行设置。 +{{}} + +### 执行命令创建集群 + + ```bash + # 指定配置文件创建集群 + ./kk create cluster --with-kubesphere v3.3.2 -f master-HA.yaml + + # 查看 KubeSphere 安装日志 -- 直到出现控制台的访问地址和登录帐户 +kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + +``` +##################################################### +### Welcome to KubeSphere! ### +##################################################### + +Console: http://192.168.1.10:30880 +Account: admin +Password: P@88w0rd + +NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. + +##################################################### +https://kubesphere.io 2020-08-28 01:25:54 +##################################################### +``` + +访问公网 IP + Port 为部署后的使用情况,使用默认帐户密码 (`admin/P@88w0rd`),文章组件安装为最大化,登录点击`平台管理>集群管理`可看到下图安装组件列表和机器情况。 + + +## 如何自定义开启可插拔组件 + +点击**集群管理** > **定制资源定义**,在过滤条件框输入 `ClusterConfiguration`。 +点击 `ClusterConfiguration` 详情,对 `ks-installer` 编辑保存退出即可,组件描述介绍:[文档说明](https://github.com/kubesphere/ks-installer/blob/master/deploy/cluster-configuration.yaml)。 diff --git a/content/zh/docs/v3.4/installing-on-linux/public-cloud/install-kubesphere-on-qingcloud-vms.md b/content/zh/docs/v3.4/installing-on-linux/public-cloud/install-kubesphere-on-qingcloud-vms.md new file mode 100644 index 000000000..588bad916 --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/public-cloud/install-kubesphere-on-qingcloud-vms.md @@ -0,0 +1,339 @@ +--- +title: "在青云QingCloud 主机上部署 KubeSphere" +keywords: "KubeSphere, 安装, HA, 高可用性, LoadBalancer" +description: "了解如何在青云QingCloud 平台上创建高可用 KubeSphere 集群。" +linkTitle: "在青云QingCloud 主机上部署 KubeSphere" +Weight: 3420 +--- + +## 介绍 + +对于生产环境,需要考虑集群的高可用性。如果关键组件(例如 kube-apiserver、kube-scheduler 和 kube-controller-manager)在相同的主节点上运行,一旦主节点出现故障,Kubernetes 和 KubeSphere 将不可用。因此,您需要为多个主节点配置负载均衡器,以搭建高可用集群。您可以使用任何云负载均衡器或任何硬件负载均衡器(例如 F5)。此外,您也可以使用 Keepalived+[HAproxy](https://www.haproxy.com/) 或 NGINX 搭建高可用集群。 + +本教程演示如何创建两个[青云QingCloud 负载均衡器](https://docs.qingcloud.com/product/network/loadbalancer),分别用于内部和外部负载均衡,以及如何使用负载均衡器实现主节点和 etcd 节点的高可用性。 + +## 准备工作 + +- 您需要了解如何在多节点集群上安装 KubeSphere(请参见[多节点安装](../../../installing-on-linux/introduction/multioverview/))。有关安装中用到的配置文件的详细信息,请参见[编辑配置文件](../../../installing-on-linux/introduction/multioverview/#2-编辑配置文件)。本教程主要介绍如何配置负载均衡器。 +- 您需要注册一个[青云QingCloud ](https://console.qingcloud.com/login)帐户才能在青云QingCloud 创建负载均衡器。如在其他云平台上创建负载均衡器,请参考对应云厂商提供的指南。 +- 如果搭建生产环境,建议您提前准备持久化存储并创建 StorageClass。如果搭建开发测试环境,您可以直接使用集成的 OpenEBS 配置 LocalPV 存储服务。 + +## 集群架构 + +本教程使用六台 **Ubuntu 16.04.6** 机器。您需要创建两个负载均衡器,并在其中的三台机器上部署三个主节点和 etcd 节点。您可以在 KubeKey 创建的 `config-sample.yaml` 
文件中配置上述节点(`config-sample.yaml` 为文件的默认名称,可以手动更改)。 + +![ha-architecture](/images/docs/v3.3/zh-cn/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/ha-architecture.png) + +{{< notice note >}} + +根据 Kubernetes 官方文档[高可用拓扑选项](https://kubernetes.io/zh/docs/setup/production-environment/tools/kubeadm/ha-topology/),Kubernetes 高可用集群有两种拓扑配置形式,即堆叠 etcd 拓扑和外部 etcd 拓扑。在搭建高可用集群前,您需要根据该文档仔细权衡两种拓扑的利弊。本教程采用堆叠 etcd 拓扑搭建高可用集群作为示例。 + +{{}} + +## 安装高可用集群 + +### 步骤 1:创建负载均衡器 + +本步骤演示如何在青云QingCloud 平台上创建负载均衡器。 + +#### 创建内部负载均衡器 + +1. 登录[青云QingCloud 控制台](https://console.qingcloud.com/login)。在左侧导航栏选择**网络与 CDN** 下的**负载均衡器**,然后点击**创建**。 + + ![create-lb](/images/docs/v3.3/zh-cn/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/create-lb.png) + +2. 在弹出的对话框中,设置负载均衡器的名称,在**网络**下拉列表中选择机器所在的私有网络(在本例中为 `pn`),其他参数可以保持默认,然后点击**提交**。 + + ![qingcloud-lb](/images/docs/v3.3/zh-cn/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/qingcloud-lb.png) + +4. 点击上一步创建的负载均衡器。在其详情页面创建监听器,将**监听协议**设置为 `TCP`,将**端口**设置为 `6443`。 + + ![listener](/images/docs/v3.3/zh-cn/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/listener.png) + + - **名称**:监听器的名称 + - **监听协议**:`TCP` + - **端口**:`6443` + - **负载方式**:`轮询` + + 设置完成后点击**提交**。 + + {{< notice note >}} + + 在创建监听器后需要检查负载均衡器的防火墙规则。请确保 `6443` 端口已添加到防火墙规则中并且外部流量可以通过 `6443` 端口,否则安装将会失败。在青云QingCloud 平台上,您可以在**安全**下的**安全组**页面查看相关信息。 + + {{}} + +5. 点击**添加后端**,选择之前选择的私有网络(在本例中为 `pn`),点击**高级搜索**,选择三个主节点,并将**端口**设置为 `6443`(api-server 的默认安全端口)。 + + ![3-master](/images/docs/v3.3/zh-cn/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/3-master.png) + + 设置完成后点击**提交**。 + +5. 点击**应用修改**。页面上显示三个主节点已添加为内部负载均衡器后端监听器的后端服务器。 + + {{< notice note >}} + + 将三个主节点添加为后端后,页面上可能会显示三个主节点的状态为**不可用**。这属于正常现象。这是由于 api-server 的 `6443` 端口尚未在主节点上启用。安装完成后,主节点的状态将自动变为**活跃**,同时 api-server 的端口将暴露,从而内部负载均衡器将正常工作。 + + {{}} + + ![apply-change](/images/docs/v3.3/zh-cn/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/apply-change.png) + + 记录**网络**区域显示的内网 VIP 地址。该地址将在后续步骤中添加至配置文件。 + +#### 创建外部负载均衡器 + +您需要提前在**网络与 CDN** 下的**公网 IP** 页面申请公网 IP 地址。 + +{{< notice note >}} + +本教程需要用到两个公网 IP 地址。其中一个用于 VPC 网络,另一个用于本步骤创建的外部负载均衡器。同一个公网 IP 地址不能同时与 VPC 网络和负载均衡器绑定。 + +{{}} + +1. 创建外部负载均衡器时,点击**添加公网 IPv4** 将您申请到的公网 IP 地址与负载均衡器绑定,将**网络**设置为**不加入私有网络**。其他步骤与创建内部负载均衡器相同。 + + ![bind-eip](/images/docs/v3.3/zh-cn/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/bind-eip.png) + +2. 在负载均衡器详情页面,创建一个监听器用于监听 `30880` 端口(KubeSphere 控制台 NodePort 端口),将**监听协议**设置为 `HTTP`。 + + {{< notice note >}} + + 在创建监听器后需要检查负载均衡器的防火墙规则。请确保 `30880` 端口已添加到防火墙规则中并且外部流量可以通过 `30880` 端口,否则安装将会失败。在青云QingCloud 平台上,您可以在**安全**下的**安全组**页面查看相关信息。 + + {{}} + + ![listener2](/images/docs/v3.3/zh-cn/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/listener2.png) + +3. 点击**添加后端**。在弹出的对话框中选择私有网络 `pn`,点击**高级搜索**,选择私有网络 `pn` 中的六台机器用于安装 KubeSphere,并将**端口**设置为 `30880`。 + + ![six-instances](/images/docs/v3.3/zh-cn/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/six-instances.png) + + 设置完成后点击**提交**。 + +4. 
点击**应用修改**。页面上显示六台机器已添加为外部负载均衡器后端监听器的后端服务器。 + +### 步骤 2:下载 KubeKey + +[KubeKey](https://github.com/kubesphere/kubekey) 是新一代 Kubernetes 和 KubeSphere 安装器,可帮助您以简单、快速、灵活的方式安装 Kubernetes 和 KubeSphere。 + +请按照以下步骤下载 KubeKey。 + +{{< tabs >}} + +{{< tab "如果您能正常访问 GitHub/Googleapis" >}} + +从 [GitHub 发布页面](https://github.com/kubesphere/kubekey/releases)下载 KubeKey 或直接使用以下命令: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "如果您访问 GitHub/Googleapis 受限" >}} + +先执行以下命令以确保您从正确的区域下载 KubeKey: + +```bash +export KKZONE=cn +``` + +执行以下命令下载 KubeKey: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +在您下载 KubeKey 后,如果您将其传至新的机器,且访问 Googleapis 同样受限,在您执行以下步骤之前请务必再次执行 `export KKZONE=cn` 命令。 + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +执行以上命令会下载最新版 KubeKey,您可以修改命令中的版本号下载指定版本。 + +{{}} + +为 `kk` 文件添加可执行权限。 + +```bash +chmod +x kk +``` + +创建包含默认配置的示例配置文件。以下以 Kubernetes v1.22.12 为例。 + +```bash +./kk create config --with-kubesphere v3.3.2 --with-kubernetes v1.22.12 +``` + +{{< notice note >}} + +- 安装 KubeSphere 3.3 的建议 Kubernetes 版本:v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 v1.21.x。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.23.10。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 + +- 如果您在这一步的命令中不添加标志 `--with-kubesphere`,则不会部署 KubeSphere,只能使用配置文件中的 `addons` 字段安装,或者在您后续使用 `./kk create cluster` 命令时再次添加这个标志。 + +- 如果您添加标志 `--with-kubesphere` 时不指定 KubeSphere 版本,则会安装最新版本的 KubeSphere。 + +{{}} + +### 步骤 3:设置集群节点 + +当您采用包含堆叠控制平面节点的高可用拓扑时,主节点和 etcd 节点在相同的三台机器上。 + +| **参数** | **描述** | +| :------- | :----------------- | +| `hosts` | 所有节点的详细信息 | +| `etcd` | etcd 节点名称 | +| `master` | 主节点名称 | +| `worker` | 工作节点名称 | + +在 `etcd` 和 `master` 参数下分别设置主节点的名称(`master1`、`master2` 和 `master3`)使得三台机器同时作为主节点和 etcd 节点。etcd 节点的数量必须是奇数。此外,由于 etcd 内存占用较高,不建议将 etcd 安装在工作节点上。 + +#### config-sample.yaml 文件示例 + +```yaml +spec: + hosts: + - {name: master1, address: 192.168.0.2, internalAddress: 192.168.0.2, user: ubuntu, password: Testing123} + - {name: master2, address: 192.168.0.3, internalAddress: 192.168.0.3, user: ubuntu, password: Testing123} + - {name: master3, address: 192.168.0.4, internalAddress: 192.168.0.4, user: ubuntu, password: Testing123} + - {name: node1, address: 192.168.0.5, internalAddress: 192.168.0.5, user: ubuntu, password: Testing123} + - {name: node2, address: 192.168.0.6, internalAddress: 192.168.0.6, user: ubuntu, password: Testing123} + - {name: node3, address: 192.168.0.7, internalAddress: 192.168.0.7, user: ubuntu, password: Testing123} + roleGroups: + etcd: + - master1 + - master2 + - master3 + control-plane: + - master1 + - master2 + - master3 + worker: + - node1 + - node2 + - node3 +``` + +有关完整的配置示例说明,请参见[此文件](https://github.com/kubesphere/kubekey/blob/release-2.2/docs/config-example.md)。 + +### 步骤 4:配置负载均衡器 + +在前述 YAML 文件中除了需要配置节点信息外,还需要配置负载均衡器信息。本步骤需要用到[创建内部负载均衡器](#创建内部负载均衡器)时记录的内网 VIP 地址。在本示例中,**内部负载均衡器**的 VIP 地址和监听端口分别为 `192.168.0.253` 和 `6443`。您可以参考如下 YAML 文件配置。 + +#### config-sample.yaml 文件示例 + +```yaml +## Internal LB config example +## apiserver_loadbalancer_domain_name: "lb.kubesphere.local" + controlPlaneEndpoint: + domain: lb.kubesphere.local + address: "192.168.0.253" + port: 6443 +``` + +{{< notice note >}} + +- 在 `config-sample.yaml` 文件中,`address` 和 `port` 字段应缩进两个空格,同时 `address` 字段的值应为 VIP 地址。 +- 负载均衡器的默认域名为 `lb.kubesphere.local`,用于内部访问。您可以在 `domain` 字段修改域名。 + +{{}} + +### 步骤 5:配置 Kubernetes 集群(可选) + +集群管理员可修改 
KubeKey 提供的一些字段来自定义 Kubernetes 安装参数,包括 Kubernetes 版本、网络插件和镜像仓库。`config-sample.yaml` 文件中的一些字段有默认值。您可以根据需要修改文件中 Kubernetes 相关的字段。有关更多信息,请参考[ Kubernetes 集群配置](../../../installing-on-linux/introduction/vars/)。 + +### 步骤 6:配置持久化存储插件 + +考虑到生产环境需要数据持久化,您需要准备持久化存储并在 `config-sample.yaml` 文件中配置所需的存储插件(例如 CSI)。 + +{{< notice note >}} + +如搭建测试开发环境,您可以跳过这一步。KubeKey 将直接使用集成的 OpenEBS 配置 LocalPV 存储服务。 + +{{}} + +**可用的存储插件和客户端** + +- Ceph RBD & CephFS +- GlusterFS +- QingCloud CSI +- QingStor CSI +- 未来版本将支持更多插件 + +请确保在安装前配置了存储插件。在安装过程中,KubeKey 将为相关的工作负载创建 StorageClass 和持久卷。有关更多信息,请参见[持久化存储配置](../../../installing-on-linux/persistent-storage-configurations/understand-persistent-storage/)。 + +### 步骤 7:启用可插拔组件(可选) + +从 v2.1.0 版本开始,一些核心功能组件从 KubeSphere 中解耦出来。这些组件被设计成可插拔的形式,您可以在安装前或安装后启用它们。默认情况下,如果您没有启用这些组件,KubeSphere 将以最小化形式安装。 + +您可以根据需要启用任何可插拔组件。强烈建议您安装这些组件以充分发掘 KubeSphere 的全栈特性。如果您启用这些组件,请确保您机器有足够的 CPU 和内存资源。有关详情,请参见[启用可插拔组件](../../../pluggable-components/)。 + +### 步骤 8:搭建集群 + +完成以上配置后,执行以下命令开始安装: + +```bash +./kk create cluster -f config-sample.yaml +``` + +### 步骤 9:验证安装结果 + +检查安装日志。如果显示如下日志,KubeSphere 安装成功。 + +```bash +kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f +``` + +```bash +##################################################### +### Welcome to KubeSphere! ### +##################################################### + +Console: http://192.168.0.3:30880 +Account: admin +Password: P@88w0rd + +NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. 
+ +##################################################### +https://kubesphere.io 2020-08-13 10:50:24 +##################################################### +``` + +### 步骤 10:验证高可用集群 + +安装完成后,打开内部和外部负载均衡器的详情页面查看节点状态。 + +![active](/images/docs/v3.3/zh-cn/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/active.png) + +如果两个监听器中的节点状态都是**活跃**,表明所有节点已启动并运行正常。 + +![active-listener](/images/docs/v3.3/zh-cn/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/active-listener.png) + +进入 KubeSphere 的 Web 控制台,您也可以看到所有节点运行正常。 + +为验证集群的高可用性,可关闭一台主机进行测试。例如,上面的控制台可通过 `IP:30880` 地址访问(此处 IP 地址为绑定到外部负载均衡器的 EIP 地址)。如果集群的高可用性正常,在您关闭一台主节点后,控制台应该仍能正常工作。 + +## 另请参见 + +[多节点安装](../../../installing-on-linux/introduction/multioverview/) + +[Kubernetes 集群配置](../../../installing-on-linux/introduction/vars/) + +[持久化存储配置](../../../installing-on-linux/persistent-storage-configurations/understand-persistent-storage/) + +[启用可插拔组件](../../../pluggable-components/) \ No newline at end of file diff --git a/content/zh/docs/v3.4/installing-on-linux/uninstall-kubesphere-and-Kubernetes.md b/content/zh/docs/v3.4/installing-on-linux/uninstall-kubesphere-and-Kubernetes.md new file mode 100644 index 000000000..9358151e6 --- /dev/null +++ b/content/zh/docs/v3.4/installing-on-linux/uninstall-kubesphere-and-Kubernetes.md @@ -0,0 +1,23 @@ +--- +title: "卸载 KubeSphere 和 Kubernetes" +keywords: 'kubernetes, kubesphere, 卸载, 移除集群' +description: '从机器上移除 KubeSphere 和 Kubernetes。' +linkTitle: "卸载 KubeSphere 和 Kubernetes" +weight: 3700 +--- + +卸载 KubeSphere 和 Kubernetes 意味着将其从您的机器上移除。该操作不可逆,且不会进行任何备份。请谨慎操作。 + +如需删除集群,请执行以下命令。 + +- 如果是按照快速入门 ([All-in-One](../../quick-start/all-in-one-on-linux/)) 安装的 KubeSphere: + + ```bash + ./kk delete cluster + ``` + +- 如果是使用高级模式安装的 KubeSphere([使用配置文件创建](../introduction/multioverview/)): + + ```bash + ./kk delete cluster [-f config-sample.yaml] + ``` diff --git a/content/zh/docs/v3.4/introduction/_index.md b/content/zh/docs/v3.4/introduction/_index.md new file mode 100644 index 000000000..865249118 --- /dev/null +++ b/content/zh/docs/v3.4/introduction/_index.md @@ -0,0 +1,14 @@ +--- +title: "产品介绍" +description: "通过详细的图文介绍帮助您更好地了解 KubeSphere" +layout: "second" + +linkTitle: "产品介绍" + +weight: 1000 + +icon: "/images/docs/v3.3/docs.svg" + +--- + +本章概述了 KubeSphere 的基本概念、功能、优势以及应用场景等。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/introduction/advantages.md b/content/zh/docs/v3.4/introduction/advantages.md new file mode 100644 index 000000000..ffaaa7d4d --- /dev/null +++ b/content/zh/docs/v3.4/introduction/advantages.md @@ -0,0 +1,92 @@ +--- +title: "为什么选择 KubeSphere" +keywords: "KubeSphere, Kubernetes, 优势" +description: "KubeSphere 优势" +linkTitle: "为什么选择 KubeSphere" +weight: 1600 +--- + +## 设计愿景 + +Kubernetes 已经成为在私有云、公有云和混合云等环境中大规模部署容器化应用程序的事实标准。然而,很多人使用 Kubernetes 仍会不知所措,因为 Kubernetes 本身的使用复杂,需要管理的组件繁多,部分组件需要自行安装和部署,比如存储和网络部分。目前,Kubernetes 仅提供开源的解决方案或项目,可能在某种程度上难以安装、维护和操作。对于用户而言,学习成本和门槛都很高,快速上手并不是一件易事。 + +KubeSphere 旨在解决 Kubernetes 在构建、部署、管理和可观测性等方面的痛点,提供全面的服务和自动化的应用供应、伸缩和管理,让您专注于代码编写。具体来说,KubeSphere 包含多种功能,如多集群管理、应用程序生命周期管理、多租户管理、CI/CD 流水线、微服务治理和可观测性(监控日志、告警通知和审计事件)等。 + +作为一个综合性的开源平台,KubeSphere 致力于提供更加友好的用户体验,更强大的操作功能。例如,KubeSphere 的交互式 Web 控制台方便用户直接在平台上进行测试和操作,同时还内置了命令行工具 Kubectl,让习惯使用命令行操作的用户也能快速上手,以最低的学习成本轻松地在平台上创建和修改各类资源。 + +此外,KubeSphere 在存储和网络方面提供了最优的解决方案,比如存储除了支持流行的开源共享存储如 Ceph RBD 和 GlusterFS 之外,还提供[青云QingCloud 云平台块存储](https://docs.qingcloud.com/product/storage/volume/)和青云QingCloud 自研的[分布式存储 QingStor 
NeonSAN](https://docs.qingcloud.com/product/storage/volume/super_high_performance_shared_volume/) 作为 Kubernetes 的持久化存储,通过集成的 QingCloud CSI 和 NeonSAN CSI 插件,即可使用青云QingCloud 提供的高性能块存储或 NeonSAN 作为持久卷挂载至工作负载,为企业应用和数据提供更稳定安全的存储服务。 + +## 为什么选择 KubeSphere + +KubeSphere 为企业用户提供高性能可伸缩的容器应用管理服务,旨在帮助企业完成新一代互联网技术驱动下的数字化转型,加速应用的快速迭代与业务交付,以满足企业日新月异的业务需求。 + +以下是 KubeSphere 的六大主要优势。 + +### 跨云厂商的多集群统一管理 + +随着容器应用的日渐普及,各个企业跨云或在本地环境中部署多个集群,而集群管理的复杂程度也在不断增加。为满足用户统一管理多个异构集群的需求,KubeSphere 配备了全新的多集群管理功能,帮助用户跨区、跨云等多个环境管理、监控、导入和运维多个集群,全面提升用户体验。 + +多集群功能可在安装 KubeSphere 之前或之后启用。具体来说,该功能有两大特性: + +**统一管理**:用户可以使用直接连接或间接连接导入 Kubernetes 集群。只需简单配置,即可在数分钟内在 KubeSphere 的互动式 Web 控制台上完成整个流程。集群导入后,用户可以通过统一的中央控制平面监控集群状态、运维集群资源。 + +**高可用**:在多集群架构中,一个集群可以运行主要服务,另一集群作为备用集群。一旦该主集群宕机,备用集群可以迅速接管相关服务。此外,当集群跨区域部署时,为最大限度地减少延迟,请求可以发送至距离最近的集群,由此实现跨区跨集群的高可用。 + +有关更多信息,请参见[多集群管理](../../multicluster-management/)。 + +### 强大的可观测性功能 + +KubeSphere 的可观测性功能在 v3.0 中全面升级,进一步优化与改善了其中的重要组件,包括监控日志、审计事件以及告警通知。用户可以借助 KubeSphere 强大的监控系统查看平台中的各类数据,该系统主要的优势包括: + +**自定义配置**:用户可以为应用自定义监控面板,有多种模板和图表模式可供选择。用户可按需添加想要监控的指标,甚至选择指标在图表上所显示的颜色。此外,也可自定义告警策略与规则,包括告警间隔、次数和阈值等。 + +**全维度数据监控与查询**:KubeSphere 提供全维度的资源监控数据,将运维团队从繁杂的数据记录工作中彻底解放,同时配备了高效的通知系统,支持多种通知渠道,包括电子邮件、Slack 与企业微信等。基于 KubeSphere 的多租户管理体系,不同租户可以在控制台上查询对应的监控日志与审计事件,支持关键词过滤、模糊匹配和精确匹配。 + +**图形化交互式界面设计**:KubeSphere 为用户提供图形化 Web 控制台,便于从不同维度监控各个资源。资源的监控数据会显示在交互式图表上,详细记录集群中的资源用量情况。不同级别的资源可以根据用量进行排序,方便用户对数据进行对比与分析。 + +**高精度秒级监控**:整个监控系统提供秒级监控数据,帮助用户快速定位组件异常。此外,所有审计事件均会准确记录在 KubeSphere 中,便于后续数据分析。 + +有关更多信息,请参见[集群管理](../../cluster-administration/)、[项目用户指南](../../project-user-guide/)和[工具箱](../../toolbox/)。 + +### 自动化 DevOps 流程 + +自动化是落地 DevOps 的重要组成部分,自动、精简的流水线为用户通过 CI/CD 流程交付应用提供了良好的条件。 + +**集成 Jenkins**:KubeSphere DevOps 系统内置了 Jenkins 作为引擎,支持多种第三方插件。此外,Jenkins 为扩展开发提供了良好的环境,DevOps 团队的整个工作流程可以在统一的平台上无缝对接,包括开发测试、构建部署、监控日志和通知等。KubeSphere 的帐户可以用登录内置的 Jenkins,满足企业对于 CI/CD 流水线和统一认证多租户隔离的需求。 + +**便捷的内置工具**:无需对 Docker 或 Kubernetes 的底层运作原理有深刻的了解,用户即可快速上手自动化工具,包括 Binary-to-Image 和 Source-to-Image。只需定义镜像仓库地址,上传二进制文件(例如 JAR/WAR/Binary),即可将对应的服务自动发布至 Kubernetes,无需编写 Dockerfile。 + +有关更多信息,请参见 [DevOps 用户指南](../../devops-user-guide/)。 + +### 细粒度权限控制 + +KubeSphere 为用户提供不同级别的权限控制,包括集群、企业空间和项目。拥有特定角色的用户可以操作对应的资源。 + +**自定义角色**:除了系统内置的角色外,KubeSphere 还支持自定义角色,用户可以给角色分配不同的权限以执行不同的操作,以满足企业对不同租户具体工作分配的要求,即可以定义每个租户所应该负责的部分,不被无关资源所影响。 + +**安全**:由于不同级别的租户之间完全隔离,他们在共享部分资源的同时也不会相互影响。租户之间的网络也完全隔离,确保数据安全。 + +有关更多信息,请参见[企业空间](../../workspace-administration/role-and-member-management/)和[项目](../../project-administration/role-and-member-management/)中的角色和成员管理。 + +### 开箱即用的微服务治理 + +KubeSphere 的微服务治理功能基于 Istio,提供多个灰度策略。所有的功能均开箱即用,支持无侵入 (Hack) 的微服务治理,提供一致的用户体验。以下是 KubeSphere 微服务治理(服务网格)的两大优势: + +- **全面的微服务治理功能**:KubeSphere 为用户提供多样化的流量管理功能,包括金丝雀发布、蓝绿部署、流量镜像、和熔断机制等。 +- **可视化界面**:KubeSphere 提供交互式 Web 控制台,让用户可以直观地查看微服务直接相互通信的情况,支持链路追踪、智能路由等完善的微服务治理功能,帮助用户快速监控应用,定位问题并提高系统性能。 + +KubeSphere 旨在为服务间的通信提供一个可靠、迅速的微服务架构。有关更多信息,请参见[灰度发布](../../project-user-guide/grayscale-release/overview/)。 + +### 活跃的开源社区 + +KubeSphere 作为一个开源项目,不仅仅是一个用于应用部署与分发的容器平台。KubeSphere 团队认为真正的开源模式更专注于让所有人进行开放地分享与讨论,并相互帮助解决问题。KubeSphere 团队携手合作伙伴、大使和贡献者,以及其他的社区成员,共同打造一个开源开放的社区,大家可以在其中提出问题、提交 PR、参与见面会并交换创新意见等。 + +KubeSphere 社区具备充分的能力和技术知识,让大家能共享开源模式所带来的红利。更重要的是,这里也是来自世界各地的开源爱好者们的共同家园,正是由于他们的贡献 KubeSphere 才能取得今天的成就。 + +**合作伙伴**:KubeSphere 合作伙伴对 KubeSphere 的 Go-to-Market 策略至关重要,合作伙伴可以是开发者、技术公司、云厂商或 Go-to-Market 合作伙伴,他们在各自的领域都推动着社区的发展。 + +**大使**:作为 KubeSphere 社区的代表,大使负责在多个方面(活动、博客和用户案例等)帮助推动 KubeSphere 的发展,让更多的人参与社区。 + +**贡献者**:KubeSphere 
贡献者通过贡献代码或文档等对整个社区进行贡献。就算您不是该领域的专家,无论是细微的代码修改或是语言改进,您的贡献也会帮助到整个社区。 + +有关更多信息,请参见[合作伙伴项目](https://kubesphere.io/zh/partner/)和[社区治理](https://kubesphere.io/zh/contribution/)。 diff --git a/content/zh/docs/v3.4/introduction/architecture.md b/content/zh/docs/v3.4/introduction/architecture.md new file mode 100644 index 000000000..5931b08ff --- /dev/null +++ b/content/zh/docs/v3.4/introduction/architecture.md @@ -0,0 +1,41 @@ +--- +title: "架构说明" +keywords: "kubesphere, kubernetes, docker, helm, jenkins, istio, prometheus, devops, service mesh,架构说明,架构" +description: "KubeSphere 架构说明" + +linkTitle: "架构说明" +weight: 1500 +--- + +## 前后端分离 + +KubeSphere 将 [前端](https://github.com/kubesphere/console) 与 [后端](https://github.com/kubesphere/kubesphere) 分开,实现了面向云原生的设计,后端的各个功能组件可通过 REST API 对接外部系统。 可参考 [API文档](../../reference/api-docs/)。下图是系统架构图。 KubeSphere 无底层的基础设施依赖,可以运行在任何 Kubernetes、私有云、公有云、VM 或物理环境(BM)之上。 此外,它可以部署在任何 Kubernetes 发行版上。 + +![Architecture](https://pek3b.qingstor.com/kubesphere-docs/png/20190810073322.png) + +## 组件列表 + +| 后端组件 | 功能说明 | +|---|---| +| ks-apiserver | 整个集群管理的 API 接口和集群内部各个模块之间通信的枢纽,以及集群安全控制。| +| ks-console | 提供 KubeSphere 的控制台服务。| +| ks-controller-manager | 实现业务逻辑的,例如创建企业空间时,为其创建对应的权限;或创建服务策略时,生成对应的 Istio 配置等。| +| metrics-server | Kubernetes 的监控组件,从每个节点的 Kubelet 采集指标信息。| +| Prometheus | 提供集群,节点,工作负载,API对象的监视指标和服务。| +| Elasticsearch | 提供集群的日志索引、查询、数据管理等服务,在安装时也可对接您已有的 ES 减少资源消耗。| +| Fluent Bit | 提供日志接收与转发,可将采集到的⽇志信息发送到 ElasticSearch、Kafka。 | +| Jenkins | 提供 CI/CD 流水线服务。| +| Source-to-Image | 将源代码自动将编译并打包成 Docker 镜像,方便快速构建镜像。| +| Istio | 提供微服务治理与流量管控,如灰度发布、金丝雀发布、熔断、流量镜像等。| +| Jaeger | 收集 Sidecar 数据,提供分布式 Tracing 服务。| +| OpenPitrix | 提供应用程序生命周期管理,例如应用模板、应用部署与管理的服务等。| +| Alert | 提供集群、Workload、Pod、容器级别的自定义告警服务。| +| Notification | 是一项综合通知服务; 它当前支持邮件传递方法。| +| Redis | 将 ks-console 与 ks-account 的数据存储在内存中的存储系统。| +| OpenLDAP | 负责集中存储和管理用户帐户信息与对接外部的 LDAP。| +| Storage | 内置 CSI 插件对接云平台存储服务,可选安装开源的 NFS/Ceph/Gluster 的客户端。| +| Network | 可选安装 Calico/Flannel 等开源的网络插件,支持对接云平台 SDN。| + +## 服务组件 + +以上列表中每个功能组件下还有多个服务组件,关于服务组件的说明,可参考 [服务组件说明](../../pluggable-components/)。 diff --git a/content/zh/docs/v3.4/introduction/ecosystem.md b/content/zh/docs/v3.4/introduction/ecosystem.md new file mode 100644 index 000000000..e6c05d2cb --- /dev/null +++ b/content/zh/docs/v3.4/introduction/ecosystem.md @@ -0,0 +1,15 @@ +--- +title: "KubeSphere 生态工具" +keywords: 'Kubernetes, KubeSphere, KubeSphere 生态工具' +description: 'KubeSphere 生态工具' +linkTitle: "KubeSphere 生态工具" +weight: 1200 +--- + +## 丰富的生态工具 + +KubeSphere **围绕 Kubernetes 集成了多个云原生生态主流的开源软件**,同时支持对接大部分流行的第三方组件,从应用和应用生命周期管理到集群底层的运行时,将这些开源项目作为其后端组件,通过标准的 API 与 KubeSphere 控制台交互,最终在一个统一的控制台界面提供一致的用户体验,以降低对不同工具的学习成本和复杂性。 + +同时,KubeSphere 还具备了 Kubernetes 尚未提供的新功能,旨在解决 Kubernetes 本身存在的存储、网络、安全和易用性等痛点。KubeSphere 不仅允许开发人员和 DevOps 团队在统一的控制台中使用他们喜欢的工具,最重要的是,这些功能与平台松耦合,所有功能组件均支持可插拔安装。 + +![kubesphere-ecosystem](/images/docs/v3.3/zh-cn/introduction/kubesphere-ecosystem/kubesphere-ecosystem.png) \ No newline at end of file diff --git a/content/zh/docs/v3.4/introduction/features.md b/content/zh/docs/v3.4/introduction/features.md new file mode 100644 index 000000000..e4fd0e56a --- /dev/null +++ b/content/zh/docs/v3.4/introduction/features.md @@ -0,0 +1,173 @@ +--- +title: "平台功能" +keywords: "KubeSphere, Kubernetes, Docker, Jenkins, Istio, Features, 平台功能, 功能" +description: "KubeSphere 平台功能" + +linkTitle: "平台功能" +weight: 1300 +--- + +## 概览 + +KubeSphere 作为开源的企业级全栈化容器平台,为用户提供了一个健壮、安全、功能丰富、具备极致体验的 Web 控制台。拥有企业级 Kubernetes 所需的最常见的功能,如工作负载管理,网络策略配置,微服务治理(基于 Istio),DevOps 项目 (CI/CD) 
,安全管理,Source to Image/Binary to Image,多租户管理,多维度监控,日志查询和收集,告警通知,审计,应用程序管理和镜像管理、应用配置密钥管理等功能模块。 + +它还支持各种开源存储和网络解决方案以及云存储服务。例如,KubeSphere 为用户提供了功能强大的云原生工具[负载均衡器插件 OpenELB](https://openelb.github.io/),这是为 Kubernetes 集群开发的 CNCF 认证的负载均衡插件。 + +有了易于使用的图形化 Web 控制台,KubeSphere 简化了用户的学习曲线并推动了更多的企业使用 Kubernetes 。 + +![Overview](https://pek3b.qingstor.com/kubesphere-docs/png/20200202153355.png) + +以下从专业的角度详解各个模块的功能服务。有关详细信息,请参阅本指南中的相应章节。 + +## 部署和维护 Kubernetes + +### 部署 Kubernetes 集群 + +[KubeKey](https://github.com/kubesphere/kubekey) 允许用户直接在基础架构上部署 Kubernetes,为 Kubernetes 集群提供高可用性。建议在生产环境至少配置三个主节点。 + +### Kubernetes 资源管理 + +对底层 Kubernetes 中的多种类型的资源提供可视化的展示与监控数据,以向导式 UI 实现工作负载管理、镜像管理、服务与应用路由管理 (服务发现)、密钥配置管理等,并提供弹性伸缩 (HPA) 和容器健康检查支持,支持数万规模的容器资源调度,保证业务在高峰并发情况下的高可用性。 + +由于 KubeSphere 3.3 具有增强的可观测性,用户可以从多租户角度跟踪资源,例如自定义监视、事件、审核日志、告警通知。 + +### 集群升级和扩展 + +[KubeKey](https://github.com/kubesphere/kubekey) 提供了一种简单的安装,管理和维护方式。它支持 Kubernetes 集群的滚动升级,以便集群服务在升级时始终可用。另外,也可以使用 KubeKey 将新节点添加到 Kubernetes 集群中以使用更多工作负载。 + +## 多集群管理和部署 + +随着IT界越来越多的企业使用云原生应用程序来重塑软件产品组合,用户更倾向于跨位置、地理位置和云部署集群。在此背景下,KubeSphere 进行了重大升级,以其全新的多集群功能满足用户的迫切需求。 + +借助 KubeSphere的图形化 Web 控制台,用户可以管理底层的基础架构,例如添加或删除集群。可以使用相同的方式管理部署在任何基础架构(例如 Amazon EKS和Google Kubernetes Engine)上的异构集群。 + +- **独立**:可以在 KubeSphere 容器平台中维护和管理独立部署的 Kubernetes 集群。 +- **联邦**:多个 Kubernetes 集群可以聚合在一起作为 Kubernetes 资源池。当用户部署应用程序时,副本可以部署在资源池中的不同 Kubernetes 集群上。由此,跨区域和多集群实现了高可用性。 + +KubeSphere 允许用户跨集群部署应用程序。更重要的是,还可以将应用程序配置为在特定集群上运行。此外,多集群功能与行业领先的应用程序管理平台 [OpenPitrix](https://github.com/openpitrix/openpitrix) 配合使用,使用户可以在整个生命周期内管理应用程序,包括发行、移除和分发。 + +有关更多信息,请参阅[多集群管理](../../multicluster-management/)。 + +## DevOps支持 + +KubeSphere 提供了基于 Jenkins 的可视化 CI/CD 流水线编辑,无需对 Jenkins 进行配置,同时内置丰富的 CI/CD 流水线插件,包括Binary-to-Image (B2I) 和Source-to-Image (S2I),用于将源代码或二进制文件打包到准备运行的容器映像中。 + +![DevOps](https://pek3b.qingstor.com/kubesphere-docs/png/20200202220455.png) + +### CI/CD 流水线 + +- **自动化**:CI/CD 流水线和构建策略均基于 Jenkins,可简化和自动化开发、测试和生产过程。缓存依赖项用于加速构建和部署。 +- **开箱即用**:用户可以基于他们的 Jenkins 构建策略和客户端插件来创建基于 Git repository/SVN 的 Jenkins 流水线。在内置的 Jenkinsfile 中定义任何步骤和阶段。支持常见的代理类型,例如 Maven,Node.js 和 Go。用户也可以自定义代理类型。 +- **可视化**:用户可以轻松地与可视化控制面板进行交互,编辑、管理 CI/CD 流水线。 +- **质量管理**:支持通过静态代码分析扫描来检测DevOps 项目中的 bug、代码错误和安全漏洞。 +- **日志**:日志完整记录 CI/CD 流水线运行全过程。 + +### Source-to-Image + +Source-to-Image (S2I) 是一个直接将源代码构建成镜像的自动化构建工具,它是通过将源代码放入一个负责编译源代码的构建器镜像(Builder image) 中,自动将编译后的代码打包成 Docker 镜像。 + +S2I 允许用户将服务发布到 Kubernetes,而无需编写 Dockerfile。只需要提供源代码仓库地址,并指定目标镜像仓库即可。所有配置将在 Kubernetes 中存储为不同的资源。服务将自动发布到 Kubernetes,并且镜像也将推送到目标仓库。 + +![S2I](https://pek3b.qingstor.com/kubesphere-docs/png/20200204131749.png) + +### Binary-to-Image + +与 S2I 相似,Binary-to-Image (B2I) 是一个直接将二进制文件构建成镜像的自动化构建工具,用于从二进制文件(例如 Jar,War,Binary 包)构建可复制的容器镜像。 + +用户只需要上传应用程序二进制包,并指定要推送到的镜像仓库即可。其余部分与 S2I 完全相同。 + +有关更多信息,请参阅 [DevOps 用户指南](../../devops-user-guide/)。 + +## 基于 Istio 的服务网络 + +KubeSphere 服务网络由一组生态系统项目组成,例如 Istio,Envoy 和 Jaeger。我们设计了一个统一的用户界面来使用和管理这些工具。大多数功能都是现成的,并且是从开发人员的角度进行设计的,这意味着 KubeSphere 可以帮助用户减少学习难度,因为不需要单独深入研究这些工具。 + +KubeSphere 服务网络为分布式应用程序提供细粒度的流量管理、可观测性、服务跟踪以及服务身份和安全性管理。因此,开发人员只需要专注于核心业务。通过 KubeSphere 的服务网络管理,用户可以更好地跟踪、查看路由和优化 Kubernetes 中用于云原生应用程序的通信。 + +### 流量管理 + +- **金丝雀发布**: 金丝雀发布是在现有生产系统旁边创建了一个全新的独立生产环境,通过使新版本只对少数终端用户可用,这样可降低向推出新代码和功能的风险。如果新版本一切顺利,用户可以更改百分比,并逐渐用新版本替换旧版本。 +- **蓝绿发布**: 允许用户同时运行一个应用程序的两个版本。蓝色代表当前应用程序版本,绿色代表经过功能和性能测试的新版本。一旦测试结果成功,就将应用程序流量从生产版本(蓝色)路由到新版本(绿色)。 +- **流量镜像**:流量镜像也叫作影子流量,是指通过一定的配置将线上的真实流量复制一份到镜像服务中去,我们通过流量镜像转发以达到在不影响线上服务的情况下对流量或请求内容做具体分析的目的,它的设计思想是只做转发而不接收响应(fire and forget),使团队能够以最小的风险进行生产变更。 +- **断路器**: 
允许用户设置服务内单个主机的呼叫限制,例如并发连接数或对该主机的呼叫失败次数。 + +有关更多信息,请参见[灰度发布](../../project-user-guide/grayscale-release/overview/)。 + +### 可视化 + +KubeSphere 服务网络具有可视化微服务之间的连接以及它们如何互连的拓扑的能力。在这方面,可观测性对于理解云原生微服务的互连非常有帮助。 + +### 分布式跟踪 + +用户能基于 Jaeger,跟踪 KubeSphere 服务之间的网络交互。它通过可视化帮助用户更深入地了解请求延迟、瓶颈、序列化和并行性。 + +## 多租户管理 + +在 KubeSphere 中,资源(例如集群)可以在租户之间共享。首先,管理员或运维人员需要使用不同的权限设置不同的帐户角色。可以将这些角色分配给平台中的成员,以对各种资源执行特定的操作。同时,由于 KubeSphere 完全隔离了租户,因此它们根本不会相互影响。 + +- **多租户**:它提供了基于角色的细粒度身份验证和三层授权的系统。 +- **统一认证**:KubeSphere 与 LDAP 或 AD 协议的中央身份验证系统兼容。还支持单点登录 (SSO),以实现租户身份的统一身份验证。 +- **授权系统**:它分为三个级别:集群,企业空间和项目。 KubeSphere 确保可以共享资源,同时完全隔离多个级别的不同角色以确保资源安全。 + +有关更多信息,请参见企业空间中的[角色和成员管理](../../workspace-administration/role-and-member-management/)。 + +## 可观测性 + +### 多维度监控 + +KubeSphere 通过可视化界面操作监控、运维功能,可简化操作和维护的整个过程。它提供了对各种资源的自定义监控,并可以立即将发生的问题发送给用户。 + +- **可定制的监控仪表板**:用户可以准确决定需要以哪种形式监控哪些工具。 KubeSphere 中提供了不同的模板供用户选择,例如 Elasticsearch,MySQL 和 Redis。或者,他们也可以创建自己的监视模板,包括图表,颜色,间隔和单位。 +- **运维友好**:开放标准接口,易于对接企业运维系统,以统一运维入口实现集中化运维。 +- **第三方兼容性**:KubeSphere 与 Prometheus 兼容,后者是用于在 Kubernetes 环境中进行监视的事实指标收集平台。监视数据可以在 KubeSphere 的 Web 控制台中无缝显示。 + +- **二级精度的多维度监控**: + - 在集群资源维度,系统提供了全面的指标,例如 CPU 利用率、内存利用率、CPU 平均负载、磁盘使用量、inode 使用率、磁盘吞吐量、IOPS、网卡速率、容器组运行状态、etcd 监控、API Server 监控等多项指标。 + - 在应用资源维度,提供针对应用的 CPU 用量、内存用量、容器组数量、网络流出速率、网络流入速率等五项监控指标。并支持按用量排序和自定义时间范围查询,快速定位异常提供按节点、企业空间、项目等资源用量排行。 +- **排序**:用户可以按节点,工作空间和项目对数据进行排序,从而以直观的方式为他们的资源运行提供图形化视图。 + +- **组件监控**:它允许用户快速定位任何组件故障,以避免不必要的业务停机。 + +### 自研多租户告警系统 + +- **自定义告警策略和规则**:支持基于多租户、多维度的监控指标告警。 该系统将发送与各种资源,如节点、网络和工作负载相关的告警。可自定义包含多个告警规则的告警策略,如重复间隔和时间,来定制自己的告警策略、阈值和告警级别。 +- **准确的事件跟踪**:用户可以及时了解集群内部发生的情况,例如容器运行状态(成功或失败),节点调度和镜像拉取结果。 它们将被准确记录,并在 Web 控制台中显示结果,状态和消息。 在生产环境中,这将帮助用户及时响应任何问题。 +- **增强审计安全性**:由于 KubeSphere 具有对用户授权的细粒度管理,因此可以将资源和网络完全隔离以确保数据安全。 全面的审核功能使用户可以搜索与任何操作或告警相关的活动。 +- **多种通知方式**:电子邮件是用户接收相关活动通知并获取所需信息的一种关键方法。用户可以自定义规则来发送邮件,包括自定义发件人电子邮件地址及其收件人列表。此外,KubeSphere 还支持其他渠道,例如 Slack 和微信等。随着后续迭代,KubeSphere 会为用户提供更多的通知渠道和配置。 + +有关更多信息,请参见[项目用户指南](../../project-user-guide/)。 + +## 日志查询与收集 + +- **多租户日志管理**.:提供多租户日志管理,在 KubeSphere 的日志查询系统中,不同的租户只能看到属于自己的日志信息,支持中文日志检索,支持日志导出。 +- **多级日志查询**:多级别的日志查询(项目/工作负载/容器组/容器以及关键字)、灵活方便的日志收集配置选项等。 +- **多种日志收集平台**:用户可以选择多种日志收集平台,例如 Elasticsearch,Kafka 和 Fluentd。 +- **落盘日志收集功能**:对于将日志以文件形式保存在 Pod 挂盘上的应用,支持开启落盘日志收集功能。 + +## 应用程序管理和编排 + +- **应用商店**:KubeSphere 提供了一个基于开源 [OpenPitrix](https://github.com/openpitrix/openpitrix) 的应用商店,支持应用上传、应用审核、应用上架与分类、应用部署,为用户提供应用全生命周期管理功能。 +- **应用资料库**:在 KubeSphere 中,用户可将基于 Helm Chart 模板文件打包好的应用程序包上传并保存在第三方平台之上,例如 [QingStor](https://www.qingcloud.com/products/objectstorage/) 对象存储服务、[AWS S3](https://aws.amazon.com/what-is-cloud-object-storage/) 或者 [GitHub](https://github.com/) 等,同时在 KubeSphere 控制台上配置并使用这些第三方的应用资料库。 +- **应用程式范本**:有了应用程序模板,KubeSphere 只需单击一下即可提供一种可视化的方法来部署应用程序。 在内部,应用程序模板可以帮助企业中的不同团队共享中间件和业务系统。 在外部,它们可以用作基于不同方案和需求的应用程序交付的行业标准。 + +## 多种存储解决方案 + +- 可使用开源存储解决方案,例如 GlusterFS,CephRBD 和 NFS。 +- 可使用 NeonSAN CSI 插件连接到 QingStor NeonSAN,以满足低延迟,高弹性和高性能的核心业务要求。 +- 可使用 QingCloud CSI 插件连接到 QingCloud 平台中的各种块存储服务。 + +## 多种网络解决方案 + +- 支持 Calico、Flannel 等开源网络方案。 + +- [OpenELB](https://github.com/kubesphere/openelb),是由 KubeSphere 开发团队设计、经过 CNCF 认证的一款适用于物理机部署 Kubernetes 的负载均衡插件。 主要特点: + + 1. ECMP 路由负载均衡 + 2. BGP 动态路由 + 3. VIP 管理 + 4. 分配 Kubernetes 服务中的 LoadBalancerIP (v0.3.0) + 5. 使用 Helm Chart 安装 (v0.3.0) + 6. 通过 CRD 动态配置BGP服务器 (v0.3.0) + 7. 
通过 CRD 动态配置BGP对等 (v0.3.0) + + 有关 OpenELB 的更多信息,请参见[本文](https://kubesphere.io/zh/conferences/porter/)。 diff --git a/content/zh/docs/v3.4/introduction/scenarios.md b/content/zh/docs/v3.4/introduction/scenarios.md new file mode 100644 index 000000000..a55128d30 --- /dev/null +++ b/content/zh/docs/v3.4/introduction/scenarios.md @@ -0,0 +1,96 @@ +--- +title: "应用场景" +keywords: 'KubeSphere, Kubernetes, 多集群, 可观测性, DevOps' +description: 'KubeSphere 适用于多种应用场景,为企业提供全栈的云原生功能。' +linkTitle: "应用场景" +weight: 1700 +--- + +KubeSphere 适用于多种场景,为企业提供容器化的环境,借助完善的管理和运维功能,让企业在数字化转型过程中从容应对各种挑战和各类业务场景,如多云多集群管理、敏捷软件开发、自动化运维、微服务治理、流量管理、高可用以及 DevOps 持续集成与交付等。 + +## 多集群部署 + +随着容器的普及和 Kubernetes 的日渐成熟,企业内部运行多个 Kubernetes 集群已变得颇为常见。概括起来,多个集群的使用场景主要有以下几种: + +### 高可用 + +用户可以将应用负载部署在多个集群上,使用一个全局 VIP 或 DNS 域名将请求发送到对应的后端集群。当一个集群发生故障或无法处理请求时,将 VIP 或 DNS 记录切换至健康的集群。 + +![高可用](/images/docs/v3.3/zh-cn/introduction/use-cases/高可用.png) + +### 低延迟 + +在多个地区部署集群时,可将用户请求转发至距离最近的集群处理,以此来最大限度减少网络带来的延迟。例如,在北京、上海和广州三地部署了三个 Kubernetes 集群,对于广东的用户就将请求转发至部署于广州的集群处理,这样可以减少地理距离带来的网络延迟,最大限度地实现各地一致的用户体验。 + +### 隔离 + +**故障隔离**:通常来说,多个小规模的集群比一个大规模的集群更容易隔离故障。当集群发生诸如服务中断、网络故障、资源不足引起的连锁反应等问题时,使用多个集群可以将故障隔离在特定的集群,不会向其他集群传播。 + +**业务隔离**:Kubernetes 通过命名空间来隔离应用,但这仅是逻辑上的隔离,不同命名空间之间网络互通,依旧存在资源抢占的问题。要想实现更进一步的隔离,需要额外设置诸如网络隔离策略、资源限额等。多集群可以在物理上实现彻底隔离,安全性和可靠性相比使用命名空间隔离更高。例如企业内部不同部门部署各自独立的集群、使用多个集群来分别部署开发、测试和生成环境等。 + +![流水线](/images/docs/v3.3/zh-cn/introduction/use-cases/流水线.png) + +### 避免厂商锁定 + +Kubernetes 已经成为容器编排领域的事实标准,很多企业在不同云厂商上部署集群时都避免将鸡蛋都放在一个篮子,以便可以随时迁移业务,在不同集群间伸缩。缺点是成本增加,考虑到不同厂商提供的 Kubernetes 服务对应的存储、网络接口有差异,业务迁移也非易事。 + +为应对不同的使用场景,KubeSphere 提供统一的中央控制平面,由 Host 集群纳管 Member 集群,即多个异构的 Kubernetes 集群可以聚合在一起作为 Kubernetes 资源池。当用户部署应用程序时,可以选择应用的副本所要运行于的一个或多个 Kubernetes 集群。整个过程可以通过 KubeSphere 控制台进行管理,以可视化的方式帮助用户实现跨区域和跨集群的高可用性。 + +有关更多信息,请参见[多集群管理](../../multicluster-management/)。 + +## 多维度监控 + +可观测性是运维团队日常工作中的重要一环,随着企业部署在云厂商平台上业务量的不断增加,运维团队所面临的压力与挑战也与日俱增。对于将业务跨云夸集群部署的企业来说,运维团队需要处理海量的数据以对各个 Kubernetes 集群进行监控与分析。此外,如何满足企业对自定义监控指标的需求也是急需解决的问题之一。 + +### 多维度集群监控 + +当前,越来越多的企业和个人跨云部署多集群,然而,由于各个云厂商的环境不同,其所提供可观测性工具可能并不适用其他平台。从学习成本和监控的角度来说,进行跨集群管理和监控也并非易事。简而言之,运维团队急需一种统一的工具以对多集群上不同的指标实现多维度监控。 + +### 日志、事件与审计查询 + +强大的可观测性系统需要由灵活的日志查询体系所支撑,帮助用户追踪集群内各类资源的完整信息,了解集群中的最新状况,例如告警消息、节点调度状态、应用部署情况以及网络策略变更等。由此,用户可对其业务做出相应的调整。 + +### 自定义监控 + +即使是在同一平台进行资源监控,云厂商所提供的工具也并非适用于所有场景。在某些情况下,用户需要建立其特有的可观测性标准,例如自定义监控指标和监控形式。此外,他们还需要手动将常用工具集成至云端,如用于 Kubernetes 监控的事实标准工具 Prometheus。换言之,自定义功能已成为行业上的必要需求,不仅需要各类云原生应用提供云上业务支撑,同时也需要细粒度全监控功能,以提前检测出任何可能对业务造成影响的问题。 + +如前文所述,KubeSphere 提供统一的中央控制平面用于跨云多集群管理,极大降低了运维成本。与此同时,KubeSphere 还具备强大的可观测性功能(告警通知、审计日志与事件)以监控多集群资源,为用户提供多维度自定义监控面板,用户可自行选择以何种形式监控任意资源。此外,KubeSphere 还配有多指标的日志、事件与审计查询功能,以可视化的形式提供基于多租户的日志检索。 + +借助 KubeSphere,企业可以更多地专注于业务创新,从复杂的数据收集和分析流程中彻底解放。 + +## 微服务和云原生架构 + +在企业数字化转型过程中,推动应用迅速迭代的压力也与日俱增。具体来说,企业需要加快开发流程,缩短交付时间,提高更新频率。然而,现代化、云原生应用更多地以微服务的形式部署,而非从前的单体大型应用,这也给企业的应用研发与更新带来了更多的挑战。例如,微服务之间的频繁交付需要稳定、流畅的网络连接,网络延迟不仅影响系统问题性,更会降低用户体验。如何在不影响生产环境的同时进行版本更迭成为各个企业必须要解决的问题。为此,企业需要搭建一套完整的微服务架构以及时地检测并解决潜在问题。 + +KubeSphere 提供轻量级、扩展性强的微服务架构,为企业创造了充分的条件以开发云原生应用程序应对各类使用场景。基于 Istio,KubeSphere 以代码无侵入的模式提供可视化、灵活的微服务治理平台,包含各类微服务治理功能,支持熔断、灰度发布、流量管控、分布式链路追踪等,助力企业一步搭建微服务架构,实现应用云原生转型。 + +### 可视化 + +由于服务网格的微服务之间会频繁进行交互,如果能以可视化的方式查看微服务之间通信,用户也能更好地了解微服务的拓扑关系。此外,分布式链路追踪对每个服务来说同样重要,能让管理者了解服务网格中调度流向和服务依赖。 + +### 灰度策略 + +当企业引入服务新版本时,可以在 KubeSphere 中采取不同的灰度发布策略。 + +**蓝绿发布**提供零宕机部署,即在保留旧版本的同时部署新版本。在任何时候,只有其中一个版本处于活跃状态,接收所有流量,另一个版本保持空闲状态。如果运行出现问题,您可以快速回滚到旧版本。 + +**金丝雀发布**将实际流量引入新版本以测试性能和可靠性,在不影响系统稳定性的同时能够检测实际环境中存在的问题。 + 
+**流量镜像**是一种强大的、无风险的测试应用版本的方法,将实时流量的副本发送给被镜像的服务。采用这种方法,您可以搭建一个与原环境类似的环境以进行验收测试,从而提前发现问题。 + +## DevOps 落地实践 + +DevOps 是一套重要的实践和方法,让开发和运维团队能够更高效地协同工作。软件的开发、测试和发布也得以更迅速、高效和可靠。KubeSphere 中的 CI/CD 流水线为企业提供敏捷开发功能和自动化运维。同时, KubeSphere 的微服务治理功能,帮助企业以一种细粒度的方式开发、测试和发布服务,有效推动企业 DevOps 落地。借助 KubeSphere 的 DevOps 系统,企业可以: + +- 以代码无侵入的模式通过错误注入测试服务健壮性; +- 可视化端到端监控流程; +- 以图形编辑面板创建流水线,无需编写 Jenkinsfile; +- 为流水线轻松集成第三方程序,例如 SonarQube 用于代码质检。 + +## 裸机环境部署 + +有时,云端并非资源部署的最优环境。例如,当需要大量计算资源并要求硬盘高 I/O 速度时,使用专门的物理服务器可以实现更佳的性能。此外,对于一些难以迁移上云的特殊工作负载,可能还需要通过经认证的硬件运行,加以复杂的许可与支持协议,在这种情况下,企业更倾向于使用裸机环境部署应用。 + +借助新一代轻量级安装器 [KubeKey](https://github.com/kubesphere/kubekey),KubeSphere 帮助企业快速在裸机环境搭建容器化架构,并通过 OpenELB 实现流量的负载均衡。[OpenELB](https://github.com/kubesphere/openelb) 由 KubeSphere 社区开源,专为裸机环境下的负载均衡所设计,现已加入 CNCF Landscape,是为 CNCF 所认可的构建云原生最佳实践中的重要一环。 + +有关 KubeSphere 如何推动各行各业的发展并实现数字化转型,请参见[用户案例学习](/case/)。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/introduction/what's-new-in-3.3.md b/content/zh/docs/v3.4/introduction/what's-new-in-3.3.md new file mode 100644 index 000000000..25fa5734b --- /dev/null +++ b/content/zh/docs/v3.4/introduction/what's-new-in-3.3.md @@ -0,0 +1,13 @@ +--- +title: "3.3 重要更新" +keywords: 'Kubernetes, KubeSphere, 介绍' +description: '3.3 新增了对 “边缘计算” 场景的支持。同时在 3.2.x 的基础上新增了计量计费,让基础设施的运营成本更清晰,并进一步优化了在 “多云、多集群、多团队、多租户” 等应用场景下的使用体验' +linkTitle: "3.3 重要更新" +weight: 1400 +--- + +2022 年 6 月 24 日,KubeSphere 3.3 正式发布,带来了更多令人期待的功能。新增了基于 GitOps 的持续部署方案,进一步优化了 DevOps 的使用体验。同时还增强了 “多集群管理、多租户管理、可观测性、应用商店、微服务治理、边缘计算、存储” 等特性,更进一步完善交互设计,并全面提升了用户体验。 + +关于 3.3 新特性的详细解读,可参考博客 [KubeSphere 3.3.0 发布:全面拥抱 GitOps](/../../news/kubesphere-3.3.0-ga-announcement/)。 + +关于 3.3 的新功能及增强、Bug 修复、重要的技术调整,以及废弃或移除的功能,请参见 [3.3.0 版本说明](../../../v3.3/release/release-v330/),[3.3.1 版本说明](../../../v3.3/release/release-v331/)和[3.3.2 版本说明](../../../v3.3/release/release-v332/)。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/introduction/what-is-kubesphere.md b/content/zh/docs/v3.4/introduction/what-is-kubesphere.md new file mode 100644 index 000000000..788b1216e --- /dev/null +++ b/content/zh/docs/v3.4/introduction/what-is-kubesphere.md @@ -0,0 +1,39 @@ +--- +title: "什么是 KubeSphere" +keywords: 'Kubernetes, KubeSphere, 介绍' +description: '什么是 KubeSphere' +linkTitle: "什么是 KubeSphere" +weight: 1100 +--- + +## 概述 + +[KubeSphere](https://kubesphere.io) 是在 [Kubernetes](https://kubernetes.io) 之上构建的面向云原生应用的**分布式操作系统**,完全开源,支持多云与多集群管理,提供全栈的 IT 自动化运维能力,简化企业的 DevOps 工作流。它的架构可以非常方便地使第三方应用与云原生生态组件进行即插即用 (plug-and-play) 的集成。 + +作为全栈的多租户容器平台,KubeSphere 提供了运维友好的向导式操作界面,帮助企业快速构建一个强大和功能丰富的容器云平台。KubeSphere 为用户提供构建企业级 Kubernetes 环境所需的多项功能,例如**多云与多集群管理、Kubernetes 资源管理、DevOps、应用生命周期管理、微服务治理(服务网格)、日志查询与收集、服务与网络、多租户管理、监控告警、事件与审计查询、存储管理、访问权限控制、GPU 支持、网络策略、镜像仓库管理以及安全管理**等。 + +KubeSphere 还开源了 [KubeKey](https://github.com/kubesphere/kubekey) 帮助企业一键在公有云或数据中心快速搭建 Kubernetes 集群,提供单节点、多节点、集群插件安装,以及集群升级与运维。 + +![功能概览](/images/docs/v3.3/zh-cn/introduction/what-is-kubesphere/kubesphere-feature-overview.jpeg) + +## 开发运维友好 + +KubeSphere 为用户屏蔽了基础设施底层复杂的技术细节,帮助企业在各类基础设施之上无缝地部署、更新、迁移和管理现有的容器化应用。通过这种方式,KubeSphere 使开发人员能够专注于应用程序开发,使运维团队能够通过企业级可观测性功能和故障排除机制、统一监控和日志查询、存储和网络管理,以及易用的 CI/CD 流水线等来加快 DevOps 自动化工作流程和交付流程等。 + +## 支持在任意平台运行 KubeSphere + +作为一个灵活的轻量级容器 PaaS 平台,KubeSphere 对不同云生态系统的支持非常友好,因为它对原生 Kubernetes 本身没有任何的侵入 (Hack)。换句话说,KubeSphere 可以**部署并运行在任何基础架构以及所有版本兼容的 Kubernetes 集群**之上,包括虚拟机、物理机、数据中心、公有云和混合云等。 + +您可以选择在公有云和托管 Kubernetes 集群(例如阿里云、AWS、青云QingCloud、腾讯云、华为云等)上安装 KubeSphere,**还可以导入和纳管已有的 Kubernetes 集群**。 + +KubeSphere 
可以在不修改用户当前的资源或资产、不影响其业务的情况下部署在现有的 Kubernetes 平台上。有关更多信息,请参见[在 Linux 上安装](../../installing-on-linux/)和[在 Kubernetes 上安装](../../installing-on-kubernetes/)。 + +## 完全开源 + +借助开源的模式,KubeSphere 社区驱动着开发工作以开放的方式进行。KubeSphere **100% 开源免费**,已大规模服务于社区用户,广泛地应用在以 Docker 和 Kubernetes 为中心的开发、测试及生产环境中,大量服务平稳地运行在 KubeSphere 之上。您可在 [GitHub](https://github.com/kubesphere/) 上找到所有源代码、文档和讨论,所有主要的开源项目介绍可以在[开源项目列表](../../../../projects/)中找到。 + +## 云原生 Landscape + +KubeSphere 是 CNCF 基金会成员并且通过了 [Kubernetes 一致性认证](https://www.cncf.io/certification/software-conformance/#logos),进一步丰富了 [CNCF 云原生的生态](https://landscape.cncf.io/?landscape=observability-and-analysis&license=apache-license-2-0)。 + +![cncf-landscape](/images/docs/v3.3/zh-cn/introduction/what-is-kubesphere/cncf-landscape.png) \ No newline at end of file diff --git a/content/zh/docs/v3.4/multicluster-management/_index.md b/content/zh/docs/v3.4/multicluster-management/_index.md new file mode 100644 index 000000000..5f0568cf6 --- /dev/null +++ b/content/zh/docs/v3.4/multicluster-management/_index.md @@ -0,0 +1,15 @@ +--- +title: "多集群管理" +description: "将托管的或本地的 Kubernetes 集群导入 KubeSphere" +layout: "second" + +linkTitle: "多集群管理" + +weight: 5000 + +icon: "/images/docs/v3.3/docs.svg" +--- + +## 介绍 + +本章演示如何使用 KubeSphere 的多集群功能导入异构集群以进行统一的管理。 diff --git a/content/zh/docs/v3.4/multicluster-management/enable-multicluster/_index.md b/content/zh/docs/v3.4/multicluster-management/enable-multicluster/_index.md new file mode 100644 index 000000000..b26d04f98 --- /dev/null +++ b/content/zh/docs/v3.4/multicluster-management/enable-multicluster/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "启用多集群" +weight: 5200 + +_build: + render: false +--- diff --git a/content/zh/docs/v3.4/multicluster-management/enable-multicluster/agent-connection.md b/content/zh/docs/v3.4/multicluster-management/enable-multicluster/agent-connection.md new file mode 100644 index 000000000..0e56327d9 --- /dev/null +++ b/content/zh/docs/v3.4/multicluster-management/enable-multicluster/agent-connection.md @@ -0,0 +1,264 @@ +--- +title: "代理连接" +keywords: 'Kubernetes, KubeSphere, 多集群, 代理连接' +description: '了解通过代理连接导入集群的一般步骤。' +linkTitle: "代理连接" +weight: 5220 +--- + +KubeSphere 的组件 [Tower](https://github.com/kubesphere/tower) 用于代理连接。Tower 是一种通过代理在集群间建立网络连接的工具。如果主集群无法直接访问成员集群,您可以暴露主集群的代理服务地址,这样可以让成员集群通过代理连接到主集群。当成员集群部署在私有环境(例如 IDC)并且主集群可以暴露代理服务时,适用此连接方法。当您的集群分布部署在不同的云厂商上时,同样适用代理连接的方法。 + +要通过代理连接使用多集群功能,您必须拥有至少两个集群,分别用作主集群和成员集群。您可以在安装 KubeSphere 之前或者之后将一个集群指定为主集群或成员集群。有关安装 KubeSphere 的更多信息,请参考[在 Linux 上安装](../../../installing-on-linux/)和[在 Kubernetes 上安装](../../../installing-on-kubernetes/)。 + +## 准备主集群 + +主集群为您提供中央控制平面,并且您只能指定一个主集群。 + +{{< tabs >}} + +{{< tab "已经安装 KubeSphere" >}} + +如果已经安装了独立的 KubeSphere 集群,您可以编辑集群配置,将 `clusterRole` 的值设置为 `host`。 + +- 选项 A - 使用 Web 控制台: + + 使用 `admin` 帐户登录控制台,然后进入**集群管理**页面上的**定制资源定义**,输入关键字 `ClusterConfiguration`,然后转到其详情页面。编辑 `ks-installer` 的 YAML 文件,方法类似于[启用可插拔组件](../../../pluggable-components/)。 + +- 选项 B - 使用 Kubectl: + + ```shell + kubectl edit cc ks-installer -n kubesphere-system + ``` + +在 `ks-installer` 的 YAML 文件中,搜寻到 `multicluster`,将 `clusterRole` 的值设置为 `host`,然后点击**确定**(如果使用 Web 控制台)使其生效: + +```yaml +multicluster: + clusterRole: host +``` + +要设置主集群名称,请在 `ks-installer` 的 YAML 文件中的 `multicluster.clusterRole` 下添加 `hostClusterName` 字段: + +```yaml +multicluster: + clusterRole: host + hostClusterName: <主集群名称> +``` + +{{< notice note >}} + +- 建议您在准备主集群的同时设置主集群名称。若您的主集群已在运行并且已经部署过资源,不建议您再去设置主集群名称。 +- 主集群名称只能包含小写字母、数字、连字符(-)或者半角句号(.),必须以小写字母或数字开头和结尾。 + +{{}} + +您需要**稍等片刻**待该更改生效。 + 
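+上述修改也可以通过 `kubectl patch` 一步完成。下面仅是一个示意性写法(假设您的集群配置遵循默认的 `spec.multicluster` 结构,其中 `host-cluster` 只是示例名称,请按需替换),实际字段请以您环境中 `ks-installer` 的 YAML 为准:
+
+```bash
+# 将 clusterRole 设置为 host,并设置主集群名称(示例值,请替换为您自己的名称)
+kubectl -n kubesphere-system patch clusterconfiguration ks-installer \
+  --type merge \
+  -p '{"spec":{"multicluster":{"clusterRole":"host","hostClusterName":"host-cluster"}}}'
+```
+
+与直接编辑 YAML 的方式相同,此更改同样需要稍等片刻才会生效。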
+{{}} + +{{< tab "尚未安装 KubeSphere" >}} + +在 Linux 上或者在现有 Kubernetes 集群上安装 KubeSphere 之前,您可以定义一个主集群。如果您想[在 Linux 上安装 KubeSphere](../../../installing-on-linux/introduction/multioverview/#1-创建示例配置文件),需要使用 `config-sample.yaml` 文件。如果您想[在现有 Kubernetes 集群上安装 KubeSphere](../../../installing-on-kubernetes/introduction/overview/#部署-kubesphere),需要使用两个 YAML 文件,其中一个是 `cluster-configuration.yaml`。 + +要设置一个主集群,请在安装 KubeSphere 之前,将 `config-sample.yaml` 或 `cluster-configuration.yaml` 文件中对应的 `clusterRole` 的值修改为 `host`。 + +```yaml +multicluster: + clusterRole: host +``` + +要设置主集群名称,请在 `config-sample.yaml` 或 `cluster-configuration.yaml` 文件中的 `multicluster.clusterRole` 下添加 `hostClusterName` 字段: + +```yaml +multicluster: + clusterRole: host + hostClusterName: <主集群名称> +``` + +{{< notice note >}} + +- 主集群名称只能包含小写字母、数字、连字符(-)或者半角句号(.),必须以小写字母或数字开头和结尾。 + +{{}} + +{{< notice info >}} + +如果您在单节点集群上安装 KubeSphere ([All-in-One](../../../quick-start/all-in-one-on-linux/)),则不需要创建 `config-sample.yaml` 文件。这种情况下,您可以在安装 KubeSphere 之后设置主集群。 + +{{}} + +{{}} + +{{}} + +您可以使用 **kubectl** 来获取安装日志以验证状态。运行以下命令,稍等片刻,如果主集群已准备就绪,您将看到成功的日志返回。 + +```bash +kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f +``` + +## 设置代理服务地址 + +安装主集群后,将在 `kubesphere-system` 中创建一个名为 `tower` 的代理服务,其类型为 `LoadBalancer`。 + +{{< tabs >}} + +{{< tab "集群中有可用的 LoadBalancer" >}} + +如果集群中有可用的 LoadBalancer 插件,则可以看到 Tower 服务有相应的 `EXTERNAL-IP` 地址,该地址将由 KubeSphere 自动获取并配置代理服务地址,这意味着您可以跳过设置代理服务地址这一步。执行以下命令确认是否有 LoadBalancer 插件。 + +```bash +kubectl -n kubesphere-system get svc +``` + +命令输出结果可能如下所示: + +```shell +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +tower LoadBalancer 10.233.63.191 139.198.110.23 8080:30721/TCP 16h +``` + +{{< notice note >}} + +一般来说,主流公有云厂商会提供 LoadBalancer 解决方案,并且负载均衡器可以自动分配外部 IP。如果您的集群运行在本地环境中,尤其是在**裸机环境**中,可以使用 [OpenELB](https://github.com/kubesphere/openelb) 作为负载均衡器解决方案。 + +{{}} + +{{}} + +{{< tab "集群中没有可用的 LoadBalancer" >}} + +1. 执行以下命令来检查服务。 + + ```shell + kubectl -n kubesphere-system get svc + ``` + + 命令输出结果可能如下所示。在此示例中,可以看出 `NodePort` 为 `30721`: + ``` + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + tower LoadBalancer 10.233.63.191 8080:30721/TCP 16h + ``` + +2. 由于 `EXTERNAL-IP` 处于 `pending` 状态,您需要手动设置代理地址。例如,如果您的公有 IP 地址为 `139.198.120.120`,则需要将公网 IP 的端口,如`30721` 转发到 `NodeIP`:`NodePort`。 + +3. 将 `proxyPublishAddress` 的值添加到 `ks-installer` 的配置文件中,并按如下所示输入公有 IP 地址(此处示例 `139.198.120.120`)和端口号。 + + - 选项 A - 使用 Web 控制台: + + 使用 `admin` 帐户登录控制台,然后进入**集群管理**页面上的**定制资源定义**,输入关键字 `ClusterConfiguration`,然后转到其详情页面。编辑 `ks-installer` 的 YAML 文件,方法类似于[启用可插拔组件](../../../pluggable-components/)。 + + - 选项 B - 使用 Kubectl: + + ```bash + kubectl -n kubesphere-system edit clusterconfiguration ks-installer + ``` + + 搜寻到 `multicluster` 并添加新行输入 `proxyPublishAddress` 来定义 IP 地址,以便访问 Tower。 + + ```yaml + multicluster: + clusterRole: host + proxyPublishAddress: http://139.198.120.120:{NodePort} # Add this line to set the address to access tower + ``` + 请将 YAML 文件中的 {NodePort} 替换为您在步骤 2 中指定的端口。 + +4. 
保存配置并稍等片刻,或者您可以运行以下命令手动重启 `ks-apiserver` 使修改立即生效。 + + ```shell + kubectl -n kubesphere-system rollout restart deployment ks-apiserver + ``` + +{{}} + +{{}} + +## 准备成员集群 + +为了通过**主集群**管理成员集群,您需要使它们之间的 `jwtSecret` 相同。因此,您首先需要在**主集群**中执行以下命令来获取它。 + +```bash +kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret +``` + +命令输出结果可能如下所示: + +```yaml +jwtSecret: "gfIwilcc0WjNGKJ5DLeksf2JKfcLgTZU" +``` + +{{< tabs >}} + +{{< tab "已经安装 KubeSphere" >}} + +如果已经安装了独立的 KubeSphere 集群,您可以编辑集群配置,将 `clusterRole` 的值设置为 `member`。 + +- 选项 A - 使用 Web 控制台: + + 使用 `admin` 帐户登录控制台,然后进入**集群管理**页面上的**定制资源定义**,输入关键字 `ClusterConfiguration`,然后转到其详情页面。编辑 `ks-installer` 的 YAML 文件,方法类似于[启用可插拔组件](../../../pluggable-components/)。 + +- 选项 B - 使用 Kubectl: + + ```shell + kubectl edit cc ks-installer -n kubesphere-system + ``` + +在 `ks-installer` 的 YAML 文件中对应输入上面所示的 `jwtSecret`: + +```yaml +authentication: + jwtSecret: gfIwilcc0WjNGKJ5DLeksf2JKfcLgTZU +``` + +向下滚动并将 `clusterRole` 的值设置为 `member`,然后点击**确定**(如果使用 Web 控制台)使其生效: + +```yaml +multicluster: + clusterRole: member +``` + +您需要**稍等片刻**待该更改生效。 + +{{}} + +{{< tab "尚未安装 KubeSphere" >}} + +在 Linux 上或者在现有 Kubernetes 集群上安装 KubeSphere 之前,您可以定义成员集群。如果您想[在 Linux 上安装 KubeSphere](../../../installing-on-linux/introduction/multioverview/#1-创建示例配置文件),需要使用 `config-sample.yaml` 文件。如果您想[在现有 Kubernetes 集群上安装 KubeSphere](../../../installing-on-kubernetes/introduction/overview/#部署-kubesphere),需要使用两个 YAML 文件,其中一个是 `cluster-configuration.yaml`。要设置成员集群,请在安装 KubeSphere 之前,在 `config-sample.yaml` 或 `cluster-configuration.yaml` 文件中输入上方 `jwtSecret` 所对应的值,并将 `clusterRole` 的值修改为 `member`。 + +```yaml +authentication: + jwtSecret: gfIwilcc0WjNGKJ5DLeksf2JKfcLgTZU +``` + +```yaml +multicluster: + clusterRole: member +``` + +{{< notice note >}} + +如果您在单节点集群上安装 KubeSphere ([All-in-One](../../../quick-start/all-in-one-on-linux/)),则不需要创建 `config-sample.yaml` 文件。这种情况下,您可以在安装 KubeSphere 之后设置成员集群。 + +{{}} + +{{}} + +{{}} + +您可以使用 **kubectl** 来获取安装日志以验证状态。运行以下命令,稍等片刻,如果成员集群已准备就绪,您将看到成功的日志返回。 + +```bash +kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f +``` + +## 导入成员集群 + +1. 以 `admin` 身份登录 KubeSphere 控制台,转到**集群管理**页面点击**添加集群**。 + +2. 在**导入集群**页面输入要导入的集群的基本信息。您也可以点击右上角的**编辑模式**以 YAML 格式查看并编辑基本信息。编辑完成后,点击**下一步**。 + +3. 在**连接方式**,选择**集群连接代理**,然后点击**创建**。主集群为代理部署 (Deployment) 生成的 YAML 配置文件会显示在控制台上。 + +4. 根据指示在成员集群中创建一个 `agent.yaml` 文件,然后将代理部署复制并粘贴到该文件中。在该节点上执行 `kubectl create -f agent.yaml` 然后等待代理启动并运行。请确保成员集群可以访问代理地址。 + +5. 
待集群代理启动并运行,您会看到成员集群已经导入主集群。 diff --git a/content/zh/docs/v3.4/multicluster-management/enable-multicluster/direct-connection.md b/content/zh/docs/v3.4/multicluster-management/enable-multicluster/direct-connection.md new file mode 100644 index 000000000..f1700616a --- /dev/null +++ b/content/zh/docs/v3.4/multicluster-management/enable-multicluster/direct-connection.md @@ -0,0 +1,195 @@ +--- +title: "直接连接" +keywords: 'Kubernetes, KubeSphere, 多集群, 混合云, 直接连接' +description: '了解通过直接连接导入集群的一般步骤。' +linkTitle: "直接连接" +weight: 5210 +--- + +如果主集群的任何节点都能访问的 kube-apiserver 地址,您可以采用**直接连接**。当成员集群的 kube-apiserver 地址可以暴露给外网,或者主集群和成员集群在同一私有网络或子网中时,此方法均适用。 + +要通过直接连接使用多集群功能,您必须拥有至少两个集群,分别用作主集群和成员集群。您可以在安装 KubeSphere 之前或者之后将一个集群指定为主集群或成员集群。有关安装 KubeSphere 的更多信息,请参考[在 Linux 上安装](../../../installing-on-linux/)和[在 Kubernetes 上安装](../../../installing-on-kubernetes/)。 + +## 准备主集群 + +主集群为您提供中央控制平面,并且您只能指定一个主集群。 + +{{< tabs >}} + +{{< tab "已经安装 KubeSphere" >}} + +如果已经安装了独立的 KubeSphere 集群,您可以编辑集群配置,将 `clusterRole` 的值设置为 `host`。 + +- 选项 A - 使用 Web 控制台: + + 使用 `admin` 帐户登录控制台,然后进入**集群管理**页面上的**定制资源定义**,输入关键字 `ClusterConfiguration`,然后转到其详情页面。编辑 `ks-installer` 的 YAML 文件,方法类似于[启用可插拔组件](../../../pluggable-components/)。 + +- 选项 B - 使用 Kubectl: + + ```shell + kubectl edit cc ks-installer -n kubesphere-system + ``` + +在 `ks-installer` 的 YAML 文件中,搜寻到 `multicluster`,将 `clusterRole` 的值设置为 `host`,然后点击**确定**(如果使用 Web 控制台)使其生效: + +```yaml +multicluster: + clusterRole: host +``` + +要设置主集群名称,请在 `ks-installer` 的 YAML 文件中的 `multicluster.clusterRole` 下添加 `hostClusterName` 字段: + +```yaml +multicluster: + clusterRole: host + hostClusterName: <主集群名称> +``` + +{{< notice note >}} + +- 建议您在准备主集群的同时设置主集群名称。若您的主集群已在运行并且已经部署过资源,不建议您再去设置主集群名称。 +- 主集群名称只能包含小写字母、数字、连字符(-)或者半角句号(.),必须以小写字母或数字开头和结尾。 + +{{}} + +您需要**稍等片刻**待该更改生效。 + +{{}} + +{{< tab "尚未安装 KubeSphere" >}} + +在 Linux 上或者在现有 Kubernetes 集群上安装 KubeSphere 之前,您可以定义一个主集群。如果您想[在 Linux 上安装 KubeSphere](../../../installing-on-linux/introduction/multioverview/#1-创建示例配置文件),需要使用 `config-sample.yaml` 文件。如果您想[在现有 Kubernetes 集群上安装 KubeSphere](../../../installing-on-kubernetes/introduction/overview/#部署-kubesphere),需要使用两个 YAML 文件,其中一个是 `cluster-configuration.yaml`。 + +要设置一个主集群,请在安装 KubeSphere 之前,将 `config-sample.yaml` 或 `cluster-configuration.yaml` 文件中对应的 `clusterRole` 的值修改为 `host`。 + +```yaml +multicluster: + clusterRole: host +``` + +要设置主集群名称,请在 `config-sample.yaml` 或 `cluster-configuration.yaml` 文件中的 `multicluster.clusterRole` 下添加 `hostClusterName` 字段: + +```yaml +multicluster: + clusterRole: host + hostClusterName: <主集群名称> +``` + +{{< notice note >}} + +- 主集群名称只能包含小写字母、数字、连字符(-)或者半角句号(.),必须以小写字母或数字开头和结尾。 + +{{}} + +{{< notice info >}} + +如果您在单节点集群上安装 KubeSphere ([All-in-One](../../../quick-start/all-in-one-on-linux/)),则不需要创建 `config-sample.yaml` 文件。这种情况下,您可以在安装 KubeSphere 之后设置主集群。 + +{{}} + +{{}} + +{{}} + +您可以使用 **kubectl** 来获取安装日志以验证状态。运行以下命令,稍等片刻,如果主集群已准备就绪,您将看到成功的日志返回。 + +```bash +kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f +``` + +## 准备成员集群 + +为了通过**主集群**管理,您需要使它们之间的 `jwtSecret` 相同。因此,您首先需要在**主集群**中执行以下命令来获取它。 + +```bash +kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret +``` + +命令输出结果可能如下所示: + +```yaml +jwtSecret: "gfIwilcc0WjNGKJ5DLeksf2JKfcLgTZU" +``` + +{{< tabs >}} + +{{< tab "已经安装 KubeSphere" >}} + +如果已经安装了独立的 KubeSphere 集群,您可以编辑集群配置,将 `clusterRole` 的值设置为 `member`。 + +- 选项 A - 使用 Web 控制台: + + 使用 `admin` 
帐户登录控制台,然后进入**集群管理**页面上的**定制资源定义**,输入关键字 `ClusterConfiguration`,然后转到其详情页面。编辑 `ks-installer` 的 YAML 文件,方法类似于[启用可插拔组件](../../../pluggable-components/)。 + +- 选项 B - 使用 Kubectl: + + ```shell + kubectl edit cc ks-installer -n kubesphere-system + ``` + +在 `ks-installer` 的 YAML 文件中对应输入上面所示的 `jwtSecret`: + +```yaml +authentication: + jwtSecret: gfIwilcc0WjNGKJ5DLeksf2JKfcLgTZU +``` + +向下滚动并将 `clusterRole` 的值设置为 `member`,然后点击**确定**(如果使用 Web 控制台)使其生效: + +```yaml +multicluster: + clusterRole: member +``` + +您需要**稍等片刻**待该更改生效。 + +{{}} + +{{< tab "尚未安装 KubeSphere" >}} + +在 Linux 上或者在现有 Kubernetes 集群上安装 KubeSphere 之前,您可以定义。如果您想[在 Linux 上安装 KubeSphere](../../../installing-on-linux/introduction/multioverview/#1-创建示例配置文件),需要使用 `config-sample.yaml` 文件。如果您想[在现有 Kubernetes 集群上安装 KubeSphere](../../../installing-on-kubernetes/introduction/overview/#部署-kubesphere),需要使用两个 YAML 文件,其中一个是 `cluster-configuration.yaml`。 + +要设置,请在安装 KubeSphere 之前,在 `config-sample.yaml` 或 `cluster-configuration.yaml` 文件中输入上方 `jwtSecret` 所对应的值,并将 `clusterRole` 的值修改为 `member`。 + +```yaml +authentication: + jwtSecret: gfIwilcc0WjNGKJ5DLeksf2JKfcLgTZU +``` + +```yaml +multicluster: + clusterRole: member +``` + +{{< notice note >}} + +如果您在单节点集群上安装 KubeSphere ([All-in-One](../../../quick-start/all-in-one-on-linux/)),则不需要创建 `config-sample.yaml` 文件。这种情况下,您可以在安装 KubeSphere 之后设置。 + +{{}} + +{{}} + +{{}} + +您可以使用 **kubectl** 来获取安装日志以验证状态。运行以下命令,稍等片刻,如果已准备就绪,您将看到成功的日志返回。 + +```bash +kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f +``` + +## 导入成员集群 + +1. 以 `admin` 身份登录 KubeSphere 控制台,转到**集群管理**页面点击**添加集群**。 + +2. 在**导入集群**页面,输入要导入的集群的基本信息。您也可以点击右上角的**编辑模式**以 YAML 格式查看并编辑基本信息。编辑完成后,点击**下一步**。 + +3. 在**连接方式**,选择**直接连接 Kubernetes 集群**,复制 kubeconfig 内容并粘贴至文本框。您也可以点击右上角的**编辑模式**以 YAML 格式编辑的 kubeconfig。 + + {{< notice note >}} + +请确保 server 地址为该 member 集群可访问的 kube-apiserver 地址(主集群的任何节点都能访问)。 + + {{}} + +4. 
点击**创建**,然后等待集群初始化完成。 + diff --git a/content/zh/docs/v3.4/multicluster-management/enable-multicluster/retrieve-kubeconfig.md b/content/zh/docs/v3.4/multicluster-management/enable-multicluster/retrieve-kubeconfig.md new file mode 100644 index 000000000..12fe8612c --- /dev/null +++ b/content/zh/docs/v3.4/multicluster-management/enable-multicluster/retrieve-kubeconfig.md @@ -0,0 +1,43 @@ +--- +title: "获取 Kubeconfig" +keywords: 'Kubernetes, KubeSphere, 多集群, 混合云, kubeconfig' +description: '获取通过直接连接导入集群所需的 kubeconfig。' +linkTitle: "获取 Kubeconfig" +weight: 5230 +--- + +如果您使用[直接连接](../direct-connection/)导入,则需要提供 kubeconfig。 + +## 准备工作 + +您有一个 Kubernetes 集群。 + +## 获取 Kubeconfig + +进入 `$HOME/.kube`,检查目录中的文件,通常该目录下存在一个名为 `config` 的文件。使用以下命令获取 kubeconfig 文件: + +```bash +cat $HOME/.kube/config +``` + +```yaml +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJd01EZ3dPREE1hqaVE3NXhwbGFQNUgwSm5ySk5peTBacFh6QWxjYzZlV2JlaXJ1VgpUbmZUVjZRY3pxaVcrS3RBdFZVbkl4MCs2VTgzL3FiKzdINHk2RnA0aVhUaDJxRHJ6Qkd4dG1UeFlGdC9OaFZlCmhqMHhEbHVMOTVUWkRjOUNmSFgzdGZJeVh5WFR3eWpnQ2g1RldxbGwxVS9qVUo2RjBLVVExZ1pRTFp4TVJMV0MKREM2ZFhvUGlnQ3BNaVRPVXl5SVNhWUVjYVNBMEo5VWZmSGd4ditVcXVleTc0cEM2emszS0lOT2tGMkI1MllxeApUa09OT2VkV2hDUExMZkUveVJqeGw1aFhPL1Z4REFaVC9HQ1Y1a0JZN0toNmRhendmUllOa21IQkhDMD0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=hqaVE3NXhwbGFQNUgwSm5ySk5peTBacFh6QWxjYzZlV2JlaXJ1VgpUbmZUVjZRY3pxaVcrS3RBdFZVbkl4MCs2VTgzL3FiKzdINHk2RnA0aVhUaDJxRHJ6Qkd4dG1UeFlGdC9OaFZlCmhqMHhEbHVMOTVUWkRjOUNmSFgzdGZJeVh5WFR3eWpnQ2g1RldxbGwxVS9qVUo2RjBLVVExZ1pRTFp4TVJMV0MKREM2ZFhvUGlnQ3BNaVRPVXl5SVNhWUVjYVNBMEo5VWZmSGd4ditVcXVleTc0cEM2emszS0lOT2tGMkI1MllxeApUa09OT2VkV2hDUExMZkUveVJqeGw1aFhPL1Z4REFaVC9HQ1Y1a0JZN0toNmRhendmUllOa21IQkhDMD0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + server: https://lb.kubesphere.local:6443 + name: cluster.local +contexts: +- context: + cluster: cluster.local + user: kubernetes-admin + name: kubernetes-admin@cluster.local +current-context: kubernetes-admin@cluster.local +kind: Config +preferences: {} +users: +- name: kubernetes-admin + user: + client-certificate-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM4akNDQWRxZ0F3SUJBZ0lJRzd5REpscVdjdTh3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TURBNE1EZ3dPVEkzTXpkYUZ3MHlNVEE0TURnd09USTNNemhhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnsOTJBUkJDNTRSR3BsZ3VmCmw5a0hPd0lEQVFBQm95Y3dKVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFEQ2FUTXNBR1Vhdnhrazg0NDZnOGNRQUJpSmk5RTZiREV5TwphRnJubC8reGRzRmgvOTFiMlNpM3ZwaHFkZ2k5bXRYWkhhaWI5dnQ3aXdtSEFwbGQxUkhBU25sMFoxWFh1dkhzCmMzcXVIU0puY3dmc3JKT0I4UG9NRjVnaG10a0dPV3g0M2RHTTNHQnpGTVJ4ZGcrNmttNjRNUGhneXl6NTJjYUoKbzhPajNja1Uzd1NWNkxvempRcFVaUnZHV25qQjEwUXFPWXBtQUk4VCtlZkxKZzhuY0drK3V3UUVTeXBYWExpYwoxWVQ2QkFJeFhEK2tUUU1hOFhjdUhHZzlWRkdsUm9yK1EvY3l0S3RDeHVncFlxQ2xvbHVpckFUUnpsemRXamxYCkVQaHVjRWs2UUdIZEpObjd0M2NwRGkzSUdYYXJFdGxQQmFwck9nSGpkOHZVOStpWXdoQT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=TJBUkJDNTRSR3BsZ3VmCmw5a0hPd0lEQVFBQm95Y3dKVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFEQ2FUTXNBR1Vhdnhrazg0NDZnOGNRQUJpSmk5RTZiREV5TwphRnJubC8reGRzRmgvOTFiMlNpM3ZwaHFkZ2k5bXRYWkhhaWI5dnQ3aXdtSEFwbGQxUkhBU25sMFoxWFh1dkhzCmMzcXVIU0puY3dmc3JKT0I4UG9NRjVnaG10a0dPV3g0M2RHTTNHQnpGTVJ4ZGcrNmttNjRNUGhneXl6NTJjYUoKbzhPajNja1Uzd1NWNkxvempRcFVaUnZHV25qQjEwUXFPWXBtQUk4VCtlZkxKZzhuY0drK3V3UUVTeXBYWExpYwoxWVQ2QkFJeFhEK2tUUU1hOFhjdUhHZzlWRkdsUm9yK1EvY3l0S3RDeHVncFlxQ2xvbHVpckFUUnpsemRXamxYCkVQaHVjRWs2UUdIZEpObjd0M2NwRGkzSUdYYXJFdGxQQmFwck9nSGpkOHZVOStpWXdoQT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBeXBLWkdtdmdiSHdNaU9pVU80UHZKZXB2MTJaaE1yRUIxK2xlVnM0dHIzMFNGQ0p1Ck8wc09jL2lUNmFuWEJzUU1XNDF6V3hwV1B5elkzWXlUWEJMTlIrM01pWTl2SFhUeWJ6eitTWnNlTzVENytHL3MKQnR5NkovNGpJb2pZZlRZNTFzUUxyRVJydStmVnNGeUU0U2dXbE1HYWdqV0RIMFltM0VJsOTJBUkJDNTRSR3BsZ3VmCmw5a0hPd0lEQVFBQm95Y3dKVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFEQ2FUTXNBR1Vhdnhrazg0NDZnOGNRQUJpSmk5RTZiREV5TwphRnJubC8reGRzRmgvOTFiMlNpM3ZwaHFkZ2k5bXRYWkhhaWI5dnQ3aXdtSEFwbGQxUkhBU25sMFoxWFh1dkhzCmMzcXVIU0puY3dmc3JKT0I4UG9NRjVnaG10a0dPV3g0M2RHTTNHQnpGTVJ4ZGcrNmttNjRNUGhneXl6NTJjYUoKbzhPajNja1Uzd1NWNkxvempRcFVaUnZHV25qQjEwUXFPWXBtQUk4VCtlZkxKZzhuY0drK3V3UUVTeXBYWExpYwoxWVQ2QkFJeFhEK2tUUU1hOFhjdUhHZzlWRkdsUm9yK1EvY3l0S3RDeHVncFlxQ2xvbHVpckFUUnpsemRXamxYCkVQaHVjRWs2UUdIZEpObjd0M2NwRGkzSUdYYXJFdGxQQmFwck9nSGpkOHZVOStpWXdoQT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=Ygo3THE3a2tBMURKNTBld2pMUTNTd1Yxd2p6N2ZjeDYvbzUwRnJnK083dEJMVVdQNTNHaDQ1VjJpUEp2NkdPYk1uCjhIWElmem83cW5XRFQvU20ybW5HbitUdVY4THdLVWFXL2wya3FkRUNnWUVBcS9zRmR1RDk2Z3VoT2ZaRnczcWMKblZGekNGQ3JsMkUvVkdYQy92SmV1WnJLQnFtSUtNZFI3ajdLWS9WRFVlMnJocVd6MFh2Wm9Sa1FoMkdwWkdIawpDd3NzcENKTVl4L0hETTVaWlBvcittb1J6VE5HNHlDNGhTRGJ2VEFaTmV1VTZTK1hzL1JSTDJ6WnUwemNQQXk1CjJJRVgwelFpZ1JzK3VzS3Jkc1FVZXZrQ2dZQUUrQUNWeDJnMC94bmFsMVFJNmJsK3Y2TDJrZVJtVGppcHB4Wm0KS1JEd2xnaXpsWGxsTjhyQmZwSGNiK1ZnZ282anN2eHFrb0pkTEhBLzFDME5IMWVuS1NoUTlpZVFpeWNsZngwdQpKOE1oeW1JM0RBZUg1REJyOG1rZ0pwNnJwUXNBc1paYmVhOHlLTzV5eVdCYTN6VGxOVnQvNDRibGg5alpnTWNMCjNyUXFVUUtCZ1FETVlXdEt2S0hOQllXV0p5enFERnFPbS9qY3Z3andvcURibUZVMlU3UGs2aUdNVldBV3VYZ3cKSm5qQWtES01GN0JXSnJRUjR6RHVoQlhvQVMxWVhiQ2lGd2hTcXVjWGhFSGlwQ3Nib0haVVRtT1pXUUh4Vlp4bQowU1NiRXFZU2MvZHBDZ1BHRk9IaW1FdUVic05kc2JjRmRETDQyODZHb0psQUxCOGc3VWRUZUE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= +``` diff --git a/content/zh/docs/v3.4/multicluster-management/enable-multicluster/update-kubeconfig.md 
b/content/zh/docs/v3.4/multicluster-management/enable-multicluster/update-kubeconfig.md new file mode 100644 index 000000000..efe91e8e3 --- /dev/null +++ b/content/zh/docs/v3.4/multicluster-management/enable-multicluster/update-kubeconfig.md @@ -0,0 +1,18 @@ +--- +title: "更新 Kubeconfig" +keywords: 'Kubernetes, KubeSphere, 多集群, kubeconfig' +description: '更新成员集群的 kubeconfig。' +linkTitle: "更新 Kubeconfig" +weight: 5240 +--- + +多集群环境下,如果成员集群证书即将过期,系统会提前7天提示您。参考以下步骤更新 kubeconfig。 + +1. 选择 **平台管理 > 集群管理**。 + +2. 在**集群管理**页面,点击成员集群后的按钮,选择 **更新 KubeConfig**。 + +3. 在弹出的**更新 KubeConfig** 对话框,输入新的 kubeconfig,点击 **更新**。 + + + diff --git a/content/zh/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/_index.md b/content/zh/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/_index.md new file mode 100644 index 000000000..8d50b1bbe --- /dev/null +++ b/content/zh/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "导入云托管的 Kubernetes 集群" +weight: 5300 + +_build: + render: false +--- diff --git a/content/zh/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/import-aliyun-ack.md b/content/zh/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/import-aliyun-ack.md new file mode 100644 index 000000000..09d28ce9d --- /dev/null +++ b/content/zh/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/import-aliyun-ack.md @@ -0,0 +1,70 @@ +--- +title: "导入阿里云 ACK 集群" +keywords: 'Kubernetes, KubeSphere, 多集群, 阿里云 ACK' +description: '了解如何导入阿里云 Kubernetes 集群。' +titleLink: "导入阿里云 ACK 集群" +weight: 5310 +--- + +本教程演示如何使用[直接连接](../../../multicluster-management/enable-multicluster/direct-connection/)方法导入阿里云 ACK 集群。如果您想使用代理连接方法,请参考[代理连接](../../../multicluster-management/enable-multicluster/agent-connection/)。 + +## 准备工作 + +- 您需要准备已安装 KubeSphere 的 Kubernetes 集群,并将该集群设置为主集群。有关如何准备主集群的更多信息,请参考[准备主集群](../../../multicluster-management/enable-multicluster/direct-connection/#准备-host-集群)。 +- 您需要准备已安装 KubeSphere 的 ACK 集群,用作成员集群。 + +## 导入 ACK 集群 + +### 步骤 1:准备 ACK 成员集群 + +1. 为了通过主集群管理,您需要使它们之间的 `jwtSecret` 相同。因此,首先需要在主集群上执行以下命令获取 `jwtSecret`。 + + ```bash + kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret + ``` + + 输出类似如下: + + ```yaml + jwtSecret: "QVguGh7qnURywHn2od9IiOX6X8f8wK8g" + ``` + +2. 以 `admin` 身份登录 ACK 集群的 KubeSphere 控制台。点击左上角的**平台管理**,选择**集群管理**。 + +3. 访问**定制资源定义**,在搜索栏输入 `ClusterConfiguration`,然后按下键盘上的**回车键**。点击 **ClusterConfiguration** 访问其详情页。 + +4. 点击右侧的 ,选择**编辑 YAML** 来编辑 `ks-installer`。 + +5. 在 `ks-installer` 的 YAML 文件中,将 `jwtSecret` 的值修改为如上所示的相应值,将 `clusterRole` 的值设置为 `member`。点击**更新**保存更改。 + + ```yaml + authentication: + jwtSecret: QVguGh7qnURywHn2od9IiOX6X8f8wK8g + ``` + + ```yaml + multicluster: + clusterRole: member + ``` + + {{< notice note >}} + + 请确保您使用自己的 `jwtSecret`。您需要等待一段时间使更改生效。 + + {{}} + +### 步骤 2:获取 kubeconfig 文件 + +登录阿里云的控制台。访问**容器服务 - Kubernetes** 下的**集群**,点击您的集群访问其详情页,然后选择**连接信息**选项卡。您可以看到**公网访问**选项卡下的 kubeconfig 文件。复制 kubeconfig 文件的内容。 + +![kubeconfig](/images/docs/v3.3/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-ack/kubeconfig.png) + +### 步骤 3:导入 ACK 成员集群 + +1. 以 `admin` 身份登录主集群的 KubeSphere Web 控制台。点击左上角的**平台管理**,选择**集群管理**。在**集群管理**页面,点击**添加集群**。 + +2. 按需填写基本信息,然后点击**下一步**。 + +3. **连接方式**选择**直接连接 Kubernetes 集群**。填写 ACK 的 kubeconfig,然后点击**创建**。 + +4. 
等待集群初始化完成。 diff --git a/content/zh/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/import-aws-eks.md b/content/zh/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/import-aws-eks.md new file mode 100644 index 000000000..7d50d9497 --- /dev/null +++ b/content/zh/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/import-aws-eks.md @@ -0,0 +1,171 @@ +--- +title: "导入 AWS EKS 集群" +keywords: 'Kubernetes, KubeSphere, 多集群, Amazon EKS' +description: '了解如何导入 Amazon Elastic Kubernetes 服务集群。' +titleLink: "导入 AWS EKS 集群" +weight: 5320 +--- + +本教程演示如何使用[直接连接](../../enable-multicluster/direct-connection)方法将 AWS EKS 集群导入 KubeSphere。如果您想使用代理连接方法,请参考[代理连接](../../../multicluster-management/enable-multicluster/agent-connection/)。 + +## 准备工作 + +- 您需要准备一个已安装 KubeSphere 的 Kubernetes 集群,并将其设置为主集群。有关如何准备主集群的更多信息,请参考[准备主集群](../../../multicluster-management/enable-multicluster/direct-connection/#准备-host-集群)。 +- 您需要准备一个 EKS 集群,用作成员集群。 + +## 导入 EKS 集群 + +### 步骤 1:在 EKS 集群上部署 KubeSphere + +您需要首先在 EKS 集群上部署 KubeSphere。有关如何在 EKS 上部署 KubeSphere 的更多信息,请参考[在 AWS EKS 上部署 KubeSphere](../../../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks/#在-eks-上安装-kubesphere)。 + +### 步骤 2:准备 EKS 成员集群 + +1. 为了通过主集群管理,您需要使它们之间的 `jwtSecret` 相同。首先,需要在主集群上执行以下命令获取 `jwtSecret`。 + + ```bash + kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret + ``` + + 输出类似如下: + + ```yaml + jwtSecret: "QVguGh7qnURywHn2od9IiOX6X8f8wK8g" + ``` + +2. 以 `admin` 身份登录 EKS 集群的 KubeSphere Web 控制台。点击左上角的**平台管理**,选择**集群管理**。 + +3. 访问**定制资源定义**,在搜索栏输入 `ClusterConfiguration`,然后按下键盘上的**回车键**。点击 **ClusterConfiguration** 访问其详情页。 + +4. 点击右侧的 ,选择**编辑 YAML** 来编辑 `ks-installer`。 + +5. 在 `ks-installer` 的 YAML 文件中,将 `jwtSecret` 的值改为如上所示的相应值,将 `clusterRole` 的值改为 `member`。点击**更新**保存更改。 + + ```yaml + authentication: + jwtSecret: QVguGh7qnURywHn2od9IiOX6X8f8wK8g + ``` + + ```yaml + multicluster: + clusterRole: member + ``` + + {{< notice note >}} + + 请确保使用您自己的 `jwtSecret`。您需要等待一段时间使更改生效。 + + {{}} + +### 步骤 3:创建新的 kubeconfig 文件 + +1. [Amazon EKS](https://docs.aws.amazon.com/zh_cn/eks/index.html) 不像标准的 kubeadm 集群那样提供内置的 kubeconfig 文件。但您可以参考此[文档](https://docs.aws.amazon.com/zh_cn/eks/latest/userguide/create-kubeconfig.html)创建 kubeconfig 文件。生成的 kubeconfig 文件类似如下: + + ```yaml + apiVersion: v1 + clusters: + - cluster: + server: + certificate-authority-data: + name: kubernetes + contexts: + - context: + cluster: kubernetes + user: aws + name: aws + current-context: aws + kind: Config + preferences: {} + users: + - name: aws + user: + exec: + apiVersion: client.authentication.k8s.io/v1alpha1 + command: aws + args: + - "eks" + - "get-token" + - "--cluster-name" + - "" + # - "--role" + # - "" + # env: + # - name: AWS_PROFILE + # value: "" + ``` + + 但是,自动生成的 kubeconfig 文件要求使用此 kubeconfig 的每台计算机均安装有 `aws` 命令(aws CLI 工具)。 + +2. 在本地计算机上运行以下命令,获得由 KubeSphere 创建的 ServiceAccount `kubesphere` 的令牌,该令牌对集群具有集群管理员访问权限,并将用作新的 kubeconfig 令牌。 + + ```bash + TOKEN=$(kubectl -n kubesphere-system get secret $(kubectl -n kubesphere-system get sa kubesphere -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 -d) + kubectl config set-credentials kubesphere --token=${TOKEN} + kubectl config set-context --current --user=kubesphere + ``` + +3. 
运行以下命令获取新的 kubeconfig 文件: + + ```bash + cat ~/.kube/config + ``` + + 输出类似如下,可以看到已插入新用户 `kubesphere` 并已将其设置为当前集群环境上下文 (current-context) 用户: + + ```yaml + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZ...S0tLQo= + server: https://*.sk1.cn-north-1.eks.amazonaws.com.cn + name: arn:aws-cn:eks:cn-north-1:660450875567:cluster/EKS-LUSLVMT6 + contexts: + - context: + cluster: arn:aws-cn:eks:cn-north-1:660450875567:cluster/EKS-LUSLVMT6 + user: kubesphere + name: arn:aws-cn:eks:cn-north-1:660450875567:cluster/EKS-LUSLVMT6 + current-context: arn:aws-cn:eks:cn-north-1:660450875567:cluster/EKS-LUSLVMT6 + kind: Config + preferences: {} + users: + - name: arn:aws-cn:eks:cn-north-1:660450875567:cluster/EKS-LUSLVMT6 + user: + exec: + apiVersion: client.authentication.k8s.io/v1alpha1 + args: + - --region + - cn-north-1 + - eks + - get-token + - --cluster-name + - EKS-LUSLVMT6 + command: aws + env: null + - name: kubesphere + user: + token: eyJhbGciOiJSUzI1NiIsImtpZCI6ImlCRHF4SlE5a0JFNDlSM2xKWnY1Vkt5NTJrcDNqRS1Ta25IYkg1akhNRmsifQ.eyJpc3M................9KQtFULW544G-FBwURd6ArjgQ3Ay6NHYWZe3gWCHLmag9gF-hnzxequ7oN0LiJrA-al1qGeQv-8eiOFqX3RPCQgbybmix8qw5U6f-Rwvb47-xA + ``` + + 您可以运行以下命令检查新的 kubeconfig 是否有权限访问 EKS 集群。 + + ```shell + kubectl get nodes + ``` + + 输出类似如下: + + ``` + NAME STATUS ROLES AGE VERSION + ip-10-0-47-38.cn-north-1.compute.internal Ready 11h v1.18.8-eks-7c9bda + ip-10-0-8-148.cn-north-1.compute.internal Ready 78m v1.18.8-eks-7c9bda + ``` + +### 步骤 4:导入 EKS 成员集群 + +1. 以 `admin` 身份登录主集群的 KubeSphere Web 控制台。点击左上角的**平台管理**,然后选择**集群管理**。在**集群管理**页面,点击**添加集群**。 + +2. 按需输入基本信息,然后点击**下一步**。 + +3. **连接方式**选择**直接连接 Kubernetes 集群**。填写 EKS 的新 kubeconfig,然后点击**创建**。 + +4. 等待集群初始化完成。 diff --git a/content/zh/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/import-gke.md b/content/zh/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/import-gke.md new file mode 100644 index 000000000..a58c41d85 --- /dev/null +++ b/content/zh/docs/v3.4/multicluster-management/import-cloud-hosted-k8s/import-gke.md @@ -0,0 +1,116 @@ +--- +title: "导入 Google GKE 集群" +keywords: 'Kubernetes, KubeSphere, 多集群, Google GKE' +description: '了解如何导入 Google Kubernetes Engine 集群。' +titleLink: "导入 Google GKE 集群" +weight: 5330 +--- + +本教程演示如何使用[直接连接](../../../multicluster-management/enable-multicluster/direct-connection/)方法导入 GKE 集群。如果您想使用代理连接方法,请参考[代理连接](../../../multicluster-management/enable-multicluster/agent-connection/)。 + +## 准备工作 + +- 您需要准备一个已安装 KubeSphere 的 Kubernetes 集群,并将该集群设置为主集群。有关如何准备主集群的更多信息,请参考[准备主集群](../../../multicluster-management/enable-multicluster/direct-connection/#准备-host-集群)。 +- 您需要准备一个 GKE 集群,用作成员集群。 + +## 导入 GKE 集群 + +### 步骤 1:在 GKE 集群上部署 KubeSphere + +您需要首先在 GKE 集群上部署 KubeSphere。有关如何在 GKE 上部署 KubeSphere 的更多信息,请参考[在 Google GKE 上部署 KubeSphere](../../../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke/)。 + +### 步骤 2:准备 GKE 成员集群 + +1. 为了通过主集群管理,您需要使它们之间的 `jwtSecret` 相同。首先,在主集群上执行以下命令获取 `jwtSecret`。 + + ```bash + kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret + ``` + + 输出类似如下: + + ```yaml + jwtSecret: "QVguGh7qnURywHn2od9IiOX6X8f8wK8g" + ``` + +2. 以 `admin` 身份登录 GKE 的 KubeSphere Web 控制台。点击左上角的**平台管理**,选择**集群管理**。 + +3. 访问**定制资源定义**,在搜索栏中输入 `ClusterConfiguration`,然后按下键盘上的**回车键**。点击 **ClusterConfiguration** 访问其详情页。 + +4. 点击右侧的 ,选择**编辑 YAML** 来编辑 `ks-installer`。 + +5. 
在 `ks-installer` 的 YAML 文件中,将 `jwtSecret` 的值改为如上所示的相应值,将 `clusterRole` 的值改为 `member`。 + + ```yaml + authentication: + jwtSecret: QVguGh7qnURywHn2od9IiOX6X8f8wK8g + ``` + + ```yaml + multicluster: + clusterRole: member + ``` + + {{< notice note >}} + + 请确保使用自己的 `jwtSecret`。您需要等待一段时间使更改生效。 + + {{}} + +### 步骤 3:创建新的 kubeconfig 文件 + +1. 在 GKE Cloud Shell 终端运行以下命令: + + ```bash + TOKEN=$(kubectl -n kubesphere-system get secret $(kubectl -n kubesphere-system get sa kubesphere -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 -d) + kubectl config set-credentials kubesphere --token=${TOKEN} + kubectl config set-context --current --user=kubesphere + ``` + +2. 运行以下命令获取新的 kubeconfig 文件: + + ```bash + cat ~/.kube/config + ``` + + 输出类似如下: + + ```yaml + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURLekNDQWhPZ0F3SUJBZ0lSQUtPRUlDeFhyWEdSbjVQS0dlRXNkYzR3RFFZSktvWklodmNOQVFFTEJRQXcKTHpFdE1Dc0dBMVVFQXhNa1pqVTBNVFpoTlRVdFpEZzFZaTAwWkdZNUxXSTVNR1V0TkdNeE0yRTBPR1ZpWW1VMwpNQjRYRFRJeE1ETXhNVEl5TXpBMU0xb1hEVEkyTURNeE1ESXpNekExTTFvd0x6RXRNQ3NHQTFVRUF4TWtaalUwCk1UWmhOVFV0WkRnMVlpMDBaR1k1TFdJNU1HVXROR014TTJFME9HVmlZbVUzTUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdkVHVGtKRjZLVEl3QktlbXNYd3dPSnhtU3RrMDlKdXh4Z1grM0dTMwpoeThVQm5RWEo1d3VIZmFGNHNWcDFzdGZEV2JOZitESHNxaC9MV3RxQk5iSlNCU1ppTC96V3V5OUZNeFZMS2czCjVLdnNnM2drdUpVaFVuK0tMUUFPdTNUWHFaZ2tTejE1SzFOSU9qYm1HZGVWSm5KQTd6NTF2ZkJTTStzQWhGWTgKejJPUHo4aCtqTlJseDAvV0UzTHZEUUMvSkV4WnRCRGFuVFU0anpHMHR2NGk1OVVQN2lWbnlwRHk0dkFkWm5mbgowZncwVnplUXJqT2JuQjdYQTZuUFhseXZubzErclRqakFIMUdtU053c1IwcDRzcEViZ0lXQTNhMmJzeUN5dEJsCjVOdmJKZkVpSTFoTmFOZ3hoSDJNenlOUWVhYXZVa29MdDdPN0xqYzVFWlo4cFFJREFRQUJvMEl3UURBT0JnTlYKSFE4QkFmOEVCQU1DQWdRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVUVyVkJrc3MydGV0Qgp6ZWhoRi92bGdVMlJiM2N3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUdEZVBVa3I1bDB2OTlyMHZsKy9WZjYrCitBanVNNFoyOURtVXFHVC80OHBaR1RoaDlsZDQxUGZKNjl4eXFvME1wUlIyYmJuTTRCL2NVT1VlTE5VMlV4VWUKSGRlYk1oQUp4Qy9Uaks2SHpmeExkTVdzbzVSeVAydWZEOFZob2ZaQnlBVWczajdrTFgyRGNPd1lzNXNrenZ0LwpuVUlhQURLaXhtcFlSSWJ6MUxjQmVHbWROZ21iZ0hTa3MrYUxUTE5NdDhDQTBnSExhMER6ODhYR1psSi80VmJzCjNaWVVXMVExY01IUHd5NnAwV2kwQkpQeXNaV3hZdFJyV3JFWUhZNVZIanZhUG90S3J4Y2NQMUlrNGJzVU1ZZ0wKaTdSaHlYdmJHc0pKK1lNc3hmalU5bm5XYVhLdXM5ZHl0WG1kRGw1R0hNU3VOeTdKYjIwcU5RQkxhWHFkVmY0PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: https://130.211.231.87 + name: gke_grand-icon-307205_us-central1-c_cluster-3 + contexts: + - context: + cluster: gke_grand-icon-307205_us-central1-c_cluster-3 + user: gke_grand-icon-307205_us-central1-c_cluster-3 + name: gke_grand-icon-307205_us-central1-c_cluster-3 + current-context: gke_grand-icon-307205_us-central1-c_cluster-3 + kind: Config + preferences: {} + users: + - name: gke_grand-icon-307205_us-central1-c_cluster-3 + user: + auth-provider: + config: + cmd-args: config config-helper --format=json + cmd-path: /usr/lib/google-cloud-sdk/bin/gcloud + expiry-key: '{.credential.token_expiry}' + token-key: '{.credential.access_token}' + name: gcp + - name: kubesphere + user: + token: 
eyJhbGciOiJSUzI1NiIsImtpZCI6InNjOFpIb3RrY3U3bGNRSV9NWV8tSlJzUHJ4Y2xnMDZpY3hhc1BoVy0xTGsifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlc3BoZXJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlc3BoZXJlLXRva2VuLXpocmJ3Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6Imt1YmVzcGhlcmUiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiIyMGFmZGI1Ny01MTBkLTRjZDgtYTAwYS1hNDQzYTViNGM0M2MiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZXNwaGVyZS1zeXN0ZW06a3ViZXNwaGVyZSJ9.ic6LaS5rEQ4tXt_lwp7U_C8rioweP-ZdDjlIZq91GOw9d6s5htqSMQfTeVlwTl2Bv04w3M3_pCkvRzMD0lHg3mkhhhP_4VU0LIo4XeYWKvWRoPR2kymLyskAB2Khg29qIPh5ipsOmGL9VOzD52O2eLtt_c6tn-vUDmI_Zw985zH3DHwUYhppGM8uNovHawr8nwZoem27XtxqyBkqXGDD38WANizyvnPBI845YqfYPY5PINPYc9bQBFfgCovqMZajwwhcvPqS6IpG1Qv8TX2lpuJIK0LLjiKaHoATGvHLHdAZxe_zgAC2cT_9Ars3HIN4vzaSX0f-xP--AcRgKVSY9g + ``` + +### 步骤 4:导入 GKE 成员集群 + +1. 以 `admin` 身份登录主集群的 KubeSphere Web 控制台。点击左上角的**平台管理**,选择**集群管理**。在**集群管理**页面,点击**添加集群**。 + +2. 按需输入基本信息,然后点击**下一步**。 + +3. **连接方式**选择**直接连接 Kubernetes 集群**。填写 GKE 的新 kubeconfig,然后点击**创建**。 + +4. 等待集群初始化完成。 diff --git a/content/zh/docs/v3.4/multicluster-management/introduction/_index.md b/content/zh/docs/v3.4/multicluster-management/introduction/_index.md new file mode 100644 index 000000000..08a69aacd --- /dev/null +++ b/content/zh/docs/v3.4/multicluster-management/introduction/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "介绍" +weight: 5100 + +_build: + render: false +--- diff --git a/content/zh/docs/v3.4/multicluster-management/introduction/kubefed-in-kubesphere.md b/content/zh/docs/v3.4/multicluster-management/introduction/kubefed-in-kubesphere.md new file mode 100644 index 000000000..57b21b6b5 --- /dev/null +++ b/content/zh/docs/v3.4/multicluster-management/introduction/kubefed-in-kubesphere.md @@ -0,0 +1,49 @@ +--- +title: "KubeSphere 联邦" +keywords: 'Kubernetes, KubeSphere, 联邦, 多集群, 混合云' +description: '了解 KubeSphere 中的 Kubernetes 联邦的基本概念,包括成员集群和主集群。' +linkTitle: "KubeSphere 联邦" +weight: 5120 +--- + +多集群功能与多个集群之间的网络连接有关。因此,了解集群的拓扑关系很重要。 + +## 多集群架构如何运作 + +在使用 KubeSphere 的中央控制平面管理多个集群之前,您需要创建一个主集群。主集群实际上是一个启用了多集群功能的 KubeSphere 集群,您可以使用它提供的控制平面统一管理。成员集群是没有中央控制平面的普通 KubeSphere 集群。也就是说,拥有必要权限的租户(通常是集群管理员)能够通过主集群访问控制平面,管理所有成员集群,例如查看和编辑成员集群上面的资源。反过来,如果您单独访问任意成员集群的 Web 控制台,您将无法查看其他集群的任何资源。 + +只能有一个主集群存在,而多个成员集群可以同时存在。在多集群架构中,主集群和成员集群之间的网络可以[直接连接](../../enable-multicluster/direct-connection/),或者通过[代理连接](../../enable-multicluster/agent-connection/)。成员集群之间的网络可以设置在完全隔离的环境中。 + +如果您是使用通过 kubeadm 搭建的自建 Kubernetes 集群,请参阅[离线安装](../../../installing-on-kubernetes/on-prem-kubernetes/install-ks-on-linux-airgapped/)在您的 Kubernetes 集群上安装 KubeSphere,然后通过直接连接或者代理连接来启用 KubeSphere 多集群管理功能。 + +![Kubernetes 联邦](/images/docs/v3.3/zh-cn/multicluster-management/introduction/kubesphere-federation/kubesphere-federation.png) + +## 厂商无锁定 + +KubeSphere 拥有功能强大的中央控制平面,您可以统一纳管部署在任意环境或云厂商上的 KubeSphere 集群。 + +## 资源要求 + +启用多集群管理前,请确保您的环境中有足够的资源。 + +| 命名空间 | kube-federation-system | kubesphere-system | +| -------- | ---------------------- | ----------------- | +| 子组件 | 2 x controller-manager | Tower | +| CPU 请求 | 100 m | 100 m | +| CPU 限制 | 500 m | 500 m | +| 内存请求 | 64 MiB | 128 MiB | +| 内存限制 | 512 MiB | 256 MiB | +| 安装 | 可选 | 可选 | + +{{< notice note >}} + +- CPU 和内存的资源请求和限制均指单个副本的要求。 +- 多集群功能启用后,主集群上会安装 Tower 和 controller-manager。如果您使用[代理连接](../../../multicluster-management/enable-multicluster/agent-connection/),成员集群仅需要 
Tower。如果您使用[直接连接](../../../multicluster-management/enable-multicluster/direct-connection/),成员集群无需额外组件。 + +{{}} + +## 在多集群架构中使用应用商店 + +与 KubeSphere 中的其他组件不同,[KubeSphere 应用商店](../../../pluggable-components/app-store/)是所有集群(包括主集群和成员集群)的全局应用程序池。您只需要在主集群上启用应用商店,便可以直接在成员集群上使用应用商店的相关功能(无论成员集群是否启用应用商店),例如[应用模板](../../../project-user-guide/application/app-template/)和[应用仓库](../../../workspace-administration/app-repository/import-helm-repository/)。 + +但是,如果只在成员集群上启用应用商店而没有在主集群上启用,您将无法在多集群架构中的任何集群上使用应用商店。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/multicluster-management/introduction/overview.md b/content/zh/docs/v3.4/multicluster-management/introduction/overview.md new file mode 100644 index 000000000..2f23eaf32 --- /dev/null +++ b/content/zh/docs/v3.4/multicluster-management/introduction/overview.md @@ -0,0 +1,15 @@ +--- +title: "概述" +keywords: 'Kubernetes, KubeSphere, 多集群, 混合云' +description: '对多集群管理有个基本的了解,例如多集群管理的常见用例,以及 KubeSphere 可以通过多集群功能带来的好处。' +linkTitle: "概述" +weight: 5110 +--- + +如今,各种组织跨不同的云厂商或者在不同的基础设施上运行和管理多个 Kubernetes 集群的做法非常普遍。由于每个 Kubernetes 集群都是一个相对独立的单元,上游社区正在艰难地研究和开发多集群管理解决方案。即便如此,Kubernetes 集群联邦(Kubernetes Cluster Federation,简称 [KubeFed](https://github.com/kubernetes-sigs/kubefed))可能是其中一种可行的方法。 + +多集群管理最常见的使用场景包括服务流量负载均衡、隔离开发和生产环境、解耦数据处理和数据存储、跨云备份和灾难恢复、灵活分配计算资源、跨区域服务的低延迟访问以及避免厂商锁定等。 + +开发 KubeSphere 旨在解决多集群和多云管理(包括上述使用场景)的难题,为用户提供统一的控制平面,将应用程序及其副本分发到位于公有云和本地环境的多个集群。KubeSphere 还拥有跨多个集群的丰富可观测性,包括集中监控、日志系统、事件和审计日志等。 + +![多集群概览](/images/docs/v3.3/zh-cn/multicluster-management/introduction/overview/multi-cluster-overview.png) diff --git a/content/zh/docs/v3.4/multicluster-management/unbind-cluster.md b/content/zh/docs/v3.4/multicluster-management/unbind-cluster.md new file mode 100644 index 000000000..04275cad1 --- /dev/null +++ b/content/zh/docs/v3.4/multicluster-management/unbind-cluster.md @@ -0,0 +1,61 @@ +--- +title: "移除成员集群" +keywords: 'Kubernetes, KubeSphere, 多集群, 混合云' +description: '了解如何从 KubeSphere 的集群池中移除成员集群。' +linkTitle: "移除成员集群" +weight: 5500 +--- + +本教程演示如何在 KubeSphere 控制台移除成员集群。 + +## 准备工作 + +- 您已经启用多集群管理。 +- 您需要有一个拥有**集群管理**权限角色的用户。例如,您可以直接以 `admin` 身份登录控制台,或者创建一个拥有该权限的新角色并授予至一个用户。 + +## 移除成员集群 + +你可以使用以下任一方法移除成员集群: + +**方法 1** + +1. 点击左上角的**平台管理**,选择**集群管理**。 + +2. 在**成员集群**区域,点击要从中央控制平面移除的集群右侧的 ,点击**移除集群**。 + +3. 在弹出的**移除集群**对话框,请仔细阅读风险提示信息。如果您仍然想移除成员集群,输入集群名称,点击**确定**以移除成员集群。 + +**方法 2** + +1. 点击左上角的**平台管理**,选择**集群管理**。 + +2. 在**成员集群**区域,请点击要从中央控制平面移除的集群。 + +3. 点击**集群设置** > **基本信息**。 + +4. 在**集群信息**右侧,点击**管理** > **移除集群**。 + +5. 在弹出的**移除集群**对话框,请仔细阅读风险提示信息。如果您仍然想移除成员集群,输入集群名称,点击**确定**以移除成员集群。 + + {{< notice warning >}} + + * 集群被移除后,集群中原有的资源不会被自动清除。 + + * 集群被移除后,集群中原有的多集群配置数据不会被自动清除,卸载 KubeSphere 或删除关联资源时会导致用户数据丢失。 + + {{}} + +6. 
执行以下命令手动清理被移除集群中的多集群配置数据: + + ```bash + for ns in $(kubectl get ns --field-selector status.phase!=Terminating -o jsonpath='{.items[*].metadata.name}'); do kubectl label ns $ns kubesphere.io/workspace- && kubectl patch ns $ns --type merge -p '{"metadata":{"ownerReferences":[]}}'; done + ``` + +## 移除不健康的集群 + +在某些情况下,您无法按照上述步骤移除集群。例如,您导入了一个凭证错误的集群,并且无法访问**集群设置**。在这种情况下,请执行以下命令来移除不健康的集群: + +```bash +kubectl delete cluster +``` + diff --git a/content/zh/docs/v3.4/pluggable-components/_index.md b/content/zh/docs/v3.4/pluggable-components/_index.md new file mode 100644 index 000000000..af4385c7e --- /dev/null +++ b/content/zh/docs/v3.4/pluggable-components/_index.md @@ -0,0 +1,13 @@ +--- +title: "启用可插拔组件" +description: "启用 KubeSphere 的可插拔组件" +layout: "second" + +linkTitle: "启用可插拔组件" + +weight: 6000 + +icon: "/images/docs/v3.3/docs.svg" +--- + +本章详细演示如何在安装前和安装后启用 KubeSphere 不同组件的步骤,以便您可以充分利用该容器平台为您的业务服务。 diff --git a/content/zh/docs/v3.4/pluggable-components/alerting.md b/content/zh/docs/v3.4/pluggable-components/alerting.md new file mode 100644 index 000000000..3270f3658 --- /dev/null +++ b/content/zh/docs/v3.4/pluggable-components/alerting.md @@ -0,0 +1,97 @@ +--- +title: "KubeSphere 告警系统" +keywords: "Kubernetes, alertmanager, KubeSphere, 告警" +description: "了解如何启用告警功能,以便在潜在问题对您的业务造成影响之前提前识别这些问题。" +linkTitle: "KubeSphere 告警系统" +weight: 6600 +--- + +告警是可观测性的重要组成部分,与监控和日志密切相关。KubeSphere 中的告警系统与其主动式故障通知 (Proactive Failure Notification) 系统相结合,使用户可以基于告警策略了解感兴趣的活动。当达到某个指标的预定义阈值时,会向预先配置的收件人发出告警。因此,您需要预先配置通知方式,包括邮件、Slack、钉钉、企业微信和 Webhook。有了功能强大的告警和通知系统,您就可以迅速发现并提前解决潜在问题,避免您的业务受影响。 + +## 在安装前启用告警系统 + +### 在 Linux 上安装 + +当您在 Linux 上安装多节点 KubeSphere 时,需要创建一个配置文件,该文件列出了所有 KubeSphere 组件。 + +1. [在 Linux 上安装 KubeSphere](../../installing-on-linux/introduction/multioverview/) 时,您需要创建一个默认文件 `config-sample.yaml`。通过执行以下命令修改该文件: + + ```bash + vi config-sample.yaml + ``` + + {{< notice note >}} +如果您采用 [All-in-One 安装](../../quick-start/all-in-one-on-linux/),则不需要创建 `config-sample.yaml` 文件,因为可以直接创建集群。一般来说,All-in-One 模式针对那些刚接触 KubeSphere 并希望熟悉系统的用户。如果您想在该模式下启用告警系统(例如用于测试),请参考[下面的部分](#在安装后启用告警系统),查看如何在安装后启用告警系统。 + {{}} + +2. 在该文件中,搜索 `alerting` 并将 `enabled` 的 `false` 更改为 `true`。完成后保存文件。 + + ```yaml + alerting: + enabled: true # 将“false”更改为“true”。 + ``` + +3. 执行以下命令使用该配置文件创建集群: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +### 在 Kubernetes 上安装 + +当您[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 时,需要先在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) 文件中启用告警系统。 + +1. 下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) 文件并进行编辑。 + + ```bash + vi cluster-configuration.yaml + ``` + +2. 在 `cluster-configuration.yaml` 文件中,搜索 `alerting`,将 `enabled` 的 `false` 更改为 `true` 以启用告警系统。完成后保存文件。 + + ```yaml + alerting: + enabled: true # 将“false”更改为“true”。 + ``` + +3. 执行以下命令开始安装: + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f cluster-configuration.yaml + ``` + +## 在安装后启用告警系统 + +1. 使用 `admin` 用户登录控制台。点击左上角的**平台管理**,选择**集群管理**。 + +2. 点击**定制资源定义**,在搜索栏中输入 `clusterconfiguration`。点击结果查看其详细页面。 + + {{< notice info >}} +定制资源定义 (CRD) 允许用户在不新增 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些定制资源。 + {{}} + +3. 在**自定义资源**中,点击 `ks-installer` 右侧的 ,选择**编辑 YAML**。 + +4. 
在该 YAML 文件中,搜寻到 `alerting`,将 `enabled` 的 `false` 更改为 `true`。完成后,点击右下角的**确定**,保存配置。 + + ```yaml + alerting: + enabled: true # 将“false”更改为“true”。 + ``` + +5. 在 kubectl 中执行以下命令检查安装过程: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + {{< notice note >}} + +您可以通过点击控制台右下角的 找到 kubectl 工具。 + {{}} + +## 验证组件的安装 + +如果您可以在**集群管理**页面看到**告警消息**和**告警策略**,则说明安装成功。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/pluggable-components/app-store.md b/content/zh/docs/v3.4/pluggable-components/app-store.md new file mode 100644 index 000000000..c02f8eeb1 --- /dev/null +++ b/content/zh/docs/v3.4/pluggable-components/app-store.md @@ -0,0 +1,118 @@ +--- +title: "KubeSphere 应用商店" +keywords: "Kubernetes, KubeSphere, App Store, OpenPitrix" +description: "了解如何启用应用商店,一个可以在内部实现数据和应用共享、并制定应用交付流程的行业标准的组件。" +linkTitle: "KubeSphere 应用商店" +weight: 6200 +--- + +作为一个开源的、以应用为中心的容器平台,KubeSphere 在 [OpenPitrix](https://github.com/openpitrix/openpitrix) 的基础上,为用户提供了一个基于 Helm 的应用商店,用于应用生命周期管理。OpenPitrix 是一个开源的 Web 平台,用于打包、部署和管理不同类型的应用。KubeSphere 应用商店让 ISV、开发者和用户能够在一站式服务中只需点击几下就可以上传、测试、安装和发布应用。 + +对内,KubeSphere 应用商店可以作为不同团队共享数据、中间件和办公应用的场所。对外,有利于设立构建和交付的行业标准。启用该功能后,您可以通过应用模板添加更多应用。 + +有关更多信息,请参阅[应用商店](../../application-store/)。 + +## 在安装前启用应用商店 + +### 在 Linux 上安装 + +当您在 Linux 上安装多节点 KubeSphere 时,首先需要创建一个配置文件,该文件列出了所有 KubeSphere 组件。 + +1. [在 Linux 上安装 KubeSphere](../../installing-on-linux/introduction/multioverview/) 时,您需要创建一个默认文件 `config-sample.yaml`,通过执行以下命令修改该文件: + + ```bash + vi config-sample.yaml + ``` + + {{< notice note >}} +如果您采用 [All-in-One 安装](../../quick-start/all-in-one-on-linux/),则不需要创建 `config-sample.yaml` 文件,因为可以直接创建集群。一般来说,All-in-One 模式是为那些刚接触 KubeSphere 并希望熟悉系统的用户而准备的。如果您想在这个模式下启用应用商店(比如用于测试),请参考[下面的部分](#在安装后启用应用商店),查看如何在安装后启用应用商店。 + {{}} + +2. 在该文件中,搜索 `openpitrix`,并将 `enabled` 的 `false` 改为 `true`,完成后保存文件。 + + ```yaml + openpitrix: + store: + enabled: true # 将“false”更改为“true”。 + ``` + +3. 执行以下命令使用该配置文件创建集群: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +### 在 Kubernetes 上安装 + +当您[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 时,需要先在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) 文件中启用应用商店。 + +1. 下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) 文件,执行以下命令打开并编辑该文件。 + + ```bash + vi cluster-configuration.yaml + ``` + +2. 在 `cluster-configuration.yaml` 文件中,搜索 `openpitrix`,并将 `enabled` 的 `false` 改为 `true`。完成后保存文件。 + + ```yaml + openpitrix: + store: + enabled: true # 将“false”更改为“true”。 + ``` + +3. 执行以下命令开始安装: + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f cluster-configuration.yaml + ``` + +## 在安装后启用应用商店 + +1. 使用 `admin` 用户登录控制台,点击左上角的**平台管理**,选择**集群管理**。 + +2. 点击**定制资源定义**,在搜索栏中输入 `clusterconfiguration`,点击结果查看其详细页面。 + + {{< notice info >}} +定制资源定义(CRD)允许用户在不增加额外 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些定制资源。 + {{}} + +3. 在**自定义资源**中,点击 `ks-installer` 右侧的 ,选择**编辑 YAML**。 + +4. 在该 YAML 文件中,搜索 `openpitrix`,将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**确定**,保存配置。 + + ```yaml + openpitrix: + store: + enabled: true # 将“false”更改为“true”。 + ``` + +5. 
在 kubectl 中执行以下命令检查安装过程: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + {{< notice note >}} + +您可以通过点击控制台右下角的 找到 kubectl 工具。 + {{}} + +## 验证组件的安装 + +在您登录控制台后,如果您能看到页面左上角的**应用商店**以及其中的应用,则说明安装成功。 + +{{< notice note >}} + +- 您可以在不登录控制台的情况下直接访问 `<节点 IP 地址>:30880/apps` 进入应用商店。 +- KubeSphere 3.2.x 中的应用商店启用后,**OpenPitrix** 页签不会显示在**系统组件**页面。 + +{{}} + +## 在多集群架构中使用应用商店 + +[在多集群架构中](../../multicluster-management/introduction/kubefed-in-kubesphere/),一个主集群管理所有成员集群。与 KubeSphere 中的其他组件不同,应用商店是所有集群(包括主集群和成员集群)的全局应用程序池。您只需要在主集群上启用应用商店,便可以直接在成员集群上使用应用商店的相关功能(无论成员集群是否启用应用商店),例如[应用模板](../../project-user-guide/application/app-template/)和[应用仓库](../../workspace-administration/app-repository/import-helm-repository/)。 + +但是,如果只在成员集群上启用应用商店而没有在主集群上启用,您将无法在多集群架构中的任何集群上使用应用商店。 + diff --git a/content/zh/docs/v3.4/pluggable-components/auditing-logs.md b/content/zh/docs/v3.4/pluggable-components/auditing-logs.md new file mode 100644 index 000000000..4a7bb9fd8 --- /dev/null +++ b/content/zh/docs/v3.4/pluggable-components/auditing-logs.md @@ -0,0 +1,182 @@ +--- +title: "KubeSphere 审计日志" +keywords: "Kubernetes, 审计, KubeSphere, 日志" +description: "了解如何启用审计来记录平台事件和活动。" +linkTitle: "KubeSphere 审计日志" +weight: 6700 +--- + +KubeSphere 审计日志系统提供了一套与安全相关并按时间顺序排列的记录,按顺序记录了与单个用户、管理人员或系统其他组件相关的活动。对 KubeSphere 的每个请求都会生成一个事件,然后写入 Webhook,并根据一定的规则进行处理。 + +有关更多信息,请参见[审计日志查询](../../toolbox/auditing/auditing-query/)。 + +## 在安装前启用审计日志 + +### 在 Linux 上安装 + +当您在 Linux 上安装多节点 KubeSphere 时,需要创建一个配置文件,该文件列出了所有 KubeSphere 组件。 + +1. [在 Linux 上安装 KubeSphere](../../installing-on-linux/introduction/multioverview/) 时,您需要创建一个默认文件 `config-sample.yaml`。执行以下命令修改该文件: + + ```bash + vi config-sample.yaml + ``` + + {{< notice note >}} +如果您采用 [All-in-One 安装](../../quick-start/all-in-one-on-linux/),则不需要创建 `config-sample.yaml` 文件,因为可以直接创建集群。一般来说,All-in-One 模式是为那些刚接触 KubeSphere 并希望熟悉系统的用户而准备的,如果您想在该模式下启用审计日志(例如用于测试),请参考[下面的部分](#在安装后启用审计日志),查看如何在安装后启用审计功能。 + {{}} + +2. 在该文件中,搜索 `auditing`,并将 `enabled` 的 `false` 改为 `true`。完成后保存文件。 + + ```yaml + auditing: + enabled: true # 将“false”更改为“true”。 + ``` + + {{< notice note >}} +默认情况下,如果启用了审计功能,KubeKey 将安装内置 Elasticsearch。对于生产环境,如果您想启用审计功能,强烈建议在 `config-sample.yaml` 中设置以下值,尤其是 `externalElasticsearchHost` 和 `externalElasticsearchPort`。在安装前提供以下信息后,KubeKey 将直接对接您的外部 Elasticsearch,不再安装内置 Elasticsearch。 + {{}} + + ```yaml + es: # Storage backend for logging, tracing, events and auditing. + elasticsearchMasterReplicas: 1 # The total number of master nodes. Even numbers are not allowed. + elasticsearchDataReplicas: 1 # The total number of data nodes. + elasticsearchMasterVolumeSize: 4Gi # The volume size of Elasticsearch master nodes. + elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. + logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. + elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. + externalElasticsearchHost: # The Host of external Elasticsearch. + externalElasticsearchPort: # The port of external Elasticsearch. + ``` + +3. 执行以下命令使用该配置文件创建集群: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +### 在 Kubernetes 上安装 + +当您[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 时,需要先在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) 文件中启用审计功能。 + +1. 
下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) 文件,执行以下命令打开并编辑该文件: + + ```bash + vi cluster-configuration.yaml + ``` + +2. 在 `cluster-configuration.yaml` 文件中,搜索 `auditing`,并将 `enabled` 的 `false` 改为 `true`。完成后保存文件。 + + ```yaml + auditing: + enabled: true # 将“false”更改为“true”。 + ``` + + {{< notice note >}} +默认情况下,如果启用了审计功能,ks-installer 会安装内置 Elasticsearch。对于生产环境,如果您想启用审计功能,强烈建议在 `cluster-configuration.yaml` 中设置以下值,尤其是 `externalElasticsearchHost` 和 `externalElasticsearchPort`。在安装前提供以下信息后,ks-installer 将直接对接您的外部 Elasticsearch,不再安装内置 Elasticsearch。 + {{}} + + ```yaml + es: # Storage backend for logging, tracing, events and auditing. + elasticsearchMasterReplicas: 1 # The total number of master nodes. Even numbers are not allowed. + elasticsearchDataReplicas: 1 # The total number of data nodes. + elasticsearchMasterVolumeSize: 4Gi # The volume size of Elasticsearch master nodes. + elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. + logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. + elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. + externalElasticsearchHost: # The Host of external Elasticsearch. + externalElasticsearchPort: # The port of external Elasticsearch. + ``` + +3. 执行以下命令开始安装: + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f cluster-configuration.yaml + ``` + +## 在安装后启用审计日志 + +1. 以 `admin` 用户登录控制台。点击左上角的**平台管理**,选择**集群管理**。 + +2. 点击**定制资源定义**,在搜索栏中输入 `clusterconfiguration`,点击搜索结果查看其详细页面。 + + {{< notice info >}} +定制资源定义 (CRD) 允许用户在不新增 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些定制资源。 + {{}} + +3. 在**自定义资源**中,点击 `ks-installer` 右侧的 ,选择**编辑 YAML**。 + +4. 在该 YAML 文件中,搜索 `auditing`,将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**确定**,保存配置。 + + ```yaml + auditing: + enabled: true # 将“false”更改为“true”。 + ``` + + {{< notice note >}} +默认情况下,如果启用了审计功能,将安装内置 Elasticsearch。对于生产环境,如果您想启用审计功能,强烈建议在该 YAML 文件中设置以下值,尤其是 `externalElasticsearchHost` 和 `externalElasticsearchPort`。提供以下信息后,KubeSphere 将直接对接您的外部 Elasticsearch,不再安装内置 Elasticsearch。 + {{}} + + ```yaml + es: # Storage backend for logging, tracing, events and auditing. + elasticsearchMasterReplicas: 1 # The total number of master nodes. Even numbers are not allowed. + elasticsearchDataReplicas: 1 # The total number of data nodes. + elasticsearchMasterVolumeSize: 4Gi # The volume size of Elasticsearch master nodes. + elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. + logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. + elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. + externalElasticsearchHost: # The Host of external Elasticsearch. + externalElasticsearchPort: # The port of external Elasticsearch. + ``` + +5. 
在 kubectl 中执行以下命令检查安装过程: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + {{< notice note >}} + +您可以点击控制台右下角的 找到 kubectl 工具。 + {{}} + +## 验证组件的安装 + +{{< tabs >}} + +{{< tab "在仪表板中验证组件的安装" >}} + +验证您可以使用右下角**工具箱**中的**审计日志查询**功能。 + +{{}} + +{{< tab "通过 kubectl 验证组件的安装" >}} + +执行以下命令来检查容器组的状态: + +```bash +kubectl get pod -n kubesphere-logging-system +``` + +如果组件运行成功,输出结果如下: + +```yaml +NAME READY STATUS RESTARTS AGE +elasticsearch-logging-curator-elasticsearch-curator-159872n9g9g 0/1 Completed 0 2d10h +elasticsearch-logging-curator-elasticsearch-curator-159880tzb7x 0/1 Completed 0 34h +elasticsearch-logging-curator-elasticsearch-curator-1598898q8w7 0/1 Completed 0 10h +elasticsearch-logging-data-0 1/1 Running 1 2d20h +elasticsearch-logging-data-1 1/1 Running 1 2d20h +elasticsearch-logging-discovery-0 1/1 Running 1 2d20h +fluent-bit-6v5fs 1/1 Running 1 2d20h +fluentbit-operator-5bf7687b88-44mhq 1/1 Running 1 2d20h +kube-auditing-operator-7574bd6f96-p4jvv 1/1 Running 1 2d20h +kube-auditing-webhook-deploy-6dfb46bb6c-hkhmx 1/1 Running 1 2d20h +kube-auditing-webhook-deploy-6dfb46bb6c-jp77q 1/1 Running 1 2d20h +``` + +{{}} + +{{}} diff --git a/content/zh/docs/v3.4/pluggable-components/devops.md b/content/zh/docs/v3.4/pluggable-components/devops.md new file mode 100644 index 000000000..2bb3a7c35 --- /dev/null +++ b/content/zh/docs/v3.4/pluggable-components/devops.md @@ -0,0 +1,127 @@ +--- +title: "KubeSphere DevOps 系统" +keywords: "Kubernetes, Jenkins, KubeSphere, DevOps, cicd" +description: "了解如何启用 DevOps 系统来进一步解放您的开发人员,让他们专注于代码编写。" +linkTitle: "KubeSphere DevOps" +weight: 6300 +--- + +基于 [Jenkins](https://jenkins.io/) 的 KubeSphere DevOps 系统是专为 Kubernetes 中的 CI/CD 工作流设计的,它提供了一站式的解决方案,帮助开发和运维团队用非常简单的方式构建、测试和发布应用到 Kubernetes。它还具有插件管理、[Binary-to-Image (B2I)](../../project-user-guide/image-builder/binary-to-image/)、[Source-to-Image (S2I)](../../project-user-guide/image-builder/source-to-image/)、代码依赖缓存、代码质量分析、流水线日志等功能。 + +DevOps 系统为用户提供了一个自动化的环境,应用可以自动发布到同一个平台。它还兼容第三方私有镜像仓库(如 Harbor)和代码库(如 GitLab/GitHub/SVN/BitBucket)。它为用户提供了全面的、可视化的 CI/CD 流水线,打造了极佳的用户体验,而且这种兼容性强的流水线能力在离线环境中非常有用。 + +有关更多信息,请参见 [DevOps 用户指南](../../devops-user-guide/)。 + +## 在安装前启用 DevOps + +### 在 Linux 上安装 + +当您在 Linux 上安装多节点 KubeSphere 时,首先需要创建一个配置文件,该文件列出了所有 KubeSphere 组件。 + +1. [在 Linux 上安装 KubeSphere](../../installing-on-linux/introduction/multioverview/) 时,您需要创建一个默认文件 `config-sample.yaml`,通过执行以下命令修改该文件: + + ```bash + vi config-sample.yaml + ``` + + {{< notice note >}} +如果您采用 [All-in-one 安装](../../quick-start/all-in-one-on-linux/),则不需要创建 `config-sample.yaml` 文件,因为可以直接创建集群。一般来说,All-in-one 模式是为那些刚接触 KubeSphere 并希望熟悉系统的用户而准备的,如果您想在这个模式下启用 DevOps(比如用于测试),请参考[下面的部分](#在安装后启用-devops),查看如何在安装后启用 DevOps。 + {{}} + +2. 在该文件中,搜索 `devops`,并将 `enabled` 的 `false `改为 `true`,完成后保存文件。 + + ```yaml + devops: + enabled: true # 将“false”更改为“true”。 + ``` + +3. 执行以下命令使用该配置文件创建集群: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +### 在 Kubernetes 上安装 + +当您[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 时,需要先在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) 文件中启用 DevOps。 + +1. 下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) 文件,执行以下命令打开并编辑该文件: + + ```bash + vi cluster-configuration.yaml + ``` + +2. 
在 `cluster-configuration.yaml` 文件中,搜索 `devops`,并将 `enabled` 的 `false` 改为 `true`。完成后保存文件。 + + ```yaml + devops: + enabled: true # 将“false”更改为“true”。 + ``` + +3. 执行以下命令开始安装: + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f cluster-configuration.yaml + ``` + +## 在安装后启用 DevOps + +1. 以 `admin` 用户登录控制台,点击左上角的**平台管理**,选择**集群管理**。 + +2. 点击**定制资源定义**,在搜索栏中输入 `clusterconfiguration`,点击搜索结果查看其详细页面。 + + {{< notice info >}} +定制资源定义(CRD)允许用户在不新增 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些定制资源。 + {{}} + +3. 在**自定义资源**中,点击 `ks-installer` 右侧的 ,选择**编辑 YAML**。 + +4. 在该 YAML 文件中,搜索 `devops`,将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**确定**,保存配置。 + + ```yaml + devops: + enabled: true # 将“false”更改为“true”。 + ``` + +5. 在 kubectl 中执行以下命令检查安装过程: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + {{< notice note >}} + +您可以点击控制台右下角的 找到 kubectl 工具。 + {{}} + +## 验证组件的安装 + +{{< tabs >}} + +{{< tab "在仪表板中验证组件的安装" >}} + +进入**系统组件**,检查是否 **DevOps** 标签页中的所有组件都处于**健康**状态。如果是,组件安装成功。 + +{{}} + +{{< tab "通过 kubectl 验证组件的安装" >}} + +执行以下命令来检查容器组的状态: + +```bash +kubectl get pod -n kubesphere-devops-system +``` + +如果组件运行成功,输出结果如下: + +```bash +NAME READY STATUS RESTARTS AGE +devops-jenkins-5cbbfbb975-hjnll 1/1 Running 0 40m +s2ioperator-0 1/1 Running 0 41m +``` + +{{}} + +{{}} diff --git a/content/zh/docs/v3.4/pluggable-components/events.md b/content/zh/docs/v3.4/pluggable-components/events.md new file mode 100644 index 000000000..de647fd54 --- /dev/null +++ b/content/zh/docs/v3.4/pluggable-components/events.md @@ -0,0 +1,191 @@ +--- +title: "KubeSphere 事件系统" +keywords: "Kubernetes, 事件, KubeSphere, k8s-events" +description: "了解如何启用 KubeSphere 事件模块来跟踪平台上发生的所有事件。" +linkTitle: "KubeSphere 事件系统" +weight: 6500 +--- + +KubeSphere 事件系统使用户能够跟踪集群内部发生的事件,例如节点调度状态和镜像拉取结果。这些事件会被准确记录下来,并在 Web 控制台中显示具体的原因、状态和信息。要查询事件,用户可以快速启动 Web 工具箱,在搜索栏中输入相关信息,并有不同的过滤器(如关键字和项目)可供选择。事件也可以归档到第三方工具,例如 Elasticsearch、Kafka 或 Fluentd。 + +有关更多信息,请参见[事件查询](../../toolbox/events-query/)。 + +## 在安装前启用事件系统 + +### 在 Linux 上安装 + +当您在 Linux 上安装多节点 KubeSphere 时,需要创建一个配置文件,该文件列出了所有 KubeSphere 组件。 + +1. [在 Linux 上安装 KubeSphere](../../installing-on-linux/introduction/multioverview/) 时,您需要创建一个默认文件 `config-sample.yaml`。执行以下命令修改该文件: + + ```bash + vi config-sample.yaml + ``` + + {{< notice note >}} + +如果您采用 [All-in-One 安装](../../quick-start/all-in-one-on-linux/),则不需要创建 `config-sample.yaml` 文件,因为可以直接创建集群。一般来说,All-in-One 模式是为那些刚接触 KubeSphere 并希望熟悉系统的用户而准备的。如果您想在该模式下启用事件系统(例如用于测试),请参考[下面的部分](#在安装后启用事件系统),查看[如何在安装后启用](#在安装后启用事件系统)事件系统。 + +{{}} + +2. 在该文件中,搜寻到 `events`,并将 `enabled` 的 `false` 改为 `true`。完成后保存文件。 + + ```yaml + events: + enabled: true # 将“false”更改为“true”。 + ``` + + {{< notice note >}} +默认情况下,如果启用了事件系统,KubeKey 将安装内置 Elasticsearch。对于生产环境,如果您想启用事件系统,强烈建议在 `config-sample.yaml` 中设置以下值,尤其是 `externalElasticsearchHost` 和 `externalElasticsearchPort`。在安装前提供以下信息后,KubeKey 将直接对接您的外部 Elasticsearch,不再安装内置 Elasticsearch。 + {{}} + + ```yaml + es: # Storage backend for logging, tracing, events and auditing. + elasticsearchMasterReplicas: 1 # The total number of master nodes. Even numbers are not allowed. + elasticsearchDataReplicas: 1 # The total number of data nodes. + elasticsearchMasterVolumeSize: 4Gi # The volume size of Elasticsearch master nodes. + elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. 
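+      # 提示(示例说明):生产环境可按实际事件与日志量调大上面两项存储卷大小,例如 elasticsearchDataVolumeSize: 100Gi(仅为示例值)。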
+ logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. + elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. + externalElasticsearchHost: # The Host of external Elasticsearch. + externalElasticsearchPort: # The port of external Elasticsearch. + ``` + +3. 使用该配置文件创建集群: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +### 在 Kubernetes 上安装 + +当您[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 时,需要先在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) 文件中启用事件系统。 + +1. 下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) 文件,然后打开并开始编辑。 + + ```bash + vi cluster-configuration.yaml + ``` + +2. 在 `cluster-configuration.yaml` 文件中,搜索 `events`,并将 `enabled` 的 `false` 改为 `true`以启用事件系统。完成后保存文件。 + + ```yaml + events: + enabled: true # 将“false”更改为“true”。 + ``` + + {{< notice note >}} +对于生产环境,如果您想启用事件系统,强烈建议在 `cluster-configuration.yaml` 中设置以下值,尤其是 `externalElasticsearchHost` 和 `externalElasticsearchPort`。在安装前提供以下信息后,ks-installer 将直接对接您的外部 Elasticsearch,不再安装内置 Elasticsearch。 + {{}} + + ```yaml + es: # Storage backend for logging, tracing, events and auditing. + elasticsearchMasterReplicas: 1 # The total number of master nodes. Even numbers are not allowed. + elasticsearchDataReplicas: 1 # The total number of data nodes. + elasticsearchMasterVolumeSize: 4Gi # The volume size of Elasticsearch master nodes. + elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. + logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. + elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. + externalElasticsearchHost: # The Host of external Elasticsearch. + externalElasticsearchPort: # The port of external Elasticsearch. + ``` + +3. 执行以下命令开始安装: + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f cluster-configuration.yaml + ``` + +## 在安装后启用事件系统 + +1. 使用 `admin` 用户登录控制台。点击左上角的**平台管理**,选择**集群管理**。 + +2. 点击**定制资源定义**,在搜索栏中输入 `clusterconfiguration`。点击结果查看其详情页。 + + {{< notice info >}} + +定制资源定义 (CRD) 允许用户在不新增 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些定制资源。 + +{{}} + +3. 在**自定义资源**中,点击 `ks-installer` 右侧的 ,选择**编辑 YAML**。 + +4. 在该配置文件中,搜索 `events`,将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**确定**,保存配置。 + + ```yaml + events: + enabled: true # 将“false”更改为“true”。 + ``` + + {{< notice note >}} + +默认情况下,如果启用了事件系统,将会安装内置 Elasticsearch。对于生产环境,如果您想启用事件系统,强烈建议在该 YAML 文件中设置以下值,尤其是 `externalElasticsearchHost` 和 `externalElasticsearchPort`。在文件中提供以下信息后,KubeSphere 将直接对接您的外部 Elasticsearch,不再安装内置 Elasticsearch。 + {{}} + + ```yaml + es: # Storage backend for logging, tracing, events and auditing. + elasticsearchMasterReplicas: 1 # The total number of master nodes. Even numbers are not allowed. + elasticsearchDataReplicas: 1 # The total number of data nodes. + elasticsearchMasterVolumeSize: 4Gi # The volume size of Elasticsearch master nodes. + elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. + logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. + elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. 
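+      # 对接外部 Elasticsearch 的填写示例(主机名与端口均为假设值,请替换为实际可访问的地址):
+      # externalElasticsearchHost: es.example.com
+      # externalElasticsearchPort: 9200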
+ externalElasticsearchHost: # The Host of external Elasticsearch. + externalElasticsearchPort: # The port of external Elasticsearch. + ``` + +5. 在 kubectl 中执行以下命令检查安装过程: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + {{< notice note >}} + +您可以通过点击控制台右下角的 找到 kubectl 工具。 + +{{}} + +## 验证组件的安装 + +{{< tabs >}} + +{{< tab "在仪表板中验证组件的安装" >}} + +验证您可以使用右下角**工具箱**中的**资源事件查询**功能。 + +{{}} + +{{< tab "通过 kubectl 验证组件的安装" >}} + +执行以下命令来检查容器组的状态: + +```bash +kubectl get pod -n kubesphere-logging-system +``` + +如果组件运行成功,输出结果如下: + +```bash +NAME READY STATUS RESTARTS AGE +elasticsearch-logging-data-0 1/1 Running 0 155m +elasticsearch-logging-data-1 1/1 Running 0 154m +elasticsearch-logging-discovery-0 1/1 Running 0 155m +fluent-bit-bsw6p 1/1 Running 0 108m +fluent-bit-smb65 1/1 Running 0 108m +fluent-bit-zdz8b 1/1 Running 0 108m +fluentbit-operator-9b69495b-bbx54 1/1 Running 0 109m +ks-events-exporter-5cb959c74b-gx4hw 2/2 Running 0 7m55s +ks-events-operator-7d46fcccc9-4mdzv 1/1 Running 0 8m +ks-events-ruler-8445457946-cl529 2/2 Running 0 7m55s +ks-events-ruler-8445457946-gzlm9 2/2 Running 0 7m55s +logsidecar-injector-deploy-667c6c9579-cs4t6 2/2 Running 0 106m +logsidecar-injector-deploy-667c6c9579-klnmf 2/2 Running 0 106m +``` + +{{}} + +{{}} + diff --git a/content/zh/docs/v3.4/pluggable-components/kubeedge.md b/content/zh/docs/v3.4/pluggable-components/kubeedge.md new file mode 100644 index 000000000..6e19a6be5 --- /dev/null +++ b/content/zh/docs/v3.4/pluggable-components/kubeedge.md @@ -0,0 +1,185 @@ +--- +title: "KubeEdge" +keywords: "Kubernetes, KubeSphere, KubeEdge" +description: "了解如何启用 KubeEdge 为您的集群添加边缘节点。" +linkTitle: "KubeEdge" +weight: 6930 +--- + +[KubeEdge](https://kubeedge.io/zh/) 是一个开源系统,用于将容器化应用程序编排功能扩展到边缘的主机。KubeEdge 支持多个边缘协议,旨在对部署于云端和边端的应用程序与资源等进行统一管理。 + +KubeEdge 的组件在两个单独的位置运行——云上和边缘节点上。在云上运行的组件统称为 CloudCore,包括 Controller 和 Cloud Hub。Cloud Hub 作为接收边缘节点发送请求的网关,Controller 则作为编排器。在边缘节点上运行的组件统称为 EdgeCore,包括 EdgeHub,EdgeMesh,MetadataManager 和 DeviceTwin。有关更多信息,请参见 [KubeEdge 网站](https://kubeedge.io/zh/)。 + +启用 KubeEdge 后,您可以[为集群添加边缘节点](../../installing-on-linux/cluster-operation/add-edge-nodes/)并在这些节点上部署工作负载。 + +![kubeedge_arch](/images/docs/v3.3/zh-cn/enable-pluggable-components/kubeedge/kubeedge_arch.png) + +## 安装前启用 KubeEdge + +### 在 Linux 上安装 + +在 Linux 上多节点安装 KubeSphere 时,您需要创建一个配置文件,该文件会列出所有 KubeSphere 组件。 + +1. [在 Linux 上安装 KubeSphere](../../installing-on-linux/introduction/multioverview/) 时,您需要创建一个默认文件 `config-sample.yaml`。执行以下命令修改该文件: + + ```bash + vi config-sample.yaml + ``` + + {{< notice note >}} + 如果您采用 [All-in-one 安装](../../quick-start/all-in-one-on-linux/),则不需要创建 `config-sample.yaml` 文件,因为可以直接创建集群。一般来说,All-in-one 模式针对那些刚接触 KubeSphere 并希望熟悉系统的用户。如果您想在该模式下启用 KubeEdge(比如用于测试),请参考[下面的部分](#在安装后启用-kubeedge),查看如何在安装后启用 KubeEdge。 + + {{}} + +2. 在该文件中,搜索 `edgeruntime` 和 `kubeedge`,然后将它们 `enabled` 值从 `false` 更改为 `true` 以便开启所有 KubeEdge 组件。完成后保存文件。 + + ```yaml + edgeruntime: # Add edge nodes to your cluster and deploy workloads on edge nodes. + enabled: false + kubeedge: # kubeedge configurations + enabled: false + cloudCore: + cloudHub: + advertiseAddress: # At least a public IP address or an IP address which can be accessed by edge nodes must be provided. + - "" # Note that once KubeEdge is enabled, CloudCore will malfunction if the address is not provided. 
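+          # advertiseAddress 填写示例(IP 仅为假设值,请替换为集群的公网 IP 或边缘节点可访问的 IP):
+          # - "192.168.0.2"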
+ service: + cloudhubNodePort: "30000" + cloudhubQuicNodePort: "30001" + cloudhubHttpsNodePort: "30002" + cloudstreamNodePort: "30003" + tunnelNodePort: "30004" + # resources: {} + # hostNetWork: false + ``` + +3. 将 `kubeedge.cloudCore.cloudHub.advertiseAddress` 的值设置为集群的公共 IP 地址或边缘节点可以访问的 IP 地址。编辑完成后保存文件。 + +4. 使用该配置文件创建一个集群: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +### 在 Kubernetes 上安装 + +当您[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 时,需要先在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) 文件中启用 KubeEdge。 + +1. 下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) 文件并进行编辑。 + + ```bash + vi cluster-configuration.yaml + ``` + +2. 在本地 `cluster-configuration.yaml` 文件中,搜索 `edgeruntime` 和 `kubeedge`,然后将它们 `enabled` 值从 `false` 更改为 `true` 以便开启所有 KubeEdge 组件。完成后保存文件。 + + ```yaml + edgeruntime: # Add edge nodes to your cluster and deploy workloads on edge nodes. + enabled: false + kubeedge: # kubeedge configurations + enabled: false + cloudCore: + cloudHub: + advertiseAddress: # At least a public IP address or an IP address which can be accessed by edge nodes must be provided. + - "" # Note that once KubeEdge is enabled, CloudCore will malfunction if the address is not provided. + service: + cloudhubNodePort: "30000" + cloudhubQuicNodePort: "30001" + cloudhubHttpsNodePort: "30002" + cloudstreamNodePort: "30003" + tunnelNodePort: "30004" + # resources: {} + # hostNetWork: false + ``` + +3. 将 `kubeedge.cloudCore.cloudHub.advertiseAddress` 的值设置为集群的公共 IP 地址或边缘节点可以访问的 IP 地址。 + +4. 执行以下命令开始安装: + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f cluster-configuration.yaml + ``` + +## 在安装后启用 KubeEdge + +1. 使用 `admin` 用户登录控制台。点击左上角的**平台管理**,然后选择**集群管理**。 + +2. 点击**定制资源定义**,然后在搜索栏中输入 `clusterconfiguration`。点击搜索结果查看其详情页。 + + {{< notice info >}} +定制资源定义(CRD)允许用户在不新增 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些定制资源。 + {{}} + +3. 在**自定义资源**中,点击 `ks-installer` 右侧的 ,然后选择**编辑 YAML**。 + +4. 在该配置文件中,搜索 `edgeruntime` 和 `kubeedge`,然后将它们 `enabled` 值从 `false` 更改为 `true` 以便开启所有 KubeEdge 组件。完成后保存文件。 + + ```yaml + edgeruntime: # Add edge nodes to your cluster and deploy workloads on edge nodes. + enabled: false + kubeedge: # kubeedge configurations + enabled: false + cloudCore: + cloudHub: + advertiseAddress: # At least a public IP address or an IP address which can be accessed by edge nodes must be provided. + - "" # Note that once KubeEdge is enabled, CloudCore will malfunction if the address is not provided. + service: + cloudhubNodePort: "30000" + cloudhubQuicNodePort: "30001" + cloudhubHttpsNodePort: "30002" + cloudstreamNodePort: "30003" + tunnelNodePort: "30004" + # resources: {} + # hostNetWork: false + ``` + +5. 将 `kubeedge.cloudCore.cloudHub.advertiseAddress` 的值设置为集群的公共 IP 地址或边缘节点可以访问的 IP 地址。完成后,点击右下角的**确定**保存配置。 + +6. 
在 kubectl 中执行以下命令检查安装过程: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + {{< notice note >}} + +您可以通过点击控制台右下角的 来找到 kubectl 工具。 + {{}} + +## 验证组件的安装 + +{{< tabs >}} + +{{< tab "在仪表板中验证组件的安装" >}} + +在**集群管理**页面,您可以看到**节点**下出现**边缘节点**板块。 + +{{}} + +{{< tab "通过 Kubectl 验证组件的安装" >}} + +执行以下命令来检查容器组的状态: + +```bash +kubectl get pod -n kubeedge +``` + +如果组件运行成功,输出结果可能如下: + +```bash +NAME READY STATUS RESTARTS AGE +cloudcore-5f994c9dfd-r4gpq 1/1 Running 0 5h13m +edge-watcher-controller-manager-bdfb8bdb5-xqfbk 2/2 Running 0 5h13m +iptables-hphgf 1/1 Running 0 5h13m +``` + +{{}} + +{{}} + +{{< notice note >}} + +如果您在启用 KubeEdge 时未设置 `kubeedge.cloudCore.cloudHub.advertiseAddress`,则 CloudCore 无法正常运行 (`CrashLoopBackOff`)。在这种情况下,请运行 `kubectl -n kubeedge edit cm cloudcore` 添加集群的公共 IP 地址或边缘节点可以访问的 IP 地址。 + +{{}} diff --git a/content/zh/docs/v3.4/pluggable-components/logging.md b/content/zh/docs/v3.4/pluggable-components/logging.md new file mode 100644 index 000000000..a3fef71e0 --- /dev/null +++ b/content/zh/docs/v3.4/pluggable-components/logging.md @@ -0,0 +1,199 @@ +--- +title: "KubeSphere 日志系统" +keywords: "Kubernetes, Elasticsearch, KubeSphere, 日志系统, 日志" +description: "了解如何启用日志,利用基于租户的系统进行日志收集、查询和管理。" +linkTitle: "KubeSphere 日志系统" +weight: 6400 +--- + +KubeSphere 为日志收集、查询和管理提供了一个强大的、全面的、易于使用的日志系统。它涵盖了不同层级的日志,包括租户、基础设施资源和应用。用户可以从项目、工作负载、容器组和关键字等不同维度对日志进行搜索。与 Kibana 相比,KubeSphere 基于租户的日志系统中,每个租户只能查看自己的日志,从而可以在租户之间提供更好的隔离性和安全性。除了 KubeSphere 自身的日志系统,该容器平台还允许用户添加第三方日志收集器,如 Elasticsearch、Kafka 和 Fluentd。 + +有关更多信息,请参见[日志查询](../../toolbox/log-query/)。 + +## 在安装前启用日志系统 + +### 在 Linux 上安装 + +当您在 Linux 上安装 KubeSphere 时,首先需要创建一个配置文件,该文件列出了所有 KubeSphere 组件。 + +1. [在 Linux 上安装 KubeSphere](../../installing-on-linux/introduction/multioverview/) 时,您需要创建一个默认文件 `config-sample.yaml`。通过执行以下命令修改该文件: + + ```bash + vi config-sample.yaml + ``` + + {{< notice note >}} + +- 如果您采用 [All-in-one 安装](../../quick-start/all-in-one-on-linux/),则不需要创建 `config-sample.yaml` 文件,因为可以直接创建集群。一般来说,All-in-one 模式是为那些刚接触 KubeSphere 并希望熟悉系统的用户而准备的。如果您想在这个模式下启用日志系统(比如用于测试),请参考[下面的部分](#在安装后启用日志系统),查看如何在安装后启用日志系统。 + +- 如果您采用[多节点安装](../../installing-on-linux/introduction/multioverview/),并且使用符号链接作为 Docker 根目录,请确保所有节点遵循完全相同的符号链接。日志代理在守护进程集中部署到节点上。容器日志路径的任何差异都可能导致该节点的收集失败。 + +{{}} + +2. 在该文件中,搜寻到 `logging`,并将 `enabled` 的 `false` 改为 `true`。完成后保存文件。 + + ```yaml + logging: + enabled: true # 将“false”更改为“true”。 + containerruntime: docker + ``` + + {{< notice info >}}若使用 containerd 作为容器运行时,请将 `containerruntime` 字段的值更改为 `containerd`。如果您从低版本升级至 KubeSphere 3.3,则启用 KubeSphere 日志系统时必须在 `logging` 字段下手动添加 `containerruntime` 字段。 + + {{}} + + {{< notice note >}}默认情况下,如果启用了日志系统,KubeKey 将安装内置 Elasticsearch。对于生产环境,如果您想启用日志系统,强烈建议在 `config-sample.yaml` 中设置以下值,尤其是 `externalElasticsearchHost` 和 `externalElasticsearchPort`。在安装前提供以下信息后,KubeKey 将直接对接您的外部 Elasticsearch,不再安装内置 Elasticsearch。 + {{}} + + ```yaml + es: # Storage backend for logging, tracing, events and auditing. + elasticsearchMasterReplicas: 1 # The total number of master nodes. Even numbers are not allowed. + elasticsearchDataReplicas: 1 # The total number of data nodes. + elasticsearchMasterVolumeSize: 4Gi # The volume size of Elasticsearch master nodes. + elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. + logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. + elkPrefix: logstash # The string making up index names. 
The index name will be formatted as ks--log. + externalElasticsearchHost: # The Host of external Elasticsearch. + externalElasticsearchPort: # The port of external Elasticsearch. + ``` + +3. 使用该配置文件创建集群: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +### 在 Kubernetes 上安装 + +当您[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 时,需要先在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) 文件中启用日志系统。 + +1. 下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) 文件,然后打开并开始编辑。 + + ```bash + vi cluster-configuration.yaml + ``` + +2. 在 `cluster-configuration.yaml` 文件中,搜索 `logging`,并将 `enabled` 的 `false` 改为 `true`,以启用日志系统。完成后保存文件。 + + ```yaml + logging: + enabled: true # 将“false”更改为“true”。 + containerruntime: docker + ``` + + {{< notice info >}}若使用 containerd 作为容器运行时,请将 `.logging.containerruntime` 字段的值更改为 `containerd`。如果您从低版本升级至 KubeSphere 3.3,则启用 KubeSphere 日志系统时必须在 `logging` 字段下手动添加 `containerruntime` 字段。 + + {{}} + + {{< notice note >}}默认情况下,如果启用了日志系统,ks-installer 将安装内置 Elasticsearch。对于生产环境,如果您想启用日志系统,强烈建议在 `cluster-configuration.yaml` 中设置以下值,尤其是 `externalElasticsearchHost` 和 `externalElasticsearchPort`。在安装前提供以下信息后,ks-installer 将直接对接您的外部 Elasticsearch,不再安装内置 Elasticsearch。 + {{}} + + ```yaml + es: # Storage backend for logging, tracing, events and auditing. + elasticsearchMasterReplicas: 1 # The total number of master nodes. Even numbers are not allowed. + elasticsearchDataReplicas: 1 # The total number of data nodes. + elasticsearchMasterVolumeSize: 4Gi # The volume size of Elasticsearch master nodes. + elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. + logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. + elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. + externalElasticsearchHost: # The Host of external Elasticsearch. + externalElasticsearchPort: # The port of external Elasticsearch. + ``` + +3. 执行以下命令开始安装: + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f cluster-configuration.yaml + ``` + +## 在安装后启用日志系统 + +1. 以 `admin` 用户登录控制台。点击左上角的**平台管理**,选择**集群管理**。 + +2. 点击**定制资源定义**,在搜索栏中输入 `clusterconfiguration`。点击结果查看其详细页面。 + + {{< notice info >}} + +定制资源定义 (CRD) 允许用户在不增加额外 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些定制资源。 + +{{}} + +3. 在**自定义资源**中,点击 `ks-installer` 右侧的 ,选择**编辑 YAML**。 + +4. 在该 YAML 文件中,搜索 `logging`,将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**确定**以保存配置。 + + ```yaml + logging: + enabled: true # 将“false”更改为“true”。 + containerruntime: docker + ``` + + {{< notice info >}}若使用 containerd 作为容器运行时,请将 `.logging.containerruntime` 字段的值更改为 `containerd`。如果您从低版本升级至 KubeSphere 3.3,则启用 KubeSphere 日志系统时必须在 `logging` 字段下手动添加 `containerruntime` 字段。 + + {{}} + + {{< notice note >}}默认情况下,如果启用了日志系统,将会安装内置 Elasticsearch。对于生产环境,如果您想启用日志系统,强烈建议在该 YAML 文件中设置以下值,尤其是 `externalElasticsearchHost` 和 `externalElasticsearchPort`。在文件中提供以下信息后,KubeSphere 将直接对接您的外部 Elasticsearch,不再安装内置 Elasticsearch。 + {{}} + + ```yaml + es: # Storage backend for logging, tracing, events and auditing. + elasticsearchMasterReplicas: 1 # The total number of master nodes. Even numbers are not allowed. + elasticsearchDataReplicas: 1 # The total number of data nodes. 
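+      # 提示:elasticsearchMasterReplicas 需为奇数;日志量较大时可适当增加 elasticsearchDataReplicas,例如 3(仅为示例值)。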
+ elasticsearchMasterVolumeSize: 4Gi # The volume size of Elasticsearch master nodes. + elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. + logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. + elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. + externalElasticsearchHost: # The Host of external Elasticsearch. + externalElasticsearchPort: # The port of external Elasticsearch. + ``` + +5. 在 kubectl 中执行以下命令检查安装过程: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + {{< notice note >}} + +您可以通过点击控制台右下角的 找到 kubectl 工具。 + +{{}} + +## 验证组件的安装 + +{{< tabs >}} + +{{< tab "在仪表板中验证组件的安装" >}} + +进入**系统组件**,检查**日志**标签页中的所有组件都处于**健康**状态。如果是,组件安装成功。 + +{{}} + +{{< tab "通过 kubectl 验证组件的安装" >}} + +执行以下命令来检查容器组的状态: + +```bash +kubectl get pod -n kubesphere-logging-system +``` + +如果组件运行成功,输出结果如下: + +```bash +NAME READY STATUS RESTARTS AGE +elasticsearch-logging-data-0 1/1 Running 0 87m +elasticsearch-logging-data-1 1/1 Running 0 85m +elasticsearch-logging-discovery-0 1/1 Running 0 87m +fluent-bit-bsw6p 1/1 Running 0 40m +fluent-bit-smb65 1/1 Running 0 40m +fluent-bit-zdz8b 1/1 Running 0 40m +fluentbit-operator-9b69495b-bbx54 1/1 Running 0 40m +logsidecar-injector-deploy-667c6c9579-cs4t6 2/2 Running 0 38m +logsidecar-injector-deploy-667c6c9579-klnmf 2/2 Running 0 38m +``` + +{{}} + +{{}} diff --git a/content/zh/docs/v3.4/pluggable-components/metrics-server.md b/content/zh/docs/v3.4/pluggable-components/metrics-server.md new file mode 100644 index 000000000..f804ab4ae --- /dev/null +++ b/content/zh/docs/v3.4/pluggable-components/metrics-server.md @@ -0,0 +1,114 @@ +--- +title: "Metrics Server" +keywords: "Kubernetes, KubeSphere, Metrics Server" +description: "了解如何启用 Metrics Server 以使用 HPA 对部署进行自动伸缩。" +linkTitle: "Metrics Server" +weight: 6910 +--- + +KubeSphere 支持用于[部署](../../project-user-guide/application-workloads/deployments/)的容器组(Pod)弹性伸缩程序 (HPA)。在 KubeSphere 中,Metrics Server 控制着 HPA 是否启用。您可以根据不同类型的指标(例如 CPU 和内存使用率,以及最小和最大副本数),使用 HPA 对象对部署 (Deployment) 自动伸缩。通过这种方式,HPA 可以帮助确保您的应用程序在不同情况下都能平稳、一致地运行。 + +## 在安装前启用 Metrics Server + +### 在 Linux 上安装 + +当您在 Linux 上安装多节点 KubeSphere 时,首先需要创建一个配置文件,该文件列出了所有 KubeSphere 组件。 + +1. [在 Linux 上安装 KubeSphere](../../installing-on-linux/introduction/multioverview/) 时,您需要创建一个默认文件 `config-sample.yaml`,通过执行以下命令修改该文件: + + ```bash + vi config-sample.yaml + ``` + + {{< notice note >}} + 如果您采用 [All-in-One 安装](../../quick-start/all-in-one-on-linux/),则不需要创建 `config-sample.yaml` 文件,因为可以直接创建集群。一般来说,All-in-One 模式是为那些刚接触 KubeSphere 并希望熟悉系统的用户而准备的。如果您想在这个模式下启用 Metrics Server(比如用于测试),请参考[下面的部分](#在安装后启用应用商店),查看如何在安装后启用 Metrics Server。 + {{}} + +2. 在该文件中,搜寻到 `metrics_server`,并将 `enabled` 的 `false` 改为 `true`。完成后保存文件。 + + ```yaml + metrics_server: + enabled: true # 将“false”更改为“true”。 + ``` + +3. 使用该配置文件创建集群: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +### 在 Kubernetes 上安装 + +当您[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 时,需要先在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) 文件中先启用 Metrics Server组件。 + +1. 下载文件 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml),并打开文件进行编辑。 + + ```bash + vi cluster-configuration.yaml + ``` + +2. 
在 `cluster-configuration.yaml` 中,搜索 `metrics_server`,并将 `enabled` 的 `false` 改为 `true`。完成后保存文件。 + + ```yaml + metrics_server: + enabled: true # 将“false”更改为“true”。 + ``` + +3. 执行以下命令以开始安装: + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f cluster-configuration.yaml + ``` + + {{< notice note >}} + +如果您在某些云托管的 Kubernetes 引擎上安装 KubeSphere,那么很可能您的环境中已经安装了 Metrics Server。在这种情况下,不建议您在 `cluster-configuration.yaml` 中启用 Metrics Server,因为这可能会在安装过程中引起冲突。 {{}} + +## 在安装后启用 Metrics Server + +1. 以 `admin` 用户登录控制台。点击左上角**平台管理**,选择**集群管理**。 + +2. 点击**定制资源定义**,在搜索栏中输入 `clusterconfiguration`。点击搜索结果查看详情页。 + + {{< notice info >}} + +定制资源定义(CRD)允许用户在不增加额外 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些定制资源。 + + {{}} + +3. 在**自定义资源**中,点击 `ks-installer` 右侧的 ,选择**编辑 YAML**。 + +4. 在该 YAML 文件中,搜索 `metrics_server`,将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**确定**以保存配置。 + + ```yaml + metrics_server: + enabled: true # 将“false”更改为“true”。 + ``` + +5. 在 kubectl 中执行以下命令检查安装过程: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + {{< notice note >}} + +可以通过点击控制台右下角的 找到 kubectl 工具。 + {{}} + +## 验证组件的安装 + +执行以下命令以验证 Metrics Server 的容器组是否正常运行: + +```bash +kubectl get pod -n kube-system +``` + +如果 Metrics Server 安装成功,那么集群可能会返回以下输出(不包括无关容器组): + +```bash +NAME READY STATUS RESTARTS AGE +metrics-server-6c767c9f94-hfsb7 1/1 Running 0 9m38s +``` \ No newline at end of file diff --git a/content/zh/docs/v3.4/pluggable-components/network-policy.md b/content/zh/docs/v3.4/pluggable-components/network-policy.md new file mode 100644 index 000000000..29c0d7552 --- /dev/null +++ b/content/zh/docs/v3.4/pluggable-components/network-policy.md @@ -0,0 +1,109 @@ +--- +title: "网络策略" +keywords: "Kubernetes, KubeSphere, NetworkPolicy" +description: "了解如何启用网络策略来控制 IP 地址或端口级别的流量。" +linkTitle: "网络策略" +weight: 6900 +--- + +从 3.0.0 版本开始,用户可以在 KubeSphere 中配置原生 Kubernetes 的网络策略。网络策略是一种以应用为中心的结构,使您能够指定如何允许容器组通过网络与各种网络实体进行通信。通过网络策略,用户可以在同一集群内实现网络隔离,这意味着可以在某些实例(容器组)之间设置防火墙。 + +{{< notice note >}} + +- 在启用之前,请确保集群使用的 CNI 网络插件支持网络策略。支持网络策略的 CNI 网络插件有很多,包括 Calico、Cilium、Kube-router、Romana 和 Weave Net 等。 +- 建议您在启用网络策略之前,使用 [Calico](https://www.projectcalico.org/) 作为 CNI 插件。 + +{{}} + +有关更多信息,请参见[网络策略](https://kubernetes.io/zh/docs/concepts/services-networking/network-policies/)。 + +## 在安装前启用网络策略 + +### 在 Linux 上安装 + +当您在 Linux 上安装多节点 KubeSphere 时,需要创建一个配置文件,该文件列出了所有 KubeSphere 组件。 + +1. 在[在 Linux 上安装 KubeSphere](../../installing-on-linux/introduction/multioverview/) 时,您需要创建一个默认文件 `config-sample.yaml`。执行以下命令修改该文件: + + ```bash + vi config-sample.yaml + ``` + + {{< notice note >}} +如果您采用 [All-in-One 安装](../../quick-start/all-in-one-on-linux/),则不需要创建 `config-sample.yaml` 文件,因为可以直接创建集群。一般来说,All-in-One 模式是为那些刚接触 KubeSphere 并希望熟悉系统的用户而准备的。如果您想在该模式下启用网络策略(例如用于测试),可以参考[下面的部分](#在安装后启用网络策略),查看如何在安装后启用网络策略。 + {{}} + +2. 在该文件中,搜索 `network.networkpolicy`,并将 `enabled` 的 `false` 改为 `true`。完成后保存文件。 + + ```yaml + network: + networkpolicy: + enabled: true # 将“false”更改为“true”。 + ``` + +3. 使用配置文件创建一个集群: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +### 在 Kubernetes 上安装 + +当您[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 时,需要先在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) 文件中启用网络策略。 + +1. 
下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) 文件,然后打开并开始编辑。 + + ```bash + vi cluster-configuration.yaml + ``` + +2. 在该本地 `cluster-configuration.yaml` 文件中,搜索 `network.networkpolicy`,并将 `enabled` 的 `false` 改为 `true`。完成后保存文件。 + + ```yaml + network: + networkpolicy: + enabled: true # 将“false”更改为“true”。 + ``` + +3. 执行以下命令开始安装: + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f cluster-configuration.yaml + ``` + +## 在安装后启用网络策略 + +1. 以 `admin` 身份登录控制台。点击左上角的**平台管理**,选择**集群管理**。 + +2. 点击**定制资源定义**,在搜索栏中输入 `clusterconfiguration`。点击结果查看其详细页面。 + + {{< notice info >}} +定制资源定义(CRD)允许用户在不新增 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些定制资源。 + {{}} + +3. 在**自定义资源**中,点击 `ks-installer` 右侧的 ,选择**编辑 YAML**。 + +4. 在该 YAML 文件中,搜寻到 `network.networkpolicy`,将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**确定**,保存配置。 + + ```yaml + network: + networkpolicy: + enabled: true # 将“false”更改为“true”。 + ``` + +5. 在 kubectl 中执行以下命令检查安装过程: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + {{< notice note >}} + +您可以通过点击控制台右下角的 找到 kubectl 工具。 + {{}} + +## 验证组件的安装 + +如果您能在**网络**中看到**网络策略**,说明安装成功。 diff --git a/content/zh/docs/v3.4/pluggable-components/overview.md b/content/zh/docs/v3.4/pluggable-components/overview.md new file mode 100644 index 000000000..044a17ac6 --- /dev/null +++ b/content/zh/docs/v3.4/pluggable-components/overview.md @@ -0,0 +1,98 @@ +--- +title: "概述" +keywords: "Kubernetes, KubeSphere, 可插拔组件, 概述" +description: "了解 KubeSphere 中的关键组件以及对应的资源消耗。" +linkTitle: "概述" +weight: 6100 +--- + +从 2.1.0 版本开始,KubeSphere 解耦了一些核心功能组件。这些组件设计成了可插拔式,您可以在安装之前或之后启用它们。如果您不启用它们,KubeSphere 会默认以最小化进行安装部署。 + +不同的可插拔组件部署在不同的命名空间中。您可以根据需求启用任意组件。强烈建议您安装这些可插拔组件来深度体验 KubeSphere 提供的全栈特性和功能。 + +有关如何启用每个组件的更多信息,请参见本章的各个教程。 + +## 资源要求 + +在您启用可插拔组件之前,请确保您的环境中有足够的资源,具体参见下表。否则,可能会因为缺乏资源导致组件崩溃。 + +{{< notice note >}} + +CPU 和内存的资源请求和限制均指单个副本的要求。 + +{{}} + +### KubeSphere 应用商店 + +| 命名空间 | openpitrix-system | +| -------- | ---------------------------------------- | +| CPU 请求 | 0.3 核 | +| CPU 限制 | 无 | +| 内存请求 | 300 MiB | +| 内存限制 | 无 | +| 安装 | 可选 | +| 备注 | 该组件可用于管理应用生命周期。建议安装。 | + +### KubeSphere DevOps 系统 + +| 命名空间 | kubesphere-devops-system | kubesphere-devops-system | +| -------- | ------------------------------------------------------------ | -------------------------------- | +| 安装模式 | All-in-One 安装 | 多节点安装 | +| CPU 请求 | 34 m | 0.47 核 | +| CPU 限制 | 无 | 无 | +| 内存请求 | 2.69 G | 8.6 G | +| 内存限制 | 无 | 无 | +| 安装 | 可选 | 可选 | +| 备注 | 提供一站式 DevOps 解决方案,包括 Jenkins 流水线、B2I 和 S2I。 | 其中一个节点的内存必须大于 8 G。 | + +### KubeSphere 监控系统 + +| 命名空间 | kubesphere-monitoring-system | kubesphere-monitoring-system | kubesphere-monitoring-system | +| -------- | ------------------------------------------------------------ | ---------------------------- | ---------------------------- | +| 子组件 | 2 x Prometheus | 3 x Alertmanager | Notification Manager | +| CPU 请求 | 100 m | 10 m | 100 m | +| CPU 限制 | 4 core | 无 | 500 m | +| 内存请求 | 400 MiB | 30 MiB | 20 MiB | +| 内存限制 | 8 GiB | | 1 GiB | +| 安装 | 必需 | 必需 | 必需 | +| 备注 | Prometheus 的内存消耗取决于集群大小。8 GiB 可满足 200 个节点/16,000 个容器组的集群规模。 | | | + +{{< notice note >}} + +KubeSphere 监控系统不是可插拔组件,会默认安装。它与其他组件(例如日志系统)紧密关联,因此将其资源请求和限制也列在本页中,供您参考。 + +{{}} + +### KubeSphere 日志系统 + +| 命名空间 | kubesphere-logging-system | kubesphere-logging-system | 
kubesphere-logging-system | kubesphere-logging-system | +| -------- | ------------------------------------------------------------ | -------------------------------------------- | --------------------------------------- | --------------------------------------------------- | +| 子组件 | 3 x Elasticsearch | fluent bit | kube-events | kube-auditing | +| CPU 请求 | 50 m | 20 m | 90 m | 20 m | +| CPU 限制 | 1 core | 200 m | 900 m | 200 m | +| 内存请求 | 2 G | 50 MiB | 120 MiB | 50 MiB | +| 内存限制 | 无 | 100 MiB | 1200 MiB | 100 MiB | +| 安装 | 可选 | 必需 | 可选 | 可选 | +| 备注 | 可选组件,用于存储日志数据。不建议在生产环境中使用内置 Elasticsearch。 | 日志收集代理。启用日志系统后,它是必需组件。 | Kubernetes 事件收集、过滤、导出和告警。 | Kubernetes 和 KubeSphere 审计日志收集、过滤和告警。 | + +### KubeSphere 告警和通知 + +| 命名空间 | kubesphere-alerting-system | +| -------- | -------------------------- | +| CPU 请求 | 0.08 core | +| CPU 限制 | 无 | +| 内存请求 | 80 M | +| 内存限制 | 无 | +| 安装 | 可选 | +| 备注 | 告警和通知需要同时启用。 | + +### KubeSphere 服务网格 + +| 命名空间 | istio-system | +| -------- | ------------------------------------------------------ | +| CPU 请求 | 1 core | +| CPU 限制 | 无 | +| 内存请求 | 3.5 G | +| 内存限制 | 无 | +| 安装 | 可选 | +| 备注 | 支持灰度发布策略、流量拓扑、流量管理和分布式链路追踪。 | diff --git a/content/zh/docs/v3.4/pluggable-components/pod-ip-pools.md b/content/zh/docs/v3.4/pluggable-components/pod-ip-pools.md new file mode 100644 index 000000000..eb2520400 --- /dev/null +++ b/content/zh/docs/v3.4/pluggable-components/pod-ip-pools.md @@ -0,0 +1,102 @@ +--- +title: "容器组 IP 池" +keywords: "Kubernetes, KubeSphere, 容器组, IP 池" +description: "了解如何启用容器组 IP 池,为您的容器组分配一个特定的容器组 IP 池。" +linkTitle: "容器组 IP 池" +weight: 6920 +--- + +容器组 IP 池用于规划容器组网络地址空间,每个容器组 IP 池之间的地址空间不能重叠。创建工作负载时,可选择特定的容器组 IP 池,这样创建出的容器组将从该容器组 IP 池中分配 IP 地址。 + +## 安装前启用容器组 IP 池 + +### 在 Linux 上安装 + +在 Linux 上多节点安装 KubeSphere 时,您需要创建一个配置文件,该文件会列出所有 KubeSphere 组件。 + +1. [在 Linux 上安装 KubeSphere](../../installing-on-linux/introduction/multioverview/) 时,您需要创建一个默认文件 `config-sample.yaml`。执行以下命令修改该文件: + + ```bash + vi config-sample.yaml + ``` + + {{< notice note >}} + 如果您采用 [All-in-one 安装](../../quick-start/all-in-one-on-linux/),则不需要创建 `config-sample.yaml` 文件,因为可以直接创建集群。一般来说,All-in-one 模式针对那些刚接触 KubeSphere 并希望熟悉系统的用户。如果您想在该模式下启用容器组 IP 池(比如用于测试),请参考[下面的部分](#在安装后启用容器组-ip-池),查看如何在安装后启用容器组 IP 池。 + + {{}} + +2. 在该文件中,搜索 `network.ippool.type`,然后将 `none` 更改为 `calico`。完成后保存文件。 + + ```yaml + network: + ippool: + type: calico # 将“none”更改为“calico”。 + ``` + +3. 使用该配置文件创建一个集群: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +### 在 Kubernetes 上安装 + +当您[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 时,需要现在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) 文件中启用容器组 IP 池。 + +1. 下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) 文件并进行编辑。 + + ```bash + vi cluster-configuration.yaml + ``` + +2. 在本地 `cluster-configuration.yaml` 文件中,搜索 `network.ippool.type`,将 `none` 更改为 `calico` 以启用容器组 IP 池。完成后保存文件。 + + ```yaml + network: + ippool: + type: calico # 将“none”更改为“calico”。 + ``` + +3. 执行以下命令开始安装。 + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f cluster-configuration.yaml + ``` + + +## 在安装后启用容器组 IP 池 + +1. 使用 `admin` 用户登录控制台。点击左上角的**平台管理**,然后选择**集群管理**。 + +2. 
点击**定制资源定义**,然后在搜索栏中输入 `clusterconfiguration`。点击搜索结果查看其详情页。 + + {{< notice info >}} +定制资源定义(CRD)允许用户在不新增 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些定制资源。 + {{}} + +3. 在**自定义资源**中,点击 `ks-installer` 右侧的 ,然后选择**编辑 YAML**。 + +4. 在该配置文件中,搜寻到 `network`,将 `network.ippool.type` 更改为 `calico`。完成后,点击右下角的**确定**保存配置。 + + ```yaml + network: + ippool: + type: calico # 将“none”更改为“calico”。 + ``` + +5. 在 kubectl 中执行以下命令检查安装过程: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + {{< notice note >}} + +您可以通过点击控制台右下角的 来找到 kubectl 工具。 + {{}} + +## 验证组件的安装 + +在**集群管理**页面,您可以在**网络**下看到**容器组 IP 池**。 diff --git a/content/zh/docs/v3.4/pluggable-components/service-mesh.md b/content/zh/docs/v3.4/pluggable-components/service-mesh.md new file mode 100644 index 000000000..476304a2f --- /dev/null +++ b/content/zh/docs/v3.4/pluggable-components/service-mesh.md @@ -0,0 +1,157 @@ +--- +title: "KubeSphere 服务网格" +keywords: "Kubernetes, Istio, KubeSphere, 服务网格, 微服务" +description: "了解如何启用服务网格,从而提供不同的流量管理策略进行微服务治理。" +linkTitle: "KubeSphere 服务网格" +weight: 6800 +--- + +KubeSphere 服务网格基于 [Istio](https://istio.io/),将微服务治理和流量管理可视化。它拥有强大的工具包,包括**熔断机制、蓝绿部署、金丝雀发布、流量镜像、链路追踪、可观测性和流量控制**等。KubeSphere 服务网格支持代码无侵入的微服务治理,帮助开发者快速上手,Istio 的学习曲线也极大降低。KubeSphere 服务网格的所有功能都旨在满足用户的业务需求。 + +有关更多信息,请参见[灰度发布](../../project-user-guide/grayscale-release/overview/)。 + +## 在安装前启用服务网格 + +### 在 Linux 上安装 + +当您在 Linux 上安装多节点 KubeSphere 时,需要创建一个配置文件,该文件列出了所有 KubeSphere 组件。 + +1. [在 Linux 上安装 KubeSphere](../../installing-on-linux/introduction/multioverview/) 时,您需要创建一个默认文件 `config-sample.yaml`。执行以下命令修改该文件: + + ```bash + vi config-sample.yaml + ``` + + {{< notice note >}} +如果您采用 [All-in-One 安装](../../quick-start/all-in-one-on-linux/),则不需要创建 `config-sample.yaml` 文件,因为可以直接创建集群。一般来说,All-in-One 模式是为那些刚接触 KubeSphere 并希望熟悉系统的用户而准备的。如果您想在该模式下启用服务网格(例如用于测试),请参考[下面的部分](#在安装后启用服务网格),查看如何在安装后启用服务网格。 + {{}} + +2. 在该文件中,搜索 `servicemesh`,并将 `enabled` 的 `false` 改为 `true`。完成后保存文件。 + + ```yaml + servicemesh: + enabled: true # 将“false”更改为“true”。 + istio: # Customizing the istio installation configuration, refer to https://istio.io/latest/docs/setup/additional-setup/customize-installation/ + components: + ingressGateways: + - name: istio-ingressgateway # 将服务暴露至服务网格之外。默认不开启。 + enabled: false + cni: + enabled: false # 启用后,会在 Kubernetes pod 生命周期的网络设置阶段完成 Istio 网格的 pod 流量转发设置工作。 + ``` + + {{< notice note >}} + - 关于开启 Ingress Gateway 后如何访问服务,请参阅 [Ingress Gateway](https://istio.io/latest/zh/docs/tasks/traffic-management/ingress/ingress-control/)。 + - 更多关于 Istio CNI 插件的信息,请参阅[安装 Istio CNI 插件](https://istio.io/latest/zh/docs/setup/additional-setup/cni/)。 + {{}} + +3. 执行以下命令使用该配置文件创建集群: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +### 在 Kubernetes 上安装 + +当您[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 时,需要先在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) 文件中启用服务网格。 + +1. 下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) 文件,执行以下命令打开并编辑该文件: + + ```bash + vi cluster-configuration.yaml + ``` + +2. 
在 `cluster-configuration.yaml` 文件中,搜索 `servicemesh`,并将 `enabled` 的 `false` 改为 `true`。完成后保存文件。 + + ```yaml + servicemesh: + enabled: true # 将“false”更改为“true”。 + istio: # Customizing the istio installation configuration, refer to https://istio.io/latest/docs/setup/additional-setup/customize-installation/ + components: + ingressGateways: + - name: istio-ingressgateway # 将服务暴露至服务网格之外。默认不开启。 + enabled: false + cni: + enabled: false # 启用后,会在 Kubernetes pod 生命周期的网络设置阶段完成 Istio 网格的 pod 流量转发设置工作。 + ``` + +3. 执行以下命令开始安装: + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f cluster-configuration.yaml + ``` + +## 在安装后启用服务网格 + +1. 以 `admin` 用户登录控制台。点击左上角的**平台管理**,选择**集群管理**。 + +2. 点击**定制资源定义**,在搜索栏中输入 `clusterconfiguration`。点击结果查看其详情页。 + + {{< notice info >}} +定制资源定义(CRD)允许用户在不新增 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些定制资源。 + {{}} + +3. 在**自定义资源**中,点击 `ks-installer` 右侧的 ,选择**编辑 YAML**。 + +4. 在该配置文件中,搜索 `servicemesh`,并将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**确定**,保存配置。 + + ```yaml + servicemesh: + enabled: true # 将“false”更改为“true”。 + istio: # Customizing the istio installation configuration, refer to https://istio.io/latest/docs/setup/additional-setup/customize-installation/ + components: + ingressGateways: + - name: istio-ingressgateway # 将服务暴露至服务网格之外。默认不开启。 + enabled: false + cni: + enabled: false # 启用后,会在 Kubernetes pod 生命周期的网络设置阶段完成 Istio 网格的 pod 流量转发设置工作。 + ``` + +5. 在 kubectl 中执行以下命令检查安装过程: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + {{< notice note >}} + + +您可以通过点击控制台右下角的 找到 kubectl 工具。 + {{}} + +## 验证组件的安装 + +{{< tabs >}} + +{{< tab "在仪表板中验证组件的安装" >}} + +进入**系统组件**,检查 **Istio** 标签页中的所有组件是否都处于**健康**状态。如果是,组件安装成功。 + +{{}} + +{{< tab "通过 kubectl 验证组件的安装" >}} + +执行以下命令检查容器组的状态: + +```bash +kubectl get pod -n istio-system +``` + +如果组件运行成功,输出结果可能如下: + +```bash +NAME READY STATUS RESTARTS AGE +istio-ingressgateway-78dbc5fbfd-f4cwt 1/1 Running 0 9m5s +istiod-1-6-10-7db56f875b-mbj5p 1/1 Running 0 10m +jaeger-collector-76bf54b467-k8blr 1/1 Running 0 6m48s +jaeger-operator-7559f9d455-89hqm 1/1 Running 0 7m +jaeger-query-b478c5655-4lzrn 2/2 Running 0 6m48s +kiali-f9f7d6f9f-gfsfl 1/1 Running 0 4m1s +kiali-operator-7d5dc9d766-qpkb6 1/1 Running 0 6m53s +``` + +{{}} + +{{}} diff --git a/content/zh/docs/v3.4/pluggable-components/service-topology.md b/content/zh/docs/v3.4/pluggable-components/service-topology.md new file mode 100644 index 000000000..83bf661b2 --- /dev/null +++ b/content/zh/docs/v3.4/pluggable-components/service-topology.md @@ -0,0 +1,131 @@ +--- +title: "服务拓扑图" +keywords: "Kubernetes, KubeSphere, 服务, 拓扑图" +description: "了解如何启用服务拓扑图,以基于 Weave Scope 查看容器组的上下文详情。" +linkTitle: "服务拓扑图" +weight: 6915 +--- + +您可以启用服务拓扑图以集成 [Weave Scope](https://www.weave.works/oss/scope/)(Docker 和 Kubernetes 的可视化和监控工具)。Weave Scope 使用既定的 API 收集信息,为应用和容器构建拓扑图。服务拓扑图显示在您的项目中,将服务之间的连接关系可视化。 + +## 安装前启用服务拓扑图 + +### 在 Linux 上安装 + +在 Linux 上多节点安装 KubeSphere 时,您需要创建一个配置文件,该文件会列出所有 KubeSphere 组件。 + +1. 
[在 Linux 上安装 KubeSphere](../../installing-on-linux/introduction/multioverview/) 时,您需要创建一个默认文件 `config-sample.yaml`。执行以下命令修改该文件: + + ```bash + vi config-sample.yaml + ``` + + {{< notice note >}} + 如果您采用 [All-in-one 安装](../../quick-start/all-in-one-on-linux/),则不需要创建 `config-sample.yaml` 文件,因为可以直接创建集群。一般来说,All-in-one 模式针对那些刚接触 KubeSphere 并希望熟悉系统的用户。如果您想在该模式下启用服务拓扑图(比如用于测试),请参考[下面的部分](#在安装后启用服务拓扑图),查看如何在安装后启用服务拓扑图。 + + {{}} + +2. 在该文件中,搜索 `network.topology.type`,并将 `none` 改为 `weave-scope`。完成后保存文件。 + + ```yaml + network: + topology: + type: weave-scope # 将“none”更改为“weave-scope”。 + ``` + +3. 执行以下命令使用该配置文件创建集群: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +### 在 Kubernetes 上安装 + +当您[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 时,需要先在[cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) 文件中启用服务拓扑图。 + +1. 下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) 文件并进行编辑。 + + ```bash + vi cluster-configuration.yaml + ``` + +2. 在 `cluster-configuration.yaml` 文件中,搜索 `network.topology.type`,将 `none` 更改为 `weave-scope` 以启用服务拓扑图。完成后保存文件。 + + ```yaml + network: + topology: + type: weave-scope # 将“none”更改为“weave-scope”。 + ``` + +3. 执行以下命令开始安装: + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f cluster-configuration.yaml + ``` + + +## 在安装后启用服务拓扑图 + +1. 以 `admin` 用户登录控制台。点击左上角的**平台管理**,然后选择**集群管理**。 + +2. 点击**定制资源定义**,然后在搜索栏中输入 `clusterconfiguration`。点击搜索结果查看其详情页。 + + {{< notice info >}} +定制资源定义(CRD)允许用户在不新增 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些定制资源。 + {{}} + +3. 在**自定义资源**中,点击 `ks-installer` 右侧的 ,然后选择**编辑 YAML**。 + +4. 在该配置文件中,搜寻到 `network`,将 `network.topology.type` 更改为 `weave-scope`。完成后,点击右下角的**确定**保存配置。 + + ```yaml + network: + topology: + type: weave-scope # 将“none”更改为“weave-scope”。 + ``` + +5. 
在 kubectl 中执行以下命令检查安装过程: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + {{< notice note >}} + +您可以通过点击控制台右下角的 来找到 kubectl 工具。 + {{}} + +## 验证组件的安装 + +{{< tabs >}} + +{{< tab "在仪表板中验证组件的安装" >}} + +进入一个项目中,导航到**应用负载**下的**服务**,即可看到**服务拓扑**页签下**服务**的拓扑图。 + +{{}} + +{{< tab "通过 kubectl 验证组件的安装" >}} + +执行以下命令来检查容器组的状态: + +```bash +kubectl get pod -n weave +``` + +如果组件运行成功,输出结果可能如下: + +```bash +NAME READY STATUS RESTARTS AGE +weave-scope-agent-48cjp 1/1 Running 0 3m1s +weave-scope-agent-9jb4g 1/1 Running 0 3m1s +weave-scope-agent-ql5cf 1/1 Running 0 3m1s +weave-scope-app-5b76897b6f-8bsls 1/1 Running 0 3m1s +weave-scope-cluster-agent-8d9b8c464-5zlpp 1/1 Running 0 3m1s +``` + +{{}} + +{{}} \ No newline at end of file diff --git a/content/zh/docs/v3.4/pluggable-components/uninstall-pluggable-components.md b/content/zh/docs/v3.4/pluggable-components/uninstall-pluggable-components.md new file mode 100644 index 000000000..9d64fdd2c --- /dev/null +++ b/content/zh/docs/v3.4/pluggable-components/uninstall-pluggable-components.md @@ -0,0 +1,204 @@ +--- +title: "卸载可插拔组件" +keywords: "Installer, uninstall, KubeSphere, Kubernetes" +description: "学习如何在 KubeSphere上卸载所有可插拔组件。" +linkTitle: "卸载可插拔组件" +Weight: 6940 +--- + +[启用 KubeSphere 可插拔组件之后](../../pluggable-components/),还可以根据以下步骤卸载他们。请在卸载这些组件之前,备份所有重要数据。 +## 准备工作 + +在卸载除服务拓扑图和容器组 IP 池之外的可插拔组件之前,必须将 CRD 配置文件 `ClusterConfiguration` 中的 `ks-installer` 中的 `enabled` 字段的值从 `true` 改为 `false`。 + +使用下列任一方法更改 `enabled` 字段的值: + +- 运行以下命令编辑 `ks-installer`: + +```bash +kubectl -n kubesphere-system edit clusterconfiguration ks-installer +``` + +- 使用 `admin` 身份登录 KubeSphere Web 控制台,左上角点击**平台管理**,选择**集群管理**,在**定制资源定义**中搜索 `ClusterConfiguration`。有关更多信息,请参见[启用可插拔组件](../../pluggable-components/)。 + +{{< notice note >}} + +更改值之后,需要等待配置更新完成,然后继续进行后续操作。 + +{{}} + +## 卸载 KubeSphere 应用商店 + +将 CRD `ClusterConfiguration` 配置文件中 `ks-installer` 参数的 `openpitrix.store.enabled` 字段的值从 `true` 改为 `false`。 + +## 卸载 KubeSphere DevOps + +1. 卸载 DevOps: + + ```bash + helm uninstall -n kubesphere-devops-system devops + kubectl patch -n kubesphere-system cc ks-installer --type=json -p='[{"op": "remove", "path": "/status/devops"}]' + kubectl patch -n kubesphere-system cc ks-installer --type=json -p='[{"op": "replace", "path": "/spec/devops/enabled", "value": false}]' + ``` +2. 删除 DevOps 资源: + + ```bash + # 删除所有 DevOps 相关资源 + for devops_crd in $(kubectl get crd -o=jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep "devops.kubesphere.io"); do + for ns in $(kubectl get ns -ojsonpath='{.items..metadata.name}'); do + for devops_res in $(kubectl get $devops_crd -n $ns -oname); do + kubectl patch $devops_res -n $ns -p '{"metadata":{"finalizers":[]}}' --type=merge + done + done + done + # 删除所有 DevOps CRD + kubectl get crd -o=jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep "devops.kubesphere.io" | xargs -I crd_name kubectl delete crd crd_name + # 删除 DevOps 命名空间 + kubectl delete namespace kubesphere-devops-system + ``` + + +## 卸载 KubeSphere 日志系统 + +1. 将 CRD `ClusterConfiguration` 配置文件中 `ks-installer` 参数的 `logging.enabled` 字段的值从 `true` 改为 `false`。 + +2. 仅禁用日志收集: + + ```bash + kubectl delete inputs.logging.kubesphere.io -n kubesphere-logging-system tail + ``` + + {{< notice note >}} + + 运行此命令后,默认情况下仍可查看 Kubernetes 提供的容器最近日志。但是,容器历史记录日志将被清除,您无法再浏览它们。 + + {{}} + +3. 
卸载包括 Elasticsearch 的日志系统,请执行以下操作: + + ```bash + kubectl delete crd fluentbitconfigs.logging.kubesphere.io + kubectl delete crd fluentbits.logging.kubesphere.io + kubectl delete crd inputs.logging.kubesphere.io + kubectl delete crd outputs.logging.kubesphere.io + kubectl delete crd parsers.logging.kubesphere.io + kubectl delete deployments.apps -n kubesphere-logging-system fluentbit-operator + helm uninstall elasticsearch-logging --namespace kubesphere-logging-system + ``` + + {{< notice warning >}} + + 此操作可能导致审计、事件和服务网格的异常。 + + {{}} + +3. 运行以下命令: + + ```bash + kubectl delete deployment logsidecar-injector-deploy -n kubesphere-logging-system + kubectl delete ns kubesphere-logging-system + ``` + +## 卸载 KubeSphere 事件系统 + +1. 将 CRD `ClusterConfiguration` 配置文件中 `ks-installer` 参数的 `events.enabled` 字段的值从 `true` 改为 `false`。 + +2. 运行以下命令: + + ```bash + helm delete ks-events -n kubesphere-logging-system + ``` + +## 卸载 KubeSphere 告警系统 + +1. 将 CRD `ClusterConfiguration` 配置文件中 `ks-installer` 参数的 `alerting.enabled` 字段的值从 `true` 改为 `false`。 + +2. 运行以下命令: + + ```bash + kubectl -n kubesphere-monitoring-system delete thanosruler kubesphere + ``` + + {{< notice note >}} + + KubeSphere 3.3 通知系统为默认安装,您无需卸载。 + + {{}} + + +## 卸载 KubeSphere 审计 + +1. 将 CRD `ClusterConfiguration` 配置文件中 `ks-installer` 参数的 `auditing.enabled` 字段的值从 `true` 改为 `false`。 + +2. 运行以下命令: + + ```bash + helm uninstall kube-auditing -n kubesphere-logging-system + kubectl delete crd rules.auditing.kubesphere.io + kubectl delete crd webhooks.auditing.kubesphere.io + ``` + +## 卸载 KubeSphere 服务网格 + +1. 将 CRD `ClusterConfiguration` 配置文件中 `ks-installer` 参数的 `servicemesh.enabled` 字段的值从 `true` 改为 `false`。 + +2. 运行以下命令: + + ```bash + curl -L https://istio.io/downloadIstio | sh - + istioctl x uninstall --purge + + kubectl -n istio-system delete kiali kiali + helm -n istio-system delete kiali-operator + + kubectl -n istio-system delete jaeger jaeger + helm -n istio-system delete jaeger-operator + ``` + +## 卸载网络策略 + +对于 NetworkPolicy 组件,禁用它不需要卸载组件,因为其控制器位于 `ks-controller-manager` 中。如果想要将其从 KubeSphere 控制台中移除,将 CRD `ClusterConfiguration` 配置文件中参数 `ks-installer` 中 `network.networkpolicy.enabled` 的值从 `true` 改为 `false`。 + +## 卸载 Metrics Server + +1. 将 CRD `ClusterConfiguration` 配置文件中参数 `ks-installer` 中 `metrics_server.enabled` 的值从 `true` 改为 `false`。 + +2. 运行以下命令: + + ```bash + kubectl delete apiservice v1beta1.metrics.k8s.io + kubectl -n kube-system delete service metrics-server + kubectl -n kube-system delete deployment metrics-server + ``` + +## 卸载服务拓扑图 + +1. 将 CRD `ClusterConfiguration` 配置文件中参数 `ks-installer` 中 `network.topology.type` 的值从 `weave-scope` 改为 `none`。 + +2. 运行以下命令: + + ```bash + kubectl delete ns weave + ``` + +## 卸载容器组 IP 池 + +将 CRD `ClusterConfiguration` 配置文件中参数 `ks-installer` 中 `network.ippool.type` 的值从 `calico` 改为 `none`。 + +## 卸载 KubeEdge + +1. 将 CRD `ClusterConfiguration` 配置文件中参数 `ks-installer` 中 `kubeedege.enabled` 和 `edgeruntime.enabled` 的值从 `true` 改为 `false`。 + +2. 
运行以下命令: + + ```bash + helm uninstall kubeedge -n kubeedge + kubectl delete ns kubeedge + ``` + + {{< notice note >}} + + 卸载后,您将无法为集群添加边缘节点。 + + {{}} + diff --git a/content/zh/docs/v3.4/project-administration/_index.md b/content/zh/docs/v3.4/project-administration/_index.md new file mode 100644 index 000000000..0c7518ed4 --- /dev/null +++ b/content/zh/docs/v3.4/project-administration/_index.md @@ -0,0 +1,13 @@ +--- +title: "项目管理" +description: "帮助您更好地管理 KubeSphere 项目" +layout: "second" + +linkTitle: "项目管理" +weight: 13000 + +icon: "/images/docs/v3.3/docs.svg" + +--- + +KubeSphere 的项目即 Kubernetes 的命名空间。项目有两种类型,即单集群项目和多集群项目。单集群项目是 Kubernetes 常规命名空间,多集群项目是跨多个集群的联邦命名空间。项目管理员负责创建项目、设置限制范围、配置网络隔离以及其他操作。 diff --git a/content/zh/docs/v3.4/project-administration/container-limit-ranges.md b/content/zh/docs/v3.4/project-administration/container-limit-ranges.md new file mode 100644 index 000000000..00341fae6 --- /dev/null +++ b/content/zh/docs/v3.4/project-administration/container-limit-ranges.md @@ -0,0 +1,49 @@ +--- +title: "容器限制范围" +keywords: 'Kubernetes, KubeSphere, 资源, 配额, 限制, 请求, 限制范围, 容器' +description: '了解如何在项目中设置默认容器限制范围。' +linkTitle: "容器限制范围" +weight: 13400 +--- + +容器所使用的 CPU 和内存资源上限由[项目资源配额](../../workspace-administration/project-quotas/)指定。同时,KubeSphere 使用请求 (Request) 和限制 (Limit) 来控制单个容器的资源(例如 CPU 和内存)使用情况,在 Kubernetes 中也称为 [LimitRange](https://kubernetes.io/zh/docs/concepts/policy/limit-range/)。请求确保容器能够获得其所需要的资源,因为这些资源已经得到明确保障和预留。相反地,限制确保容器不能使用超过特定值的资源。 + +当您创建工作负载(例如部署)时,您可以为容器配置资源请求和资源限制。要预先填充这些请求字段和限制字段的值,您可以设置默认限制范围。 + +本教程演示如何为项目中的容器设置默认限制范围。 + +## 准备工作 + +您需要有一个可用的企业空间、一个项目和一个用户 (`project-admin`)。该用户必须在项目层级拥有 `admin` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../quick-start/create-workspace-and-project/)。 + +## 设置默认限制范围 + +1. 以 `project-admin` 身份登录控制台,进入一个项目。如果该项目是新创建的项目,您在**概览**页面上会看到默认配额尚未设置。点击**默认容器配额未设置**旁的**编辑配额**来配置限制范围。 + +2. 在弹出的对话框中,您可以看到 KubeSphere 默认不设置任何请求或限制。要设置请求和限制来控制 CPU 和内存资源,请移动滑块至期望的值或者直接输入数值。字段留空意味着不设置任何请求或限制。 + + {{< notice note >}} + + 限制必须大于请求。 + + {{}} + +3. 点击**确定**完成限制范围设置。 + +4. 在**项目设置**下的**基本信息**页面,您可以查看项目中容器的默认容器配额。 + +5. 要更改默认容器配额,请在**基本信息**页面点击**管理**,然后选择**编辑默认容器配额**。 + +6. 在弹出的对话框中直接更改容器配额,然后点击**确定**。 + +7. 当您创建工作负载时,容器的请求和限制将预先填充对应的值。 + + {{< notice note >}} + + 有关更多信息,请参见[容器镜像设置](../../project-user-guide/application-workloads/container-image-settings/)中的**资源请求**。 + + {{}} + +## 另请参见 + +[项目配额](../../workspace-administration/project-quotas/) \ No newline at end of file diff --git a/content/zh/docs/v3.4/project-administration/disk-log-collection.md b/content/zh/docs/v3.4/project-administration/disk-log-collection.md new file mode 100644 index 000000000..63578fba7 --- /dev/null +++ b/content/zh/docs/v3.4/project-administration/disk-log-collection.md @@ -0,0 +1,78 @@ +--- +title: "日志收集" +keywords: 'KubeSphere, Kubernetes, 项目, 日志, 收集' +description: '启用日志收集,对日志进行统一收集、管理和分析。' +linkTitle: "日志收集" +weight: 13600 +--- + +KubeSphere 支持多种日志收集方式,使运维团队能够以灵活统一的方式收集、管理和分析日志。 + +本教程演示了如何为示例应用收集日志。 + +## 准备工作 + +- 您需要创建企业空间、项目和帐户 (`project-admin`)。该用户必须被邀请到项目中,并在项目级别具有 `admin` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../quick-start/create-workspace-and-project/)。 + +- 您需要启用 [KubeSphere 日志系统](../../pluggable-components/logging/)。 + +## 启用日志收集 + +1. 以 `project-admin` 身份登录 KubeSphere 的 Web 控制台,进入项目。 + +2. 在左侧导航栏中,选择**项目设置**中的**日志收集**,点击 以启用该功能。 + + +## 创建部署 + +1. 在左侧导航栏中,选择**应用负载**中的**工作负载**。在**部署**选项卡下,点击**创建**。 + +2. 在出现的对话框中,设置部署的名称(例如 `demo-deployment`),选择将要创建资源的项目,点击**下一步**。 + +3. 在**容器组设置**下,点击**添加容器**。 + +4. 在搜索栏中输入 `alpine`,以该镜像(标签:`latest`)作为示例。 + +5. 
向下滚动并勾选**启动命令**。在**命令**和**参数**中分别输入以下值,点击 **√**,然后点击**下一步**。 + + **命令** + + ```bash + /bin/sh + ``` + + **参数** + + ```bash + -c,if [ ! -d /data/log ];then mkdir -p /data/log;fi; while true; do date >> /data/log/app-test.log; sleep 30;done + ``` + + {{< notice note >}} + + 以上命令及参数意味着每 30 秒将日期信息导出到 `/data/log` 的 `app-test.log` 中。 + + {{}} + +6. 在**存储设置**选项卡下,切换 启用**收集卷上日志**,点击**挂载卷**。 + +7. 在**临时卷**选项卡下,输入卷名称(例如 `demo-disk-log-collection`),并设置访问模式和路径。 + + 点击 **√**,然后点击**下一步**继续。 + +8. 点击**高级设置**中的**创建**以完成创建。 + + {{< notice note >}} + + 有关更多信息,请参见[部署](../../project-user-guide/application-workloads/deployments/)。 + + {{}} + +## 查看日志 + +1. 在**部署**选项卡下,点击刚才创建的部署以访问其详情页。 + +2. 在**资源状态**中,点击 查看容器详情,然后点击 `logsidecar-container`(filebeat 容器)日志图标 icon 以检查日志。 + +3. 或者,您也可以使用右下角**工具箱**中的**日志查询**功能来查看标准输出日志。例如,使用该部署的 Pod 名称进行模糊匹配。 + + diff --git a/content/zh/docs/v3.4/project-administration/project-and-multicluster-project.md b/content/zh/docs/v3.4/project-administration/project-and-multicluster-project.md new file mode 100644 index 000000000..5fd4de97e --- /dev/null +++ b/content/zh/docs/v3.4/project-administration/project-and-multicluster-project.md @@ -0,0 +1,97 @@ +--- +title: "项目和多集群项目" +keywords: 'KubeSphere, Kubernetes, 项目, 多集群项目' +description: '了解如何创建不同类型的项目。' + +linkTitle: "项目和多集群项目" +weight: 13100 +--- + +KubeSphere 中的项目即 Kubernetes [命名空间](https://kubernetes.io/zh/docs/concepts/overview/working-with-objects/namespaces/),用于将资源划分成互不重叠的分组。这一功能可在多个租户之间分配集群资源,是一种逻辑分区功能。 + +多集群项目跨集群运行,能为用户提供高可用性,并在问题发生时将问题隔离在某个集群内,避免影响业务。有关更多信息,请参见[多集群管理](../../multicluster-management/)。 + +本教程演示如何管理项目和多集群项目。 + +## 准备工作 + +- 您需要有一个可用的企业空间和一个用户 (`project-admin`)。该用户必须在该企业空间拥有 `workspace-self-provisioner` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../quick-start/create-workspace-and-project/)。 +- 在创建多集群项目前,您需要通过[直接连接](../../multicluster-management/enable-multicluster/direct-connection/)或[代理连接](../../multicluster-management/enable-multicluster/agent-connection/)启用多集群功能。 + +## 项目 + +### 创建项目 + +1. 前往企业空间的**项目**页面,点击**项目**选项卡下的**创建**。 + + {{< notice note >}} + +- 您可以在**集群**下拉列表中更改创建项目的集群。该下拉列表只有在启用多集群功能后才可见。 + +- 如果页面上没有**创建**按钮,则表示您的企业空间没有可用的集群。您需要联系平台管理员或集群管理员,以便在集群中创建企业空间资源。平台管理员或集群管理员需要在**集群管理**页面设置**集群可见性**,才能[将集群分配给企业空间](../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/)。 + + {{}} + +2. 在弹出的**创建项目**窗口中输入项目名称,根据需要添加别名或说明。在**集群设置**下,选择要创建项目的集群(如果没有启用多集群功能,则不会出现此选项),然后点击**确定**。 + +3. 创建的项目会显示在下图所示的列表中。您可以点击项目名称打开**概览**页面。 + + +### 编辑项目 + +1. 前往您的项目,选择**项目设置**下的**基本信息**,在页面右侧点击**管理**。 + +2. 从下拉菜单中选择**编辑信息**。 + + {{< notice note >}} +项目名称无法编辑。如需修改其他信息,请参考相应的文档教程。 + +{{}} + +3. 若要删除项目,选择该下拉菜单中的**删除**,在弹出的对话框中输入项目名称,点击**确定**。 + + {{< notice warning >}} + +项目被删除后无法恢复,项目中的资源也会从项目中移除。 + +{{}} + +## 多集群项目 + +### 创建多集群项目 + +1. 前往企业空间的**项目**页面,点击**多集群项目**选项卡,再点击**创建**。 + + {{< notice note >}} + +- 如果页面上没有**创建**按钮,则表示您的企业空间没有可用的集群。您需要联系平台管理员或集群管理员,以便在集群中创建企业空间资源。平台管理员或集群管理员需要在**集群管理**页面设置**集群可见性**,才能[将集群分配给企业空间](../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/)。 +- 请确保至少有两个集群已分配给您的企业空间。 + + {{}} + +2. 在弹出的**创建多集群项目**窗口中输入项目名称,并根据需要添加别名或说明。在**集群设置**下,点击**添加集群**为项目选择多个集群,然后点击**确定**。 +3. 创建的多集群项目会显示在列表中。点击多集群项目右侧的 ,从下拉菜单中选择一个操作: + + - **编辑信息**:编辑多集群项目的基本信息。 + - **添加集群**:在弹出对话框的下拉列表中选择一个集群并点击**确定**,为多集群项目添加一个集群。 + - **删除**:删除多集群项目。 + +### 编辑多集群项目 + +1. 前往您的多集群项目,选择**项目设置**下的**基本信息**,在页面右侧点击**管理**。 + +2. 从下拉菜单中选择**编辑信息**。 + + {{< notice note >}} + +项目名称无法编辑。如需修改其他信息,请参考相应的文档教程。 + +{{}} + +3. 
若要删除多集群项目,选择该下拉菜单中的**删除项目**,在弹出的对话框中输入项目名称,点击**确定**。 + + {{< notice warning >}} + +多集群项目被删除后无法恢复,项目中的资源也会从项目中移除。 + +{{}} \ No newline at end of file diff --git a/content/zh/docs/v3.4/project-administration/project-gateway.md b/content/zh/docs/v3.4/project-administration/project-gateway.md new file mode 100644 index 000000000..e1524b0a1 --- /dev/null +++ b/content/zh/docs/v3.4/project-administration/project-gateway.md @@ -0,0 +1,64 @@ +--- +title: "项目网关" +keywords: 'KubeSphere, Kubernetes, 项目, 网关, NodePort, LoadBalancer' +description: '了解项目网关的概念以及如何进行管理。' +linkTitle: "项目网关" +weight: 13500 +--- + +KubeSphere 项目中的网关是一个[ NGINX Ingress 控制器](https://www.nginx.com/products/nginx-ingress-controller/)。KubeSphere 内置的用于 HTTP 负载均衡的机制称为[应用路由](../../project-user-guide/application-workloads/routes/),它定义了从外部到集群服务的连接规则。如需允许从外部访问服务,用户可创建路由资源来定义 URI 路径、后端服务名称等信息。 + +KubeSphere 除了提供项目范围的网关外,还提供[集群范围的网关](../../cluster-administration/cluster-settings/cluster-gateway/),使得所有项目都能共享全局网关。 + +本教程演示如何在 KubeSphere 中开启项目网关以从外部访问服务和路由。 + +## 准备工作 + +您需要创建一个企业空间、一个项目和一个用户 (`project-admin`)。该用户必须被邀请至项目,并且在项目中的角色为 `admin`。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../quick-start/create-workspace-and-project/)。 + +## 开启网关 + +1. 以 `project-admin` 用户登录 KubeSphere Web 控制台,进入您的项目,从左侧导航栏进入**项目设置**下的**网关设置**页面,然后点击**开启网关**。 + +2. 在弹出的对话框中选择网关的访问方式。 + + **NodePort**:通过网关访问服务对应的节点端口。 + + **LoadBalancer**:通过网关访问服务的单独 IP 地址。 + +3. 在**开启网关**对话框,您可以启用**链路追踪**。创建自制应用时,您必须开启**链路追踪**,以使用链路追踪功能和[不同的灰度发布策略](../../project-user-guide/grayscale-release/overview/)。如果启用**链路追踪**后无法访问路由,请在路由 (Ingress) 中添加注解(例如 `nginx.ingress.kubernetes.io/service-upstream: true`)。 + +3. 在**配置选项**中,添加键值对,为 NGINX Ingress 控制器的系统组件提供配置信息。有关更多信息,请参阅 [NGINX Ingress 控制器官方文档](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#configuration-options)。 + +4. 
选择访问方式后点击**保存**。 + +## NodePort + +如果您选择 **NodePort**,KubeSphere 将为 HTTP 请求和 HTTPS 请求分别设置一个端口。您可以用 `EIP:NodePort` 或 `Hostname:NodePort` 地址访问服务。 + +例如,如果您的服务配置了的弹性 IP 地址 (EIP),请访问: + +- `http://EIP:32734` +- `https://EIP:32471` + +当创建[路由](../../project-user-guide/application-workloads/routes/) (Ingress) 时,您可以自定义主机名用于访问服务。例如,如果您的路由中配置了服务的主机名,请访问: + +- `http://demo.kubesphere.io:32734` +- `https://demo.kubesphere.io:32471` + +{{< notice note >}} + +- 取决于您的环境,您可能需要在安全组中放行端口并配置相关的端口转发规则 。 + +- 如果使用主机名访问服务,请确保您设置的域名可以解析为对应的 IP 地址。 +- 在生产环境中不建议使用 **NodePort**,请使用 **LoadBalancer**。 + +{{}} + +## LoadBalancer + +在选择 **LoadBalancer** 前,您必须先配置负载均衡器。负载均衡器的 IP 地址将与网关绑定以便内部的服务和路由可以访问。 +{{< notice note >}} +云厂商通常支持负载均衡器插件。如果在主流的 Kubernetes Engine 上安装 KubeSphere,您可能会发现环境中已有可用的负载均衡器。如果在裸金属环境中安装 KubeSphere,您可以使用 [OpenELB](https://github.com/kubesphere/openelb) 作为负载均衡器。 + +{{}} \ No newline at end of file diff --git a/content/zh/docs/v3.4/project-administration/project-network-isolation.md b/content/zh/docs/v3.4/project-administration/project-network-isolation.md new file mode 100644 index 000000000..bfb07de28 --- /dev/null +++ b/content/zh/docs/v3.4/project-administration/project-network-isolation.md @@ -0,0 +1,210 @@ +--- +title: "项目网络隔离" +keywords: 'KubeSphere, Kubernetes, Calico, 网络策略' +description: '了解网络隔离的概念以及如何配置项目网络策略。' +linkTitle: "项目网络隔离" +weight: 13300 +--- + +项目网络隔离使项目管理员能够使用不同的规则来放行不同的网络流量。本教程演示如何开启项目间的网络隔离并设置规则控制网络流量。 + +## 准备工作 + +- 已经启用[网络策略](../../pluggable-components/network-policy/)。 +- 您必须有一个可用的项目和一个在项目层级拥有 `admin` 角色的用户 (`project-admin`)。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../quick-start/create-workspace-and-project/)。 + +{{< notice note >}} + +关于网络策略的实现,您可以参考 [KubeSphere NetworkPolicy](https://github.com/kubesphere/community/blob/master/sig-network/concepts-and-designs/kubesphere-network-policy.md)。 + +{{}} + +## 开启/关闭项目网络隔离 + +1. 以 `project-admin` 身份登录 KubeSphere 控制台,进入您的项目,在**项目设置**下选择**网络隔离**。项目网络隔离默认关闭。 + +2. 要启用项目网络隔离,请点击**开启**。 + + {{< notice note >}} + + 当网络隔离开启时,默认放行出站流量,而不同项目的进站流量将被拒绝。若您添加出站网络策略,只有符合策略的流量才会被放行。 + + {{}} + +3. 您也可以在这个页面关闭网络隔离。 + + {{< notice note >}} + + 关闭网络隔离时,先前创建的所有网络策略都将被删除。 + + {{}} + +## 设置网络策略 + +若开启网络隔离后的默认策略无法满足您的需求,您可以自定义网络策略来满足您的需求。目前,您可以在 KubeSphere 中为集群内部的流量或来自集群外部的入站流量添加自定义网络策略。 + +### 集群内部的流量 + +集群内部项目层级的网络策略用于控制同一集群内的其他项目是否能访问该项目中的资源,以及您能访问哪些服务 (Service)。 + +假设在另一个项目 `demo-project-2` 中已创建一个 NGINX 部署 (Deployment) 工作负载,并通过 `nginx` 服务使用 `TCP` 协议在 `80` 端口进行暴露。下面是如何设置入站和出站流量规则的示例。 + +{{< notice note >}} + +有关如何创建工作负载的更多信息,请分别参见[部署](../../project-user-guide/application-workloads/deployments/)和[服务](../../project-user-guide/application-workloads/services/)。 + +{{}} + +#### 放行来自不同项目的工作负载的入站流量 + +1. 在当前项目的**网络隔离**页面,选择**内部白名单**选项卡。 + +2. 点击**添加白名单条目**。 + +3. 在**流量方向**下选择**入站**。 + +4. 在**类型**下选择**项目**选项卡。 + +5. 选择 `demo-project-2` 项目。 + +6. 点击**确定**,然后您可以在白名单中看到该项目。 + + +{{< notice note >}} + +如果设置网络策略后仍无法访问该网络,您需要检查对等项目是否设置有相应的出站规则。 + +{{}} + +#### 放行前往不同项目的服务的出站流量 + +1. 在当前项目的**网络隔离**页面,选择**内部白名单**选项卡。 + +2. 点击**添加白名单条目**。 + +3. 在**流量方向**下选择**出站**。 + +4. 在**类型**下选择**服务**选项卡。 + +5. 在下拉列表中选择 `demo-project-2` 项目。 + +6. 选择允许接收出站流量的服务。在本例中,请选择 `nginx`。 + +7. 点击**确定**,然后您可以在白名单中看到该服务。 + + +{{< notice note >}} + +创建服务时,您必须确保该服务的选择器不为空。 + +{{}} + +### 集群外部的入站流量 + +KubeSphere 使用 CIDR 来区分对等方。假设当前项目中已创建一个 Tomcat 部署,并通过 `NodePort` 服务 `demo-service` 使用 `TCP` 协议在 `80` 端口进行暴露。要让 IP 地址为 `192.168.1.1` 的外部客户端访问该服务,您需要为其添加一个规则。 + +#### 放行来自集群外部客户端的入站流量 + +1. 在当前项目的**网络隔离**页面,选择**外部白名单**选项卡,然后点击**添加白名单条目**。 + +2. 在**流量方向**下选择**入站**。 + +3. 在 **网段** 中输入 `192.168.1.1/32`。 + +4. 
选择 `TCP` 协议并输入 `80` 作为端口号。 + +5. 点击**确定**,然后您可以看到该规则已经添加。 + + +{{< notice note >}} + +建议在服务配置中将 `spec.externalTrafficPolicy` 设置为 `local`,以便数据包的源地址保持不变,即数据包的源地址就是客户端的源地址。 + +{{}} + +假设外部客户端的 IP 地址是 `http://10.1.0.1:80`,您需要为出站流量设置规则,以便内部服务可以访问它。 + +#### 放行前往集群外部服务的出站流量 + +1. 在当前项目的**网络隔离**页面,选择**外部白名单**选项卡,然后点击**添加白名单条目**。 + +2. 在**流量方向**下选择**出站**。 + +3. 在 **网段** 中输入 `10.1.0.1/32`。 + +4. 选择 `TCP` 协议并输入 `80` 作为端口号。 + +5. 点击**确定**,然后您可以看到该规则已经添加。 + + +{{< notice note >}} + +在步骤 4 中,若您选择 **SCTP**,请务必确保 SCTP [已启用](https://kubernetes.io/zh/docs/concepts/services-networking/network-policies/#sctp-支持)。 + +{{}} + +### 最佳做法 + +要确保一个项目中的所有 Pod 都安全,一个最佳做法是启用网络隔离。当网络隔离开启时,其他项目无法访问当前项目。如果需要让其他工作负载访问当前工作负载,您需要按照以下步骤操作: + +1. 在**项目设置**中设置[网关](../../project-administration/project-gateway/)。 +2. 通过服务将需要被访问的工作负载暴露给网关。 +3. 放行来自网关所在命名空间的入站流量。 + +如果出站流量受控,您需要对能够访问哪些项目、服务和 IP 地址有一个清晰的计划,并逐个添加规则。如果您不确定要制定什么规则,建议保持现有网络策略不变。 + +## 常见问题 + +问:开启网络隔离后,为什么 KubeSphere 自定义监控系统无法获取数据? + +答:您启用自定义监控后,KubeSphere 监控系统将访问 Pod 的指标。您需要放行来自 KubeSphere 监控系统的入站流量,否则无法访问 Pod 指标。 + +KubeSphere 提供 `allowedIngressNamespaces` 配置项来简化类似配置,在配置中列出的所有项目都会被放行。 + +```yaml +root@node1:~# kubectl get -n kubesphere-system clusterconfigurations.installer.kubesphere.io ks-installer -o yaml +apiVersion: installer.kubesphere.io/v1alpha1 +kind: ClusterConfiguration +metadata: + ... + name: ks-installer + namespace: kubesphere-system + ... +spec: + ... + networkpolicy: + enabled: true + nsnpOptions: + allowedIngressNamespaces: + - kubesphere-system + - kubesphere-monitoring-system + ... +``` + +问:通过服务 (Service) 设置网络策略后,为什么无法访问服务? + +答:若您添加网络策略后通过集群 IP 地址访问服务但网络不通,请检查 kube-proxy 配置中的 `masqueradeAll` 是否为 `false`。 + + ```yaml + root@node1:~# kubectl get cm -n kube-system kube-proxy -o yaml + apiVersion: v1 + data: + config.conf: |- + ... + iptables: + masqueradeAll: false + ... + ... + kind: ConfigMap + metadata: + ... + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system + ... + ``` + +问:设置入站规则时,如何确定 CIDR? + +答:在 Kubernetes 中,数据包的源 IP 地址通常由 NAT 处理,因此您需要确定数据包的源地址,然后再添加规则。有关更多信息,请参考 [Source IP](https://github.com/kubesphere/community/blob/master/sig-network/concepts-and-designs/kubesphere-network-policy.md#source-ip)。 diff --git a/content/zh/docs/v3.4/project-administration/role-and-member-management.md b/content/zh/docs/v3.4/project-administration/role-and-member-management.md new file mode 100644 index 000000000..f68a4f3db --- /dev/null +++ b/content/zh/docs/v3.4/project-administration/role-and-member-management.md @@ -0,0 +1,79 @@ +--- +title: "项目角色和成员管理" +keywords: 'KubeSphere, Kubernetes, 角色, 成员, 管理, 项目' +description: '了解如何进行项目访问管理。' +linkTitle: "项目角色和成员管理" +weight: 13200 +--- + +本教程演示如何在项目中管理角色和成员。在项目级别,您可以向角色授予以下模块中的权限: + +- **应用负载** +- **存储管理** +- **配置中心** +- **监控告警** +- **访问控制** +- **项目设置** + +## 准备工作 + +您需要至少创建一个项目(例如 `demo-project`)。此外,您还需要准备一个在项目级别具有 `admin` 角色的用户(例如 `project-admin`)。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../quick-start/create-workspace-and-project/)。 + +## 内置角色 + +**项目角色**页面列出了以下三个可用的内置角色。创建项目时,KubeSphere 会自动创建内置角色,并且内置角色无法进行编辑或删除。您只能查看内置角色的权限或将其分配给用户。 + + + + + + + + + + + + + + + + + + +
| 内置角色 | 描述 |
| -------- | ---- |
| viewer | 项目观察者,可以查看项目下所有的资源。 |
| operator | 项目维护者,可以管理项目下除用户和角色之外的资源。 |
| admin | 项目管理员,可以对项目下的所有资源执行所有操作。此角色可以完全控制项目下的所有资源。 |
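KubeSphere 的项目角色基于 Kubernetes RBAC 实现。如果您习惯使用命令行,通常也可以通过 kubectl 查看项目命名空间中的角色资源。以下命令仅为参考示例,其中的项目名称 `demo-project` 为假设值:

```bash
# 仅为示例:列出假设项目 demo-project(即同名命名空间)中的 RBAC 角色
kubectl get roles -n demo-project
```

输出中一般可以看到与控制台内置角色对应的条目,实际名称和数量请以控制台显示为准。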
+ +若要查看角色所含权限: + +1. 以 `project-admin` 身份登录控制台。在**项目角色**中,点击一个角色(例如,`admin`)以查看角色详情。 + +2. 点击**授权用户**选项卡,查看所有被授予该角色的用户。 + +## 创建项目角色 + +1. 转到**项目设置**下的**项目角色**。 + +2. 在**项目角色**中,点击**创建**并设置**角色标识符**(例如,`project-monitor`)。点击**编辑权限**继续。 + +3. 在弹出的窗口中,权限归类在不同的**功能模块**下。在本示例中,为该角色选择**应用负载**中的**应用负载查看**,以及**监控告警**中的**告警消息查看**和**告警策略查看**。点击**确定**完成操作。 + + {{< notice note >}} + +**依赖于**表示当前授权项依赖所列出的授权项,勾选该权限后系统会自动选上所有依赖权限。 + +{{}} + +4. 新创建的角色将在**项目角色**中列出,点击右侧的 以编辑该角色。 + + +## 邀请新成员 + +1. 转到**项目设置**下的**项目成员**,点击**邀请**。 + +2. 点击右侧的 以邀请一名成员加入项目,并为其分配一个角色。 + +3. 将成员加入项目后,点击**确定**。您可以在**项目成员**列表中查看新邀请的成员。 + +4. 若要编辑现有成员的角色或将其从项目中移除,点击右侧的 并选择对应的操作。 + + + diff --git a/content/zh/docs/v3.4/project-user-guide/_index.md b/content/zh/docs/v3.4/project-user-guide/_index.md new file mode 100644 index 000000000..b868e4620 --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/_index.md @@ -0,0 +1,12 @@ +--- +title: "项目用户指南" +description: "帮助您更好地管理 KubeSphere 项目中的资源" +layout: "second" + +linkTitle: "项目用户指南" +weight: 10000 + +icon: "/images/docs/v3.3/docs.svg" +--- + +在 KubeSphere 中,具有必要权限的项目用户能够执行一系列任务,例如创建各种工作负载,配置卷、密钥和 ConfigMap,设置各种发布策略,监控应用程序指标以及创建告警策略。由于 KubeSphere 具有极大的灵活性和兼容性,无需将任何代码植入到原生 Kubernetes 中,因此用户可以在测试、开发和生产环境快速上手 KubeSphere 的各种功能。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/project-user-guide/alerting/_index.md b/content/zh/docs/v3.4/project-user-guide/alerting/_index.md new file mode 100644 index 000000000..22c233ac3 --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/alerting/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "告警" +weight: 10700 + +_build: + render: false +--- diff --git a/content/zh/docs/v3.4/project-user-guide/alerting/alerting-message.md b/content/zh/docs/v3.4/project-user-guide/alerting/alerting-message.md new file mode 100644 index 000000000..0dbba710c --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/alerting/alerting-message.md @@ -0,0 +1,28 @@ +--- +title: "告警消息(工作负载级别)" +keywords: 'KubeSphere, Kubernetes, 工作负载, 告警, 消息, 通知' +description: '了解如何查看工作负载的告警策略。' + +linkTitle: "告警消息(工作负载级别)" +weight: 10720 +--- + +告警消息中记录着按照告警规则触发的告警的详细信息。本教程演示如何查看工作负载级别的告警消息。 + +## 准备工作 + +* 您需要启用 [KubeSphere 告警系统](../../../pluggable-components/alerting/)。 +* 您需要创建一个企业空间、一个项目和一个用户 (`project-regular`)。该用户必须已邀请至该项目,并具有 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 +* 您需要创建一个工作负载级别的告警策略并且已经触发该告警。有关更多信息,请参考[告警策略(工作负载级别)](../alerting-policy/)。 + +## 查看告警消息 + +1. 使用 `project-regular` 帐户登录控制台并进入您的项目,导航到**监控告警**下的**告警消息**。 + +2. 在**告警消息**页面,可以看到列表中的全部告警消息。第一列显示您在告警通知中定义的标题和消息。如需查看某一告警消息的详情,点击该告警策略的名称,然后在显示的页面中点击**告警历史**选项卡。 + +3. 
在**告警历史**选项卡,您可以看到告警级别、监控目标以及告警激活时间。 + +## 查看通知 + +如果需要接收告警通知(例如,邮件和 Slack 消息),则须先配置[一个通知渠道](../../../cluster-administration/platform-settings/notification-management/configure-email/)。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/project-user-guide/alerting/alerting-policy.md b/content/zh/docs/v3.4/project-user-guide/alerting/alerting-policy.md new file mode 100644 index 000000000..62390f8ac --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/alerting/alerting-policy.md @@ -0,0 +1,60 @@ +--- +title: "告警策略(工作负载级别)" +keywords: 'KubeSphere, Kubernetes, 工作负载, 告警, 策略, 通知' +description: '了解如何为工作负载设置告警策略。' +linkTitle: "告警策略(工作负载级别)" +weight: 10710 +--- + +KubeSphere 支持针对节点和工作负载的告警策略。本教程演示如何为项目中的工作负载创建告警策略。有关如何为节点配置告警策略,请参见[告警策略(节点级别)](../../../cluster-administration/cluster-wide-alerting-and-notification/alerting-policy/)。 + +## 准备工作 + +- 您需要启用 [KubeSphere 告警系统](../../../pluggable-components/alerting/)。 +- 若想接收告警通知,您需要预先配置一个[通知渠道](../../../cluster-administration/platform-settings/notification-management/configure-email/)。 +- 您需要创建一个企业空间、一个项目和一个用户(例如 `project-regular`)。该用户必须已邀请至该项目,并具有 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要确保项目中存在工作负载。如果项目中没有工作负载,请参见[部署并访问 Bookinfo](../../../quick-start/deploy-bookinfo-to-k8s/) 来创建示例应用。 + +## 创建告警策略 + +1. 以 `project-regular` 身份登录控制台并访问您的项目。导航到**监控告警**下的**告警策略**,点击**创建**。 + +2. 在弹出的对话框中,提供如下基本信息。点击**下一步**继续。 + + - **名称**:使用简明名称作为其唯一标识符,例如 `alert-demo`。 + - **别名**:帮助您更好地识别告警策略。 + - **描述信息**:对该告警策略的简要介绍。 + - **阈值时间(分钟)**:告警规则中设置的情形持续时间达到该阈值后,告警策略将变为触发中状态。 + - **告警级别**:提供的值包括**一般告警**、**重要告警**和**危险告警**,代表告警的严重程度。 + +3. 在**规则设置**选项卡,您可以使用规则模板或创建自定义规则。若想使用模板,请填写以下字段。 + + - **资源类型**:选择想要监控的资源类型,例如**部署**、**有状态副本集**或**守护进程集**。 + - **监控目标**:取决于您所选择的资源类型,目标可能有所不同。如果项目中没有工作负载,则无法看到任何监控目标。 + - **告警规则**:为告警策略定义规则。这些规则基于 Prometheus 表达式,满足条件时将会触发告警。您可以对 CPU、内存等对象进行监控。 + + {{< notice note >}} + + 您可以在**监控指标**字段输入表达式(支持自动补全),以使用 PromQL 创建自定义规则。有关更多信息,请参见 [Querying Prometheus](https://prometheus.io/docs/prometheus/latest/querying/basics/)。 + + {{}} + + 点击**下一步**继续。 + +4. 在**消息设置**选项卡,输入想要在包含在通知中的告警标题和消息,然后点击**创建**。 + +5. 告警策略刚创建后将显示为**未触发**状态;一旦满足规则表达式中的条件,则会首先达到**待触发**状态;满足告警条件的时间达到阈值时间后,将变为**触发中**状态。 + +## 编辑告警策略 + +若要在创建后编辑告警策略,点击**告警策略**页面右侧的 。 + +1. 点击下拉菜单中的**编辑**,按照创建时相同的步骤来编辑告警策略。点击**消息设置**页面的**确定**保存更改。 + +2. 
点击下拉菜单中的**删除**来删除告警策略。 + +## 查看告警策略 + +在**告警策略**页面,点击任一告警策略来查看其详情,包括告警规则和告警历史。您还可以看到创建告警策略时基于所使用模板的告警规则表达式。 + +在**告警监控**下,**告警监控**图显示一段时间内的实际资源使用情况或使用量。**告警消息**显示您在通知中设置的自定义消息。 diff --git a/content/zh/docs/v3.4/project-user-guide/application-workloads/_index.md b/content/zh/docs/v3.4/project-user-guide/application-workloads/_index.md new file mode 100644 index 000000000..009f71bd9 --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/application-workloads/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "应用负载" +weight: 10200 + +_build: + render: false +--- diff --git a/content/zh/docs/v3.4/project-user-guide/application-workloads/container-image-settings.md b/content/zh/docs/v3.4/project-user-guide/application-workloads/container-image-settings.md new file mode 100644 index 000000000..9bbd013ce --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/application-workloads/container-image-settings.md @@ -0,0 +1,268 @@ +--- +title: "容器组设置" +keywords: 'KubeSphere, Kubernetes, 镜像, 工作负载, 设置, 容器' +description: '在为工作负载设置容器组时,详细了解仪表板上的不同属性。' + +weight: 10280 +--- + +创建部署 (Deployment)、有状态副本集 (StatefulSet) 或者守护进程集 (DaemonSet) 时,您需要指定一个容器组。同时,KubeSphere 向用户提供多种选项,用于自定义工作负载配置,例如健康检查探针、环境变量和启动命令。本页内容详细说明了**容器组设置**中的不同属性。 + +{{< notice tip >}} + +您可以在右上角启用**编辑 YAML**,查看仪表板上的属性对应到清单文件(YAML 格式)中的值。 + +{{}} + +## 容器组设置 + +### 容器组副本数量 + +点击 iconicon 图标设置容器组副本数量,该参数显示在清单文件中的 `.spec.replicas` 字段。该选项对守护进程集不可用。 + +如果您在多集群项目中创建部署,请在**副本调度模式**下选择一个副本调度模式: + +- **指定副本数量**:选择集群并设置每个集群的容器组副本数。 +- **指定权重**:选择集群,在**副本总数**中设置容器组副本总数,并指定每个集群的权重。容器组副本将根据权重成比例地调度到每个集群。若要在创建部署后修改权重,请点击部署名称前往其详情页,在**资源状态**页签下的**权重**区域修改权重。 + +如果您在多集群项目中创建有状态副本集,请在**容器组副本数量**下选择集群并设置每个集群的容器组副本数。 + +### 添加容器 + +点击**添加容器**来添加容器。 + +#### 镜像搜索栏 + +您可以点击右边的 icon,从列表中选择一个镜像,或者输入镜像名称进行搜索。KubeSphere 提供 Docker Hub 的镜像以及您的私有镜像仓库的镜像。如果想使用私有镜像仓库,您需要先在**配置**下的**保密字典**中创建镜像仓库保密字典。 + +{{< notice note >}} + +在搜索栏输入镜像名称后,请记得按键盘上的**回车键**。 + +{{}} + +#### 镜像标签 + +您可以输入一个标签,例如 `imagename:tag`。如果您不指定标签,则会默认为最新版本。 + +#### 容器名称 + +容器名称由 KubeSphere 自动创建,显示在 `.spec.containers.name` 中。 + +#### 容器类型 + +如果您选择**初始容器**,则会为该工作负载创建初始容器。有关初始容器的更多信息,请访问 [Init 容器](https://kubernetes.io/zh/docs/concepts/workloads/pods/init-containers/)。 + +#### 资源请求 + +容器预留的资源配额包括 CPU 和内存资源。这意味着容器独占这些资源,防止其他服务或进程因资源不足争夺资源而导致应用程序不可用。 + +- CPU 预留显示在清单文件中的 `.spec.containers[].resources.requests.cpu`,实际用量可以超过 CPU 预留。 +- 内存预留显示在清单文件中的 `.spec.containers[].resources.requests.memory`。实际用量可以超过内存预留,但节点内存不足时可能会清理容器。 + +#### 资源限制 + +您可以指定应用程序能使用的资源上限,包括 CPU、内存、GPU,防止占用过多资源。 + +- CPU 限制显示在清单文件中的 `.spec.containers[].resources.limits.cpu`。实际用量可以短时间超过 CPU 限制,容器不会被停止。 +- 内存限制显示在清单文件中的 `.spec.containers[].resources.limits.memory`。实际用量不能超过内存限制,如果超过了,容器可能会被停止或者被调度到其他资源充足的机器上。 + +{{< notice note >}} + +CPU 资源以 CPU 单位计量,即 KubeSphere 中的 **Core**。内存资源以字节计量,即 KubeSphere 中的 **MiB**。 + +{{}} + +要设置 **GPU 类型**,请在下拉列表中选择一个 GPU 类型,默认为 `nvidia.com/gpu`。**GPU 限制**默认为不限制。 + +#### **端口设置** + +您需要为容器设置访问协议和端口信息。请点击**使用默认端口**以自动填充默认设置。 + +#### **镜像拉取策略** + +该值显示在 `imagePullPolicy` 字段。在仪表板上,您可以从下拉列表的以下三个选项中选择一个。 + +- **优先使用本地镜像**:只有本地不存在镜像时才会拉取镜像。 + +- **每次都拉取镜像**:只要启动容器组就会拉取镜像。 + +- **仅使用本地镜像**:无论镜像是否存在都不会拉取镜像。 + +{{< notice tip>}} + +- 默认值是**优先使用本地镜像**,但标记为 `:latest` 的镜像的默认值是**每次都拉取镜像**。 +- Docker 会在拉取镜像时进行检查,如果 MD5 值没有变,则不会拉取镜像。 +- 在生产环境中应尽量避免使用 `:latest`,在开发环境中使用 `:latest` 会自动拉取最新的镜像。 + +{{< /notice >}} + +#### **健康检查** + +支持存活检查、就绪检查和启动检查。 + +- **存活检查**:使用存活探针检测容器是否在运行,该参数显示在 `livenessProbe` 字段。 + +- **就绪检查**:使用就绪探针检测容器是否准备好处理请求,该参数显示在 `readinessProbe` 字段。 + +- **启动检查**:使用启动探针检测容器应用程序是否已经启动,该参数显示在 `startupProbe` 字段。 + 
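下面给出一个最小示例(仅供参考,其中的容器名称、镜像、`/healthz` 路径和 8080 端口均为假设值),展示这三类探针在容器配置中的大致写法,各项参数的含义见下文说明;实际配置请以控制台生成的 YAML 为准:

```yaml
# 最小示例:同时配置存活、就绪和启动探针(路径与端口均为假设值)
containers:
  - name: demo-container
    image: nginx
    livenessProbe:              # 存活检查
      httpGet:
        path: /healthz
        port: 8080
      initialDelaySeconds: 10   # 初始延迟(s)
      periodSeconds: 10         # 检查间隔(s)
    readinessProbe:             # 就绪检查
      tcpSocket:
        port: 8080
      timeoutSeconds: 1         # 超时时间(s)
      failureThreshold: 3       # 失败阈值
    startupProbe:               # 启动检查
      exec:
        command: ["cat", "/tmp/healthy"]
      periodSeconds: 5
      failureThreshold: 30
```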
+存活、就绪和启动检查包含以下配置: + +- **HTTP 请求**:在容器 IP 地址的指定端口和路径上执行 HTTP `Get` 请求,如果响应状态码大于等于 200 且小于 400,则认为诊断成功。支持的参数包括: + + - **路径**:HTTP 或 HTTPS,由 `scheme` 指定;访问 HTTP 服务器的路径,由 `path` 指定;访问端口或端口名由容器暴露,端口号必须在 1 和 65535 之间,该值由 `port` 指定。 + - **初始延迟(s)**:容器启动后,存活探针启动之前等待的秒数,由 `initialDelaySeconds` 指定。默认为 0。 + - **检查间隔(s)**:探测频率(以秒为单位),由 `periodSeconds` 指定。默认为 10,最小值为 1。 + - **超时时间(s)**:探针超时的秒数,由 `timeoutSeconds` 指定。默认为 1,最小值为 1。 + - **成功阈值**:探测失败后,视为探测成功的最小连续成功次数,由 `successThreshold` 指定。默认为 1,存活探针和启动探针的该值必须为 1。最小值为 1。 + - **失败阈值**:探测成功后,视为探测失败的最小连续失败次数,由 `failureThreshold` 指定。默认为 3,最小值为 1。 + +- **TCP 端口**:在容器 IP 地址的指定端口上执行 TCP 检查。如果该端口打开,则认为诊断成功。支持的参数包括: + + - **端口**:访问端口或端口名由容器暴露。端口号必须在 1 和 65535 之间。该值由 `port` 指定。 + - **初始延迟(s)**:容器启动后,存活探针启动之前等待的秒数,由 `initialDelaySeconds` 指定。默认为 0。 + - **检查间隔(s)**:探测频率(以秒为单位),由 `periodSeconds` 指定。默认为 10,最小值为 1。 + - **超时时间(s)**:探针超时的秒数,由 `timeoutSeconds` 指定。默认为 1,最小值为 1。 + - **成功阈值**:探测失败后,视为探测成功的最小连续成功次数,由 `successThreshold` 指定。默认为 1,存活探针和启动探针的该值必须为 1。最小值为 1。 + - **失败阈值**:探测成功后,视为探测失败的最小连续失败次数,由 `failureThreshold` 指定。默认为 3,最小值为 1。 + +- **命令**:在容器中执行指定命令。如果命令退出时返回代码为 0,则认为诊断成功。支持的参数包括: + + - **命令**:用于检测容器健康状态的检测命令,由 `exec.command` 指定。 + - **初始延迟(s)**:容器启动后,存活探针启动之前等待的秒数,由 `initialDelaySeconds` 指定。默认为 0。 + - **检查间隔(s)**:探测频率(以秒为单位),由 `periodSeconds` 指定。默认为 10,最小值为 1。 + - **超时时间(s)**:探针超时的秒数,由 `timeoutSeconds` 指定。默认为 1,最小值为 1。 + - **成功阈值**:探测失败后,视为探测成功的最小连续成功次数,由 `successThreshold` 指定。默认为 1,存活探针和启动探针的该值必须为 1。最小值为 1。 + - **失败阈值**:探测成功后,视为探测失败的最小连续失败次数,由 `failureThreshold` 指定。默认为 3,最小值为 1。 + +有关健康检查的更多信息,请访问[容器探针](https://kubernetes.io/zh/docs/concepts/workloads/pods/pod-lifecycle/#container-probes)。 + +#### **启动命令** + +默认情况下,容器会运行默认镜像命令。 + +- **命令**对应清单文件中容器的 `command` 字段。 +- **参数**对应清单文件中容器的 `args` 字段。 + +有关该命令的更多信息,请访问[为容器设置启动时要执行的命令和参数](https://kubernetes.io/zh/docs/tasks/inject-data-application/define-command-argument-container/)。 + +#### **环境变量** + +以键值对形式为容器组配置环境变量。 + +- 名称:环境变量的名称,由 `env.name` 指定。 +- 值:变量引用的值,由 `env.value` 指定。 +- 点击**使用配置字典或保密字典**来使用现有的配置字典或保密字典。 + +有关该命令的更多信息,请访问[容器组变量](https://kubernetes.io/zh/docs/tasks/inject-data-application/environment-variable-expose-pod-information/)。 + +#### **容器安全上下文** + +安全上下文(Security Context)定义容器组或容器的特权和访问控制设置。有关安全上下文的更多信息,请访问 [容器组安全策略](https://kubernetes.io/docs/concepts/security/pod-security-policy/)。 + +#### **同步主机时区** + +同步后,容器的时区将和主机的时区一致。 + +## **更新策略** + +### 容器组更新 + +不同工作负载使用不同的更新策略。 + +{{< tabs >}} + +{{< tab "部署" >}} + +`.spec.strategy` 字段指定用于用新容器组替换旧容器组的策略。`.spec.strategy.type` 可以是 `Recreate` 或 `RollingUpdate`。默认值是 `RollingUpdate`。 + +- **滚动更新(推荐)** + + 滚动更新将逐步用新版本的实例替换旧版本的实例。升级过程中,流量会同时负载均衡分布到新老版本的实例上,因此服务不会中断。 + +- **同时更新** + + 替换升级会先删除现有的容器组,再创建新的容器组。请注意,升级过程中服务会中断。 + +有关升级策略的更多信息,请访问[部署的策略部分](https://kubernetes.io/zh/docs/concepts/workloads/controllers/deployment/#strategy)。 + +{{}} + +{{< tab "有状态副本集" >}} + +**更新策略**下的下拉菜单显示在清单文件中有状态副本集的 `.spec.updateStrategy` 字段。您可以处理容器组容器、标签、资源预留或限制以及注解的更新。有两种策略: + +- **滚动更新(推荐)** + + 如果 `.spec.template` 已更新,有状态副本集中的容器组将被自动删除,并创建新的容器组来替换。容器组将按照反向顺序更新,依次删除和创建。前一个容器组更新完成并开始运行后,才会开始更新下一个新的容器组。 + +- **删除容器组时更新** + + 如果 `.spec.template` 已更新,有状态副本集中的容器组将不会自动更新。您需要手动删除旧的容器组,控制器才会创建新的容器组。 + +有关更新策略的更多信息,请访问[有状态副本集更新策略](https://kubernetes.io/zh/docs/concepts/workloads/controllers/statefulset/#update-strategies)。 + +{{}} + +{{< tab "守护进程集" >}} + +**更新策略**下的下拉菜单显示在清单文件中守护进程集的 `.spec.updateStrategy` 字段。您可以处理容器组容器、标签、资源预留或限制以及注解的更新。有两种策略: + +- **滚动更新(推荐)** + + 如果 `.spec.template` 已更新,旧的守护进程集容器组将被终止,并以受控方式自动创建新的容器组。整个更新过程中,每个节点上至多只有一个守护进程集的容器组运行。 + +- **删除容器组时更新** + + 如果 
`.spec.template` 已更新,只有当您手动删除旧的守护进程集容器组时才会创建新的守护进程集容器组。这与 1.5 或之前版本 Kubernetes 中的守护进程集的操作行为相同。 + +有关更新策略的更多信息,请访问[守护进程集更新策略](https://kubernetes.io/zh/docs/tasks/manage-daemon/update-daemon-set/#daemonset-%E6%9B%B4%E6%96%B0%E7%AD%96%E7%95%A5)。 + +{{}} + +{{}} + +### 滚动更新设置 + +{{< tabs >}} + +{{< tab "部署" >}} + +部署中的**滚动更新设置**与有状态副本集中的不同。 + +- **最大不可用容器组数量**:升级过程中允许不可用的容器组的最大数量,由 `maxUnavailable` 指定。默认值是 25%。 +- **最大多余容器组数量**:可调度的超过期望数量的容器组的最大数量,由 `maxSurge` 指定。默认值是 25%。 + +{{}} + +{{< tab "有状态副本集" >}} + +**容器组副本分组序号**:如果您对更新进行分区,当更新有状态副本集的容器组配置时,所有序号大于等于该分区序号值的容器组都会被更新。该字段由 `.spec.updateStrategy.rollingUpdate.partition` 指定,默认值是 0。有关分区的更多信息,请访问[分区](https://kubernetes.io/zh/docs/concepts/workloads/controllers/statefulset/#partitions)。 + +{{}} + +{{< tab "守护进程集" >}} + +守护进程集中的**滚动更新设置**与有状态副本集中的不同。 + +- **最大不可用容器组数量**:升级过程中允许不可用的容器组的最大数量,由 `maxUnavailable` 指定。默认值是 20%。 +- **容器组就绪最短运行时长(s)**:新创建的守护进程集的容器组被视为可用之前的最少秒数,由 `minReadySeconds` 指定。默认值是 0。 + +{{}} + +{{}} + +### 容器组安全上下文 + +安全上下文(Security Context)定义容器组或容器的特权和访问控制设置。有关容器组安全上下文的更多信息,请访问[容器组安全策略](https://kubernetes.io/zh/docs/concepts/policy/pod-security-policy/)。 + +### 容器组调度规则 + +您可以选择不同的容器组调度规则,切换容器组间亲和与容器组间反亲和。在 Kubernetes 中,容器组间亲和由 `affinity` 字段下的 `podAffinity` 字段指定,而容器组间反亲和由 `affinity` 字段下的 `podAntiAffinity` 字段指定。在 KubeSphere 中,`podAffinity` 和 `podAntiAffinity` 都设置为 `preferredDuringSchedulingIgnoredDuringExecution`。您可以在右上角启用**编辑 YAML**查看字段详情, + +- **分散调度**代表反亲和性。 +- **集中调度**代表亲和性。 +- **自定义规则**即按需添加自定义调度规则。 + +有关亲和性和反亲和性的更多信息,请访问 [容器组亲和性](https://kubernetes.io/zh/docs/concepts/scheduling-eviction/assign-pod-node/#pod-%E9%97%B4%E4%BA%B2%E5%92%8C%E4%B8%8E%E5%8F%8D%E4%BA%B2%E5%92%8C)。 diff --git a/content/zh/docs/v3.4/project-user-guide/application-workloads/cronjobs.md b/content/zh/docs/v3.4/project-user-guide/application-workloads/cronjobs.md new file mode 100644 index 000000000..fc1551fbb --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/application-workloads/cronjobs.md @@ -0,0 +1,104 @@ +--- +title: "定时任务" +keywords: "KubeSphere, Kubernetes, 任务, 定时任务" +description: "了解定时任务的基本概念以及如何在 KubeSphere 中创建定时任务。" +linkTitle: "定时任务" + +weight: 10260 +--- + +定时任务 (CronJob) 对于创建周期性和重复性任务非常有用,例如运行备份或发送电子邮件。定时任务还可以在特定时间或间隔执行单个任务,例如在集群可能处于空闲状态时执行任务。 + +有关更多信息,请参见 [Kubernetes 官方文档](https://kubernetes.io/zh/docs/concepts/workloads/controllers/cron-jobs/)。 + +## 准备工作 + +您需要创建一个企业空间、一个项目以及一个用户 (`project-regular`)。必须邀请该用户至该项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 创建定时任务 + +### 步骤 1:打开仪表板 + +以 `project-regular` 身份登录控制台。转到项目的**任务**页面,然后在**定时任务**选项卡下点击**创建**。 + +### 步骤 2:输入基本信息 + +您可以参考下面的说明在每个字段中输入基本信息。完成操作后,点击**下一步**。 + +- **名称**:定时任务的名称,也是唯一标识符。 +- **别名**:定时任务的别名,使资源易于识别。 +- **定时计划**:按照给定的时间计划运行任务。语法参照 [CRON](https://zh.wikipedia.org/wiki/Cron)。KubeSphere 中提供了一些预置 CRON 语句以简化输入。该字段由 `.spec.schedule` 指定。对于此定时任务,输入 `*/1 * * * *`,这意味着它每分钟运行一次。 + + | 类型 | CRON | + | ----------- | ----------- | + | 每小时 | `0 * * * *` | + | 每天 | `0 0 * * *` | + | 每周 | `0 0 * * 0` | + | 每月 | `0 0 1 * *` | + +- **高级设置**: + + - **最大启动延后时间(s)**:由清单文件中的 `.spec.startingDeadlineSeconds` 指定,此可选字段表示如果由于任何原因错过计划时间,定时任务启动所需的最大秒数。错过执行的定时任务将被计为失败。如果未指定此字段,则此定时任务没有启动期限。 + - **成功任务保留数量**:由清单文件中的 `.spec.successfulJobsHistoryLimit` 指定,此字段表示要保留的定时任务执行成功的次数,用于区分显式零和未指定这两种情况。默认值为 3。 + - **失败任务保留数量**:由清单文件中的 `.spec.failedJobsHistoryLimit` 指定,此字段表示要保留的定时任务执行失败的次数,用于区分显式零和未指定这两种情况。默认值为 1。 + - **并发策略**:由 `.spec.concurrencyPolicy` 指定,它表示如何处理任务的并发执行: + - **同时运行任务** (默认值):允许定时任务并发运行。 + - **跳过新任务**:禁止并发运行,如果前一个运行还没有完成,则跳过下一个运行。 + 
- **跳过旧任务**:取消当前正在运行的任务,用一个新的来替换。 + +{{< notice note >}} + +您可以在右上角开启**编辑 YAML**,查看此定时任务的 YAML 格式清单文件。 + +{{}} + +### 步骤 3:定时任务设置(可选) + +请参考[任务](../jobs/#步骤-3策略设置可选)。 + +### 步骤 4:设置容器组 + +1. 点击**容器**下的**添加容器镜像**,在搜索栏中输入 `busybox`,然后按**回车**键。 + +2. 向下滚动到**命令**然后在**参数**框中输入 `/bin/sh,-c,date; echo "KubeSphere!"`。 + +3. 点击 **√** 完成镜像设置,然后点击**下一步**继续。 + + {{< notice note >}} + +- 此示例定时任务输出 `KubeSphere`。有关设置镜像的更多信息,请参见[容器组设置](../container-image-settings/)。 +- 有关**重启策略**的更多信息,请参见[任务](../jobs/#步骤-4设置镜像)。 +- 您可以跳过本教程的**存储设置**和**高级设置**。有关更多信息,请参见部署一文中的[挂载持久卷](../deployments/#步骤-4挂载持久卷)和[配置高级设置](../deployments/#步骤-5配置高级设置)。 + + {{}} + +### 步骤 5:检查结果 + +1. 在最后一步**高级设置**中,点击**创建**完成操作。如果创建成功,定时任务列表中将添加一个新条目。此外,您还可以在**任务**选项卡下查看任务。 + +2. 在**定时任务**选项卡下,点击此定时任务,然后转到**任务记录**选项卡,您可以在其中查看每个执行记录的信息。由于**成功任务保留数量**字段设置为 3,因此这里显示定时任务成功执行 3 次。 + +3. 点击任意记录,您将转到该任务的详情页面。 + +4. 在**资源状态**中,您可以检查容器组状态。点击右侧的 icon,然后点击 icon 可以检查容器日志,如下所示,该日志显示预期输出。 + +## 定时任务操作 + +在定时任务详情页面上,您可以在创建定时任务之后对其进行管理。 + +- **编辑信息**:编辑基本信息,但无法编辑该定时任务的`名称`。 +- **暂停/启动**:暂停或启动该定时任务。暂停定时任务将告知控制器暂停后续执行任务,但已经启动的执行不受影响。 +- **编辑 YAML**:编辑该定时任务的 YAML 文件配置。 +- **删除**:删除该定时任务,然后返回定时任务列表页面。 + +### 任务记录 + +点击**任务记录**选项卡查看定时任务的执行记录。 + +### 元数据 + +点击**元数据**选项卡查看定时任务的标签和注解。 + +### 事件 + +点击**事件**选项卡查看定时任务的事件。 diff --git a/content/zh/docs/v3.4/project-user-guide/application-workloads/daemonsets.md b/content/zh/docs/v3.4/project-user-guide/application-workloads/daemonsets.md new file mode 100644 index 000000000..e93e39a2f --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/application-workloads/daemonsets.md @@ -0,0 +1,136 @@ +--- +title: "守护进程集" +keywords: 'KubeSphere, Kubernetes, 守护进程集, 工作负载' +description: '了解守护进程集的基本概念以及如何在 KubeSphere 中创建守护进程集。' +linkTitle: "守护进程集" + +weight: 10230 +--- + +守护进程集管理多组容器组副本,确保所有(或某些)节点运行一个容器组的副本。集群添加节点时,守护进程集会根据需要自动将容器组添加到新节点。 + +有关更多信息,请参见 [Kubernetes 官方文档](https://kubernetes.io/zh/docs/concepts/workloads/controllers/daemonset/)。 + +## 使用守护进程集 + +如果您想在所有节点或者没有用户干预的特定节点上部署持续运行的后台任务,守护进程集会非常有用。例如: + +- 在每个节点上运行日志收集守护进程,例如 Fluentd 和 Logstash 等。 +- 在每个节点上运行节点监控守护进程,例如 Prometheus Node Exporter、collectd 和 AppDynamics Agent 等。 +- 在每个节点上运行集群存储守护进程和系统程序,例如 Glusterd、Ceph、kube-dns 和 kube-proxy 等。 + +## 准备工作 + +您需要创建一个企业空间、一个项目和一个用户 (`project-regular`),务必邀请该用户到项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 创建守护进程集 + +### 步骤 1:打开仪表板 + +以 `project-regular` 身份登录控制台。转到项目的**应用负载**,选择**工作负载**,点击**守护进程集**选项卡下面的**创建**。 + +### 步骤 2:输入基本信息 + +为该守护进程集指定一个名称(例如 `demo-daemonset`),选择项目,点击**下一步**继续。 + +### 步骤 3:设置容器组 + +1. 点击**添加容器**。 + +2. 输入镜像名称,该镜像可以来自公共 Docker Hub,也可以来自您指定的[私有仓库](../../../project-user-guide/configuration/image-registry/)。例如,在搜索栏输入 `fluentd` 然后按**回车键**。 + + {{< notice note >}} + +- 在搜索栏输入镜像名称后,请记得按键盘上的**回车键**。 +- 如果想使用您的私有镜像仓库,您应该先通过**配置**下面的**保密字典**[创建镜像仓库保密字典](../../../project-user-guide/configuration/image-registry/)。 + + {{}} + +3. 根据您的需求设置 CPU 和内存的资源请求和限制。有关更多信息,请参见[容器镜像设置中关于资源请求和资源限制的内容](../../../project-user-guide/application-workloads/container-image-settings/#添加容器镜像)。 + +4. 点击**使用默认端口**以自动填充**端口设置**,或者您可以自定义**协议**、**名称**和**容器端口**。 + +5. 在下拉菜单中选择镜像拉取策略。有关更多信息,请参见[容器镜像设置中关于镜像拉取策略的内容](../../../project-user-guide/application-workloads/container-image-settings/#添加容器镜像)。 + +6. 对于其他设置(**健康检查**、**启动命令**、**环境变量**、**容器安全上下文** 以及**同步主机时区**),您也可以在仪表板上配置它们。有关更多信息,请参见[容器组设置](../../../project-user-guide/application-workloads/container-image-settings/#添加容器镜像)中对这些属性的详细说明。操作完成后,点击右下角的 **√** 继续。 + +7. 
在下拉菜单中选择更新策略。建议您选择**滚动更新**。有关更多信息,请参见[更新策略](../../../project-user-guide/application-workloads/container-image-settings/#更新策略)。 + +8. 选择容器组调度规则。有关更多信息,请参见[容器组调度规则](../../../project-user-guide/application-workloads/container-image-settings/#容器组调度规则)。 + +9. 完成容器组设置后,点击**下一步**继续。 + +### 步骤 4:挂载卷 + +您可以直接添加持久卷或者挂载配置字典或保密字典,或者直接点击**下一步**跳过该步骤。有关持久卷的更多信息,请访问[持久卷声明](../../../project-user-guide/storage/volumes/#挂载持久卷)。 + +{{< notice note >}} + +守护进程集无法使用持久卷声明模板,而有状态副本集可以使用。 + +{{}} + +### 步骤 5:配置高级设置 + +您可以在该部分添加元数据。完成操作后,点击**创建**完成创建守护进程集的整个流程。 + +- **添加元数据** + + 为资源进行额外的元数据设置,例如**标签**和**注解**。 + +## 查看守护进程集详情 + +### 详情页面 + +1. 守护进程集创建后会显示列表中。您可以点击右边的 icon,在弹出菜单中选择操作,修改您的守护进程集。 + + - **编辑信息**:查看并编辑基本信息。 + - **编辑 YAML**:查看、上传、下载或者更新 YAML 文件。 + - **重新创建**:重新创建该守护进程集。 + - **删除**:删除该守护进程集。 + +2. 点击守护进程集名称可以进入它的详情页面。 + +3. 点击**更多操作**,显示您可以对该守护进程集进行的操作。 + + - **回退**:选择要回退的版本。 + - **编辑设置**:配置更新策略、容器和存储。 + - **编辑 YAML**:查看、上传、下载或者更新 YAML 文件。 + - **重新创建**:重新创建该守护进程集。 + - **删除**:删除该守护进程集并返回守护进程集列表页面。 + +4. 点击**资源状态**选项卡,查看该守护进程集的端口和容器组信息。 + + - **副本运行状态**:您无法更改守护进程集的容器组副本数量。 + - **容器组** + + - 容器组列表中显示了容器组详情(运行状态、节点、容器组IP 以及资源使用情况)。 + - 您可以点击容器组条目查看容器信息。 + - 点击容器日志图标查看容器的输出日志。 + - 您可以点击容器组名称查看容器组详情页面。 + +### 版本记录 + +修改工作负载的资源模板后,会生成一个新的日志并重新调度容器组进行版本更新。默认保存 10 个最近的版本。您可以根据修改日志进行重新创建。 + +### 元数据 + +点击**元数据**选项卡以查看守护进程集的标签和注解。 + +### 监控 + +1. 点击**监控**选项卡以查看 CPU 使用量、内存使用量、网络流入速率和网络流出速率。 + +2. 点击右上角的下拉菜单以自定义时间范围和采样间隔。 + +3. 点击右上角的 icon/icon 以开始或停止自动刷新数据。 + +4. 点击右上角的 icon 以手动刷新数据。 + +### 环境变量 + +点击**环境变量**选项卡以查看守护进程集的环境变量。 + +### 事件 + +点击**事件**以查看守护进程集的事件。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/project-user-guide/application-workloads/deployments.md b/content/zh/docs/v3.4/project-user-guide/application-workloads/deployments.md new file mode 100644 index 000000000..beb4e12d7 --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/application-workloads/deployments.md @@ -0,0 +1,139 @@ +--- +title: "部署" +keywords: 'KubeSphere, Kubernetes, 部署, 工作负载' +description: '了解部署的基本概念以及如何在 KubeSphere 中创建部署。' +linkTitle: "部署" + +weight: 10210 +--- + +部署控制器为容器组和副本集提供声明式升级。您可以在部署对象中描述一个期望状态,部署控制器会以受控速率将实际状态变更为期望状态。一个部署运行着应用程序的几个副本,它会自动替换宕机或故障的实例。因此,部署能够确保应用实例可用,处理用户请求。 + +有关更多信息,请参见 [Kubernetes 官方文档](https://kubernetes.io/zh/docs/concepts/workloads/controllers/deployment/)。 + +## 准备工作 + +您需要创建一个企业空间、一个项目和一个用户 (`project-regular`),务必邀请该用户到项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 创建部署 + +### 步骤 1:打开仪表板 + +以 `project-regular` 身份登录控制台。转到项目的**应用负载**,选择**工作负载**,点击**部署**选项卡下面的**创建**。 + +### 步骤 2:输入基本信息 + +为该部署指定一个名称(例如 `demo-deployment`),选择一个项目,点击**下一步**继续。 + +### 步骤 3:设置容器组 + +1. 设置镜像前,请点击**容器组副本数量**中的 iconicon 来定义容器组的副本数量,该参数显示在清单文件中的 `.spec.replicas` 字段。 + + {{< notice tip >}} +您可以启用右上角的**编辑 YAML**,查看 YAML 格式的部署清单文件。KubeSphere 使您可以直接编辑清单文件创建部署,或者您可以按照下列步骤使用仪表板创建部署。 + {{}} + +2. 点击**添加容器**。 + +3. 输入镜像名称,该镜像可以来自公共 Docker Hub,也可以来自您指定的[私有仓库](../../../project-user-guide/configuration/image-registry/)。例如,在搜索栏输入 `nginx` 然后按**回车键**。 + + {{< notice note >}} + +- 在搜索栏输入镜像名称后,请记得按键盘上的**回车键**。 +- 如果想使用您的私有镜像仓库,您应该先通过**配置**下面的**保密字典**[创建镜像仓库保密字典](../../../project-user-guide/configuration/image-registry/)。 + + {{}} + +4. 根据您的需求设置 CPU 和内存的资源请求和限制。有关更多信息,请参见[容器镜像设置中关于资源请求和资源限制的内容](../../../project-user-guide/application-workloads/container-image-settings/#添加容器镜像)。 + +5. 点击**使用默认端口**以自动填充**端口设置**,或者您可以自定义**协议**、**名称**和**容器端口**。 + +6. 
在下拉列表中选择镜像拉取策略。有关更多信息,请参见[容器镜像设置中关于镜像拉取策略的内容](../../../project-user-guide/application-workloads/container-image-settings/#添加容器镜像)。 + +7. 对于其他设置(**健康检查**、**启动命令**、**环境变量**、**容器安全上下文** 以及**同步主机时区**),您也可以在仪表板上配置它们。有关更多信息,请参见[容器组设置](../../../project-user-guide/application-workloads/container-image-settings/#添加容器镜像)中对这些属性的详细说明。操作完成后,点击右下角的 **√** 继续。 + +8. 在下拉菜单中选择更新策略。建议您选择**滚动更新**。有关更多信息,请参见[更新策略](../../../project-user-guide/application-workloads/container-image-settings/#更新策略)。 + +9. 选择容器组调度规则。有关更多信息,请参见[容器组调度规则](../../../project-user-guide/application-workloads/container-image-settings/#容器组调度规则)。 + +10. 完成容器组设置后,点击**下一步**继续。 + +### 步骤 4:挂载持久卷 + +您可以直接添加持久卷或者挂载配置字典或保密字典,或者直接点击**下一步**跳过该步骤。有关持久卷的更多信息,请访问[持久卷声明](../../../project-user-guide/storage/volumes/#挂载持久卷声明)。 + +{{< notice note >}} + +部署无法使用持久卷模板,而有状态副本集可以使用。 + +{{}} + +### 步骤 5:配置高级设置 + +您可以在该部分设置节点调度策略并添加元数据。完成操作后,点击**创建**完成创建部署的整个流程。 + +- **选择节点** + + 分配容器组副本在指定节点上运行。该参数在 `nodeSelector` 字段中指定。 + +- **添加元数据** + + 为资源进行额外的元数据设置,例如**标签**和**注解**。 + +## 查看部署详情 + +### 详情页面 + +1. 部署创建后会显示在列表中。您可以点击右边的 icon,在弹出菜单中选择操作,修改您的部署。 + + - **编辑信息**:查看并编辑基本信息。 + - **编辑 YAML**:查看、上传、下载或者更新 YAML 文件。 + - **重新创建**:重新创建该部署。 + - **删除**:删除该部署。 + +2. 点击部署名称可以进入它的详情页面。 + +3. 点击**更多操作**,显示您可以对该部署进行的操作。 + + - **回退**:选择要回退的版本。 + - **编辑自动扩缩**:根据 CPU 和内存使用情况自动伸缩副本。如果 CPU 和内存都已指定,则在满足任一条件时会添加或删除副本。 + - **编辑设置**:配置更新策略、容器和存储。 + - **编辑 YAML**:查看、上传、下载或者更新 YAML 文件。 + - **重新创建**:重新创建该部署。 + - **删除**:删除该部署并返回部署列表页面。 + +4. 点击**资源状态**选项卡,查看该部署的端口和容器组信息。 + + - **副本运行状态**:点击 iconicon 来增加或减少容器组副本数量。 + - **容器组** + + - 容器组列表中显示了容器组详情(运行状态、节点、容器组 IP 以及资源使用情况)。 + - 您可以点击容器组条目查看容器信息。 + - 点击容器日志图标查看容器的输出日志。 + - 您可以点击容器组名称查看容器组详情页面。 + +### 版本记录 + +修改工作负载的资源模板后,会生成一个新的日志并重新调度容器组进行版本更新。默认保存 10 个最近的版本。您可以根据修改日志进行重新部署。 + +### 元数据 + +点击**元数据**选项卡以查看部署的标签和注解。 + +### 监控 + +1. 点击**监控**选项卡以查看部署的 CPU 使用量、内存使用量、网络流出速率和网络流入速率。 + +2. 点击右上角的下拉菜单以自定义时间范围和采样间隔。 + +3. 点击右上角的 / 以开始或停止数据自动刷新。 + +4. 点击右上角的 以手动刷新数据。 + +### 环境变量 + +点击**环境变量**选项卡以查看部署的环境变量。 + +### 事件 + +点击**事件**选项卡以查看部署的事件。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/project-user-guide/application-workloads/horizontal-pod-autoscaling.md b/content/zh/docs/v3.4/project-user-guide/application-workloads/horizontal-pod-autoscaling.md new file mode 100755 index 000000000..2f88f3a3d --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/application-workloads/horizontal-pod-autoscaling.md @@ -0,0 +1,103 @@ +--- +title: "容器组弹性伸缩" +keywords: "容器组, 弹性伸缩, 弹性伸缩程序" +description: "如何在 KubeSphere 上配置容器组弹性伸缩." +weight: 10290 + +--- + +本文档描述了如何在 KubeSphere 上配置容器组弹性伸缩 (HPA)。 + +HPA 功能会自动调整容器组的数量,将容器组的平均资源使用(CPU 和内存)保持在预设值附近。有关 HPA 功能的详细情况,请参见 [Kubernetes 官方文档](https://kubernetes.io/zh/docs/tasks/run-application/horizontal-pod-autoscale/)。 + +本文档使用基于 CPU 使用率的 HPA 作为示例,基于内存使用量的 HPA 操作与其相似。 + +## 准备工作 + +- 您需要[启用 Metrics Server](../../../pluggable-components/metrics-server/)。 +- 您需要创建一个企业空间、一个项目以及一个用户(例如,`project-regular`)。`project-regular` 必须被邀请至此项目中,并被赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 创建服务 + +1. 以 `project-regular` 身份登录 KubeSphere 的 Web 控制台,然后访问您的项目。 + +2. 在左侧导航栏中选择**应用负载**下的**服务**,然后点击右侧的**创建**。 + +3. 在**创建服务**对话框中,点击**无状态服务**。 + +4. 设置服务名称(例如,`hpa`),然后点击**下一步**。 + +5. 点击**添加容器**,将**镜像**设置为 `mirrorgooglecontainers/hpa-example` 并点击**使用默认端口**。 + +6. 
为每个容器设置 CPU 请求(例如,0.15 core),点击 **√**,然后点击**下一步**。 + + {{< notice note >}} + + * 若要使用基于 CPU 使用率的 HPA,就必须为每个容器设置 CPU 请求,即为每个容器预留的最低 CPU 资源(有关详细信息,请参见 [Kubernetes 官方文档](https://kubernetes.io/zh/docs/tasks/run-application/horizontal-pod-autoscale/))。HPA 功能会将容器组平均 CPU 使用率与容器组平均 CPU 请求的目标比率进行比较。 + * 若要使用基于内存使用量的 HPA,则不需要配置内存请求。 + + {{}} + +7. 点击**存储设置**选项卡上的**下一步**,然后点击**高级设置**选项卡上的**创建**。 + +## 配置 HPA + +1. 左侧导航栏上选择**工作负载**中的**部署**,然后点击右侧的 HPA 部署(例如,hpa-v1)。 + +2. 点击**更多操作**,从下拉菜单中选择**编辑自动扩缩**。 + +3. 在**自动伸缩**对话框中,配置 HPA 参数,然后点击**确定**。 + + * **目标 CPU 用量(%)**:容器组平均 CPU 请求的目标比率。 + * **目标内存用量(MiB)**:以 MiB 为单位的容器组平均内存目标使用量。 + * **最小副本数**:容器组的最小数量。 + * **最大副本数**:容器组的最大数量。 + + 在示例中,**目标 CPU 用量(%)**设置为 `60`,**最小副本数**设置为 `1`,**最大副本数**设置为 `10`。 + + {{< notice note >}} + + 当容器组的数量达到最大值时,请确保集群可以为所有容器组提供足够的资源。否则,一些容器组将创建失败。 + + {{}} + +## 验证 HPA + +本节使用将请求发送到 HPA 服务的部署,以验证 HPA 是否会自动调整容器组的数量来满足资源使用目标。 + +### 创建负载生成器部署 + +1. 在左侧导航栏中选择**应用负载**中的**工作负载**,然后点击右侧的**创建**。 + +2. 在**创建部署**对话框中,设置部署名称(例如,`load-generator`),然后点击**下一步**。 + +3. 点击**添加容器**,将**镜像**设置为 `busybox`。 + +4. 在对话框中向下滚动,选择**启动命令**,然后将**命令**设置为 `sh,-c`,将**参数**设置为 `while true; do wget -q -O- http://..svc.cluster.local; done`(例如,`while true; do wget -q -O- http://hpa.demo-project.svc.cluster.local; done`)。 + +5. 点击 **√**,然后点击**下一步**。 + +6. 点击**存储设置**选项卡上的**下一步**,然后点击**高级设置**选项卡上的**创建**。 + +### 查看 HPA 部署状态 + +1. 负载生成器部署创建好后,在左侧导航栏中选择**应用负载**下的**工作负载**,然后点击右侧的 HPA 部署(例如,hpa-v1)。页面中显示的容器组的数量会自动增加以满足资源使用目标。 + +2. 在左侧导航栏选择**应用负载**中的**工作负载**,点击负载生成器部署(例如,load-generator-v1)右侧的 icon,从下拉菜单中选择**删除**。负载生成器部署删除后,再次检查 HPA 部署的状态。容器组的数量会减少到最小值。 + +{{< notice note >}} + +系统可能需要一些时间来调整容器组的数量以及收集数据。 + +{{}} + +## 编辑 HPA 配置 + +您可以重复[配置 HPA](#配置-hpa) 中的步骤来编辑 HPA 配置。 + +## 取消 HPA + +1. 在左侧导航栏选择**应用负载**中的**工作负载**,点击右侧的 HPA 部署(例如,hpa-v1)。 + +2. 
点击**自动伸缩**右侧的 icon,从下拉菜单中选择**取消**。 + diff --git a/content/zh/docs/v3.4/project-user-guide/application-workloads/jobs.md b/content/zh/docs/v3.4/project-user-guide/application-workloads/jobs.md new file mode 100644 index 000000000..15028da8d --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/application-workloads/jobs.md @@ -0,0 +1,163 @@ +--- +title: 任务 +keywords: "KubeSphere, Kubernetes, Docker, 任务" +description: "了解任务的基本概念以及如何在 KubeSphere 中创建任务。" +linkTitle: "任务" + +weight: 10250 +--- + +任务会创建一个或者多个容器组,并确保指定数量的容器组成功结束。随着容器组成功结束,任务跟踪记录成功结束的容器组数量。当达到指定的成功结束数量时,任务(即 Job)完成。删除任务的操作会清除其创建的全部容器组。 + +在简单的使用场景中,您可以创建一个任务对象,以便可靠地运行一个容器组直到结束。当第一个容器组故障或者被删除(例如因为节点硬件故障或者节点重启)时,任务对象会启动一个新的容器组。您也可以使用一个任务并行运行多个容器组。 + +下面的示例演示了在 KubeSphere 中创建任务的具体步骤,该任务会计算 π 到小数点后 2000 位。 + +## 准备工作 + +您需要创建一个企业空间、一个项目和一个用户 (`project-regular`),务必邀请该用户到项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 创建任务 + +### 步骤 1:打开仪表板 + +以 `project-regular` 身份登录控制台。转到**应用负载**下的**任务**,点击**创建**。 + +### 步骤 2:输入基本信息 + +输入基本信息。参数解释如下: + +- **名称**:任务的名称,也是唯一标识符。 +- **别名**:任务的别名,使资源易于识别。 +- **描述信息**:任务的描述,简要介绍任务。 + +### 步骤 3:策略设置(可选) + +您可以在该步骤设置值,或点击**下一步**以使用默认值。有关每个字段的详细说明,请参考下表。 + +| 名称 | 定义 | 描述信息 | +| ---------------------- | ---------------------------- | ------------------------------------------------------------ | +| 最大重试次数 | `spec.backoffLimit` | 指定将该任务视为失败之前的重试次数。默认值为 6。 | +| 容器组完成数量 | `spec.completions` | 指定该任务应该运行至成功结束的容器组的期望数量。如果设置为 nil,则意味着任何容器组成功结束即标志着所有容器组成功结束,并且允许并行数为任何正数值。如果设置为 1,则意味着并行数限制为 1,并且该容器组成功结束标志着任务成功完成。有关更多信息,请参见 [Jobs](https://kubernetes.io/zh/docs/concepts/workloads/controllers/job/)。 | +| 并行容器组数量 | `spec.parallelism` | 指定该任务在任何给定时间应该运行的最大期望容器组数量。当剩余工作小于最大并行数时 ((`.spec.completions - .status.successful`) < `.spec.parallelism`),实际稳定运行的容器组数量会小于该值。有关更多信息,请参见 [Jobs](https://kubernetes.io/zh/docs/concepts/workloads/controllers/job/)。 | +| 最大运行时间(s) | `spec.activeDeadlineSeconds` | 指定该任务在系统尝试终止任务前处于运行状态的持续时间(相对于 stratTime),单位为秒;该值必须是正整数。 | + +### 步骤 4:设置容器组 + +1. **重启策略**选择**重新创建容器组**。当任务未完成时,您只能将**重启策略**指定为**重新创建容器组**或**重启容器**: + + - 如果将**重启策略**设置为**重新创建容器组**,当容器组发生故障时,任务将创建一个新的容器组,并且故障的容器组不会消失。 + + - 如果将**重启策略**设置为**重启容器**,当容器组发生故障时,任务会在内部重启容器,而不是创建新的容器组。 + +2. 点击**添加容器**,它将引导您进入**添加容器**页面。在镜像搜索栏中输入 `perl`,然后按**回车**键。 + +3. 在该页面向下滚动到**启动命令**。在命令框中输入以下命令,计算 pi 到小数点后 2000 位并输出结果。点击右下角的 **√**,然后选择**下一步**继续。 + + ```bash + perl,-Mbignum=bpi,-wle,print bpi(2000) + ``` + + {{< notice note >}}有关设置镜像的更多信息,请参见[容器组设置](../../../project-user-guide/application-workloads/container-image-settings/)。{{}} + +### 步骤 5:检查任务清单(可选) + +1. 在右上角启用**编辑 YAML**,显示任务的清单文件。您可以看到所有值都是根据先前步骤中指定的值而设置。 + + ```yaml + apiVersion: batch/v1 + kind: Job + metadata: + namespace: cc + labels: + app: job-test-1 + name: job-test-1 + annotations: + kubesphere.io/alias-name: Test + kubesphere.io/description: A job test + spec: + template: + metadata: + labels: + app: job-test-1 + annotations: + kubesphere.io/containerSecrets: null + spec: + containers: + - name: container-xv4p2o + imagePullPolicy: IfNotPresent + image: perl + command: + - perl + - '-Mbignum=bpi' + - '-wle' + - print bpi(2000) + restartPolicy: Never + serviceAccount: default + initContainers: [] + volumes: [] + imagePullSecrets: null + backoffLimit: 5 + parallelism: 2 + completions: 4 + activeDeadlineSeconds: 300 + ``` + +2. 
您可以直接在清单文件中进行调整,然后点击**创建**,或者关闭**编辑 YAML**然后返回**创建任务**页面。 + + {{< notice note >}}您可以跳过本教程的**存储设置**和**高级设置**。有关更多信息,请参见[挂载持久卷](../../../project-user-guide/application-workloads/deployments/#步骤-4挂载持久卷)和[配置高级设置](../../../project-user-guide/application-workloads/deployments/#步骤-5配置高级设置)。{{}} + +### 步骤 6:检查结果 + +1. 在最后一步**高级设置**中,点击**创建**完成操作。如果创建成功,将添加新条目到任务列表中。 + +2. 点击此任务,然后转到**任务记录**选项卡,您可以在其中查看每个执行记录的信息。先前在步骤 3 中**完成数**设置为 `4`,因此有四个已结束的容器组。 + + {{< notice tip >}}如果任务失败,您可以重新运行该任务,失败原因显示在**消息**下。{{}} + +3. 在**资源状态**中,您可以查看容器组状态。先前将**并行容器组数量**设置为 2,因此每次会创建两个容器组。点击右侧的 icon,然后点击 icon 查看容器日志,该日志显示了预期的计算结果。 + + {{< notice tip >}} + +- 在**资源状态**中,容器组列表提供了容器组的详细信息(例如创建时间、节点、容器组 IP 和监控数据)。 +- 您可以点击容器组查看容器信息。 +- 点击容器日志图标查看容器的输出日志。 +- 您可以点击容器组名称查看容器组详情页面。 + + {{}} + +## 查看任务详情 + +### 任务操作 + +在任务详情页面上,您可以在任务创建后对其进行管理。 + +- **编辑信息**:编辑基本信息,但`名称`无法编辑。 +- **重新执行**:重新执行任务,容器组将重启,并生成新的执行记录。 +- **查看配置文件**:查看 YAML 格式的任务规格。 +- **删除**:删除该任务并返回到任务列表页面。 + +### 任务记录 + +1. 点击**任务记录**选项卡查看任务的执行记录。 + +2. 点击 刷新执行记录。 + +### 资源状态 + +1. 点击**资源状态**选项卡查看任务的容器组。 + +2. 点击 刷新容器组信息,点击 / 显示或隐藏每个容器组中的容器。 + +### 元数据 + +点击**元数据**选项卡查看任务的标签和注解。 + +### 环境变量 + +点击**环境变量**选项卡查看任务的环境变量。 + +### 事件 + +点击**事件**选项卡查看任务的事件。 + diff --git a/content/zh/docs/v3.4/project-user-guide/application-workloads/routes.md b/content/zh/docs/v3.4/project-user-guide/application-workloads/routes.md new file mode 100644 index 000000000..7d09c1690 --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/application-workloads/routes.md @@ -0,0 +1,132 @@ +--- +title: "应用路由" +keywords: "KubeSphere, Kubernetes, 路由, 应用路由" +description: "了解应用路由(即 Ingress)的基本概念以及如何在 KubeSphere 中创建应用路由。" +weight: 10270 +--- + +本文档介绍了如何在 KubeSphere 上创建、使用和编辑应用路由。 + +KubeSphere 上的应用路由和 Kubernetes 上的 [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/#what-is-ingress) 相同,您可以使用应用路由和单个 IP 地址来聚合和暴露多个服务。 + +## 准备工作 + +- 您需要创建一个企业空间、一个项目以及两个用户(例如,`project-admin` 和 `project-regular`)。在此项目中,`project-admin` 必须具有 `admin` 角色,`project-regular` 必须具有 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 +- 若要以 HTTPS 模式访问应用路由,则需要[创建保密证字典](../../../project-user-guide/configuration/secrets/)用于加密,密钥中需要包含 `tls.crt`(TLS 证书)和 `tls.key`(TLS 私钥)。 +- 您需要[创建至少一个服务](../../../project-user-guide/application-workloads/services/)。本文档使用演示服务作为示例,该服务会将容器组名称返回给外部请求。 + +## 配置应用路由访问方式 + +1. 以 `project-admin` 身份登录 KubeSphere 的 Web 控制台,然后访问您的项目。 + +2. 在左侧导航栏中选择**项目设置**下的**网关设置**,点击右侧的**开启网关**。 + +3. 在出现的对话框中,将**访问模式**设置为 **NodePort** 或 **LoadBalancer**,然后点击**确认**。 + + {{< notice note >}} + + 若将**访问模式**设置为 **LoadBalancer**,则可能需要根据插件用户指南在您的环境中启用负载均衡器插件。 + + {{}} + +## 创建应用路由 + +### 步骤 1:配置基本信息 + +1. 登出 KubeSphere 的 Web 控制台,以 `project-regular` 身份登录,并访问同一个项目。 + +2. 选择左侧导航栏**应用负载**中的**应用路由**,点击右侧的**创建**。 + +3. 在**基本信息**选项卡中,配置应用路由的基本信息,并点击**下一步**。 + * **名称**:应用路由的名称,用作此应用路由的唯一标识符。 + * **别名**:应用路由的别名。 + * **描述信息**:应用路由的描述信息。 + +### 步骤 2:配置路由规则 + +1. 在**路由规则**选项卡中,点击**添加路由规则**。 + +2. 选择一种模式来配置路由规则,点击 **√**,然后点击**下一步**。 + + * **自动生成**:KubeSphere 自动以`<服务名称>.<项目名称>.<网关地址>.nip.io` 格式生成域名,该域名由 [nip.io](https://nip.io/) 自动解析为网关地址。该模式仅支持 HTTP。 + + * **域名**:为应用路由设置域名。 + * **协议**:选择 `http` 或 `https`。如果选择了 `https`,则需要选择包含 `tls.crt`(TLS 证书)和 `tls.key`(TLS 私钥)的密钥用于加密。 + * **路径**:将每个服务映射到一条路径。您可以点击**添加**来添加多条路径。 + +### (可选)步骤 3:配置高级设置 + +1. 
在**高级设置**选项卡,选择**添加元数据**。 + + 为应用路由配置注解和标签,并点击**创建**。 + + {{< notice note >}} + + 您可以使用注解来自定义应用路由的行为。有关更多信息,请参见 [Nginx Ingress controller 官方文档](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/)。 + + {{}} + +### 步骤 4:获取域名、服务路径和网关地址 + +1. 在左侧导航栏中选择**应用负载**中的**应用路由**,点击右侧的应用路由名称。 + +2. 在**规则**区域获取域名和服务路径以及网关地址。 + + * 如果[应用路由访问模式](#配置应用路由访问方式)设置为 NodePort,则会使用 Kubernetes 集群节点的 IP 地址作为网关地址,NodePort 位于域名之后。 + + * 如果[应用路由访问模式](#配置应用路由访问方式)设置为 LoadBalancer,则网关地址由负载均衡器插件指定。 + +## 配置域名解析 + +若在[配置路由规则](#步骤-2配置路由规则)中选择**自动生成**,则不需要配置域名解析,域名会自动由 [nip.io](https://nip.io/) 解析为网关地址。 + +若在[配置路由规则](#步骤-2配置路由规则)中选择**指定域名**,则需要在 DNS 服务器配置域名解析,或者在客户端机器上将`<路由网关地址> <路由域名>`添加到 `etc/hosts` 文件。 + +## 访问应用路由 + +### NodePort 访问模式 + +1. 登录连接到应用路由网关地址的客户端机器。 + +2. 使用`<路由域名>:/<服务路径>`地址访问应用路由的后端服务。 + +### LoadBalancer 访问方式 + +1. 登录连接到应用路由网关地址的客户端机器。 + +2. 使用`<路由域名>/<服务路径>`地址访问应用路由的后端服务。 + +{{< notice note >}} + +如果您需要使用 NodePort 或 LoadBalancer 从私有网络外部访问应用路由,具体取决于您的网络环境: + +* 您可能需要在基础设施环境中配置流量转发和防火墙规则,以便访问应用路由的网关地址和端口号。 +* 若在[配置路由规则](#步骤-2配置路由规则)中选择**自动生成**,则可能需要手动[编辑路由规则](#编辑路由规则)将路由域名中的网关地址改为您私有网络的外部 IP 地址。 +* 若在[配置路由规则](#步骤-2配置路由规则)中选择**指定域名**,则可能需要改变 DNS 服务器上或者客户端机器 `etc/hosts` 文件中的配置,以便将域名解析为您私有网络的外部 IP 地址。 + +{{}} + +## 查看应用路由详情 + +### 操作 + +1. 在左侧导航栏中选择**工作负载**中的**应用路由**,点击右侧的应用路由名称。 + +2. 点击**编辑信息**,或点击**更多操作**,从下拉菜单中选择一项操作。 + * **编辑信息**:编辑应用路由的基本信息,但无法编辑路由名称。 + * **编辑 YAML**:编辑应用路由的 YAML 配置文件。 + * **编辑路由规则**:编辑应用路由的规则。 + * **编辑注解**:编辑应用路由的注解。有关更多信息,请参见 [Nginx Ingress controller 官方文档](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/)。 + * **删除**:删除应用路由并返回应用路由列表页面。 + +### 资源状态 + +点击**资源状态**选项卡查看应用路由规则。 + +### 元数据 + +点击**元数据**选项卡查看应用路由的标签和注解。 + +### 事件 + +点击**事件**选项卡查看应用路由的事件。 diff --git a/content/zh/docs/v3.4/project-user-guide/application-workloads/services.md b/content/zh/docs/v3.4/project-user-guide/application-workloads/services.md new file mode 100644 index 000000000..71f37690a --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/application-workloads/services.md @@ -0,0 +1,191 @@ +--- +title: "服务" +keywords: 'KubeSphere, Kubernetes, 服务, 工作负载' +description: '了解服务的基本概念以及如何在 KubeSphere 中创建服务。' +linkTitle: "服务" +weight: 10240 +--- + +服务是一种抽象方法,它将运行在一组容器组上的应用程序暴露为网络服务。也就是说,服务将这些容器组的 Endpoint 组成一个单一资源,可以通过不同的方式访问该资源。 + +有了 Kubernetes,您无需修改应用程序来使用不熟悉的服务发现机制。Kubernetes 为容器组提供 IP 地址,为一组容器组提供一个单一 DNS 名称,并且可以在容器组之间进行负载均衡。 + +有关更多信息,请参见 [Kubernetes 官方文档](https://kubernetes.io/zh/docs/concepts/services-networking/service/)。 + +## 访问类型 + +- **虚拟 IP**:虚拟 IP 是基于集群生成的唯一 IP。集群内部可以通过该 IP 访问服务。此访问类型适用于大多数服务。此外,集群外部也可以通过 NodePort 和 LoadBalancer 访问服务。 + +- **Headless**:集群不为服务生成 IP 地址,在集群内通过服务的后端容器组 IP 直接访问服务。此访问类型适用于后端异构服务,例如需要区分 master 和 agent 的服务。 + +{{< notice tip>}} + +在 KubeSphere 中,创建有状态服务和无状态服务时会默认生成一个虚拟 IP。如果您想创建 Headless 服务,请使用 **YAML** 直接进行配置。 + +{{}} + +## 准备工作 + +您需要创建一个企业空间、一个项目和一个用户 (`project-regular`),务必邀请该用户到项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 服务类型 + +KubeSphere 提供三种创建服务的基本方法:**无状态服务**、**有状态服务**和**外部服务**。另外,您还可以通过**自定义服务**下面的**指定工作负载**和**编辑 YAML** 来自定义服务。 + +- **无状态服务** + + 无状态服务是容器服务中最常用的服务类型。无状态服务定义容器组模板来控制容器组状态,包括滚动更新和回滚。您创建无状态服务时会同时创建**部署**工作负载。有关无状态服务的更多信息,请参见[部署](../../../project-user-guide/application-workloads/deployments/)。 + +- **有状态服务** + + 有状态服务用于管理有状态应用程序,确保有序且优雅的部署和扩缩,还提供稳定的持久化存储以及网络标识符。您创建有状态服务时会同时创建**有状态副本集**工作负载。有关有状态服务的更多信息,请参见[有状态副本集](../../../project-user-guide/application-workloads/statefulsets/)。 + +- **外部服务** + + 
与无状态服务和有状态服务不同,外部服务将一个服务映射到一个 DNS 名称,而不是映射到选择器。您需要在**外部服务地址**字段中指定这些服务,该字段显示在 YAML 文件中的 `externalName`。 + +- **指定工作负载** + + 使用现有容器组创建服务。 + +- **编辑 YAML** + + 使用 YAML 直接创建服务。您可以将 YAML 配置文件上传至控制台,也可以从控制台下载 YAML 配置文件。 + + {{< notice tip>}} + +关键字 `annotations:kubesphere.io/serviceType` 的值可以定义为 `statelessservice`、`statefulservice`、`externalservice` 和 `None`。 + + {{}} + +## 创建无状态服务 + +### 步骤 1:打开仪表板 + +1. 在项目页面转到**应用负载**下的**服务**,点击**创建**。 + +2. 点击**无状态服务**。 + + {{< notice note >}} + +创建有状态服务的步骤和创建无状态服务的步骤基本相同。本示例仅使用创建无状态服务的过程来进行演示。 + + {{}} + +### 步骤 2:输入基本信息 + +1. 在弹出的对话框中,您可以看到字段**版本**已经预先填写了 `v1`。您需要输入服务的名称,例如 `demo-service`。完成后,点击**下一步**继续。 + + - **名称**:服务和部署的名称,也是唯一标识符。 + - **别名**:服务的别名,使资源更容易识别。 + - **版本**:只能包含小写字母和数字,最长 16 个字符。 + + {{< notice tip >}} + +**名称**的值用于两个配置中,一个是部署,另一个是服务。您可以启用右上角的**编辑 YAML**查看部署的清单文件以及服务的清单文件。下方是一个示例文件,供您参考。 + + {{}} + + ``` yaml + kind: Deployment + metadata: + labels: + version: v1 + app: xxx + name: xxx-v1 + spec: + selector: + matchLabels: + version: v1 + app: xxx + template: + metadata: + labels: + version: v1 + app: xxx + --- + kind: Service + metadata: + labels: + version: v1 + app: xxx + name: xxx + spec: + metadata: + labels: + version: v1 + app: xxx + ``` + +### 步骤 3:设置容器组 + +为服务添加容器镜像,详情请参见[设置容器组](../../../project-user-guide/application-workloads/deployments/#步骤-3设置容器组)。 + +{{< notice tip >}} + +有关仪表板上各项属性的详细说明,请直接参见[容器组设置](../../../project-user-guide/application-workloads/container-image-settings/)。 + +{{}} + +### 步骤 4:挂载持久卷 + +要为服务挂载持久卷,详情请参见[挂载持久卷](../../../project-user-guide/application-workloads/deployments/#步骤-4挂载持久卷)。 + +### 步骤 5:配置高级设置 + +您可以设置节点调度策略并添加元数据,具体操作与[部署](../../../project-user-guide/application-workloads/deployments/#步骤-5配置高级设置)中的说明相同。对于服务,您可以看到两个额外选项可用,即**外部访问**和**会话保持**。 + +- 外部访问 + + 您可以通过两种方法向外暴露服务,即 NodePort 和 LoadBalancer。 + + - **NodePort**:在每个节点的 IP 地址上通过静态端口暴露服务。 + + - **LoadBalancer**:客户端向负载均衡器的 IP 地址发送请求。 + + {{< notice note >}} + +该值由 `.spec.type` 字段指定。如果您选择 **LoadBalancer**,则需要同时为它添加注解。 + + {{}} + +- 会话保持 + + 您可能想把从单个客户端会话发送的所有流量都路由到跨多个副本运行的应用的同一实例。这种做法降低了延迟,因此能更好地利用缓存。负载均衡的这种行为称为“会话保持 (Sticky Session)”。 + + 您可以在该字段设置最大会话保持时间,由清单文件中的 `.spec.sessionAffinityConfig.clientIP.timeoutSeconds` 指定,默认为 10800。 + +## 查看服务详情 + +### 详情页面 + +1. 创建服务后,您可以点击右侧的 icon 进一步编辑它,例如元数据(**名称**无法编辑)、配置文件、端口以及外部访问。 + + - **编辑信息**:查看和编辑基本信息。 + - **编辑 YAML**:查看、上传、下载或者更新 YAML 文件。 + - **编辑服务**:查看访问类型并设置选择器和端口。 + - **编辑外部访问**:编辑服务的外部访问方法。 + - **删除**:当您删除服务时,会在弹出的对话框中显示关联资源。如果您勾选这些关联资源,则会与服务一同删除。 + +2. 点击服务名称可以转到它的详情页面。 + + - 点击**更多操作**展开下拉菜单,菜单内容与服务列表中的下拉菜单相同。 + - 容器组列表提供容器组的详细信息(运行状态、节点、容器组IP 以及资源使用情况)。 + - 您可以点击容器组条目查看容器信息。 + - 点击容器日志图标查看容器的输出日志。 + - 您可以点击容器组名称来查看容器组详情页面。 + +### 资源状态 + +1. 点击**资源状态**选项卡以查看服务端口、工作负载和容器组信息。 + +2. 
在**容器组**区域,点击 icon 以刷新容器组信息,点击 / 以显示或隐藏每个容器组中的容器。 + +### 元数据 + +点击**元数据**选项卡以查看服务的标签和注解。 + +### 事件 + +点击**事件**选项卡以查看服务的事件。 + diff --git a/content/zh/docs/v3.4/project-user-guide/application-workloads/statefulsets.md b/content/zh/docs/v3.4/project-user-guide/application-workloads/statefulsets.md new file mode 100644 index 000000000..ec90a5d6b --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/application-workloads/statefulsets.md @@ -0,0 +1,148 @@ +--- +title: "有状态副本集" +keywords: 'KubeSphere, Kubernetes, 有状态副本集, 仪表板, 服务' +description: '了解有状态副本集的基本概念以及如何在 KubeSphere 中创建有状态副本集。' +linkTitle: "有状态副本集" + +weight: 10220 +--- + +有状态副本集是用于管理有状态应用的工作负载 API 对象,负责一组容器组的部署和扩缩,并保证这些容器组的顺序性和唯一性。 + +与部署类似,有状态副本集管理基于相同容器规范的容器组。与部署不同的是,有状态副本集为其每个容器组维护一个粘性身份。这些容器组根据相同的规范而创建,但不能相互替换:每个容器组都有一个持久的标识符,无论容器组如何调度,该标识符均保持不变。 + +如果您想使用持久卷为工作负载提供持久化存储,可以使用有状态副本集作为解决方案的一部分。尽管有状态副本集中的单个容器组容易出现故障,但持久的容器组标识符可以更容易地将现有持久卷匹配到替换任意故障容器组的新容器组。 + +对于需要满足以下一个或多个需求的应用程序来说,有状态副本集非常有用。 + +- 稳定的、唯一的网络标识符。 +- 稳定的、持久的存储。 +- 有序的、优雅的部署和扩缩。 +- 有序的、自动的滚动更新。 + +有关更多信息,请参见 [Kubernetes 官方文档](https://kubernetes.io/zh/docs/concepts/workloads/controllers/statefulset/)。 + +## 准备工作 + +您需要创建一个企业空间、一个项目以及一个用户 (`project-regular`),务必邀请该用户到项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 创建有状态副本集 + +在 KubeSphere 中,创建有状态副本集时也会创建 **Headless** 服务。您可以在项目的**应用负载**下的[服务](../services/)中找到 Headless 服务。 + +### 步骤 1:打开仪表板 + +以 `project-regular` 身份登录控制台。转到项目的**应用负载**,选择**工作负载**,然后在**有状态副本集**选项卡下点击**创建**。 + +### 步骤 2:输入基本信息 + +为有状态副本集指定一个名称(例如 `demo-stateful`),选择项目,然后点击**下一步**继续。 + +### 步骤 3:设置容器组 + +1. 设置镜像前,请点击**容器组副本数量**中的 iconicon 来定义容器组的副本数量,该参数显示在清单文件中的 `.spec.replicas` 字段。 + + {{< notice tip >}} + +您可以启用右上角的**编辑 YAML**,查看 YAML 格式的有状态副本集清单文件。KubeSphere 使您可以直接编辑清单文件创建有状态副本集,或者您可以按照下列步骤使用仪表板创建有状态副本集。 + + {{}} + +2. 点击**添加容器**。 + +3. 输入镜像名称,该镜像可以来自公共 Docker Hub,也可以来自您指定的[私有仓库](../../../project-user-guide/configuration/image-registry/)。例如,在搜索栏输入 `nginx` 然后按**回车键**。 + + {{< notice note >}} + +- 在搜索栏输入镜像名称后,请记得按键盘上的**回车键**。 +- 如果想使用您的私有镜像仓库,您应该先通过**配置**下面的**保密字典**[创建镜像仓库保密字典](../../../project-user-guide/configuration/image-registry/)。 + + {{}} + +4. 根据您的需求设置 CPU 和内存的资源请求和限制。有关更多信息,请参见[容器镜像设置中关于资源请求和资源限制的内容](../../../project-user-guide/application-workloads/container-image-settings/#添加容器镜像)。 + +5. 点击**使用默认端口**以自动填充**端口设置**,或者您可以自定义**协议**、**名称**和**容器端口**。 + +6. 从下拉列表中选择镜像拉取策略。有关更多信息,请参见[容器镜像设置中关于镜像拉取策略的内容](../../../project-user-guide/application-workloads/container-image-settings/#添加容器镜像)。 + +7. 对于其他设置(**健康检查**、**启动命令**、**环境变量**、**容器安全上下文** 以及**同步主机时区**),您也可以在仪表板上配置它们。有关更多信息,请参见[容器组设置](../../../project-user-guide/application-workloads/container-image-settings/#添加容器镜像)中对这些属性的详细说明。操作完成后,点击右下角的 **√** 继续。 + +8. 在下拉菜单中选择更新策略。建议您选择**滚动更新**。有关更多信息,请参见[更新策略](../container-image-settings/#更新策略)。 + +9. 选择容器组调度规则。有关更多信息,请参见[容器组调度规则](../../../project-user-guide/application-workloads/container-image-settings/#容器组调度规则)。 + +10. 完成容器组设置后,点击**下一步**继续。 + +### 步骤 4:挂载持久卷 + +有状态副本集可以使用持久卷模板,但是您必须提前在**存储**中创建它。有关持久卷的更多信息,请访问[持久卷](../../../project-user-guide/storage/volumes/#挂载持久卷)。完成后,点击**下一步**继续。 + +### 步骤 5:配置高级设置 + +您可以在此部分中设置节点调度策略并添加元数据。完成操作后,点击**创建**完成创建有状态副本集的整个流程。 + +- **选择节点** + + 分配容器组副本在指定节点上运行。该参数在 `nodeSelector` 字段中指定。 + +- **添加元数据** + + 为资源进行额外的元数据设置,例如**标签**和**注解**。 + +## 查看有状态副本集详情 + +### 详情页面 + +1. 有状态副本集创建后会显示列表中。您可以点击右边的 icon,在弹出菜单中选择操作,修改您的有状态副本集。 + + - **编辑信息**:查看并编辑基本信息。 + - **编辑 YAML**:查看、上传、下载或者更新 YAML 文件。 + - **重新创建**:重新创建该有状态副本集。 + - **删除**:删除该有状态副本集。 + +2. 点击有状态副本集名称可以进入它的详情页面。 + +3. 
点击**更多操作**,显示您可以对该有状态副本集进行的操作。 + + - **回退**:选择要回退的版本。 + - **编辑服务**:设置端口来暴露容器镜像和服务端口。 + - **编辑设置**:配置更新策略、容器和存储。 + - **编辑 YAML**:查看、上传、下载或者更新 YAML 文件。 + - **重新创建**:重新创建该有状态副本集。 + - **删除**:删除该有状态副本集并返回有状态副本集列表页面。 + +4. 点击**资源状态**选项卡,查看该有状态副本集的端口和容器组信息。 + + - **副本运行状态**:点击 iconicon 来增加或减少容器组副本数量。 + - **容器组** + + - 容器组列表中显示了容器组详情(运行状态、节点、容器组IP 以及资源使用情况)。 + - 您可以点击容器组条目查看容器信息。 + - 点击容器日志图标查看容器的输出日志。 + - 您可以点击容器组名称查看容器组详情页面。 + +### 版本记录 + +修改工作负载的资源模板后,会生成一个新的日志并重新调度容器组进行版本更新。默认保存 10 个最近的版本。您可以根据修改日志进行重新创建。 + +### 元数据 + +点击**元数据**选项卡以查看有状态副本集的标签和注解。 + +### 监控 + +1. 点击**监控**选项卡以查看有状态副本集的 CPU 使用量、内存使用量、网络流出速率和网络流入速率。 + +2. 点击右上角的下拉菜单以自定义时间范围和采样间隔。 + +3. 点击右上角的 statefulsets_autorefresh_start/icon 以开始或停止自动刷新数据。 + +4. 点击右上角的 icon 以手动刷新数据。 + +### 环境变量 + +点击**环境变量**选项卡查看有状态副本集的环境变量。 + +### 事件 + +点击**事件**查看有状态副本集的事件。 diff --git a/content/zh/docs/v3.4/project-user-guide/application/_index.md b/content/zh/docs/v3.4/project-user-guide/application/_index.md new file mode 100644 index 000000000..017e041ba --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/application/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "应用程序" +weight: 10100 + +_build: + render: false +--- diff --git a/content/zh/docs/v3.4/project-user-guide/application/app-template.md b/content/zh/docs/v3.4/project-user-guide/application/app-template.md new file mode 100644 index 000000000..bd62a79bd --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/application/app-template.md @@ -0,0 +1,33 @@ +--- +title: "应用模板" +keywords: 'Kubernetes, Chart, Helm, KubeSphere, 应用程序, 仓库, 模板' +description: '了解应用模板的概念以及它们如何在企业内部帮助部署应用程序。' +linkTitle: "应用模板" +weight: 10110 +--- + +应用模板是用户上传、交付和管理应用的一种方式。一般来说,根据一个应用的功能以及与外部环境通信的方式,它可以由一个或多个 Kubernetes 工作负载(例如[部署](../../../project-user-guide/application-workloads/deployments/)、[有状态副本集](../../../project-user-guide/application-workloads/statefulsets/)和[守护进程集](../../../project-user-guide/application-workloads/daemonsets/))和[服务](../../../project-user-guide/application-workloads/services/)组成。作为应用模板上传的应用基于 [Helm](https://helm.sh/) 包构建。 + +## 应用模板的使用方式 + +您可以将 Helm Chart 交付至 KubeSphere 的公共仓库,或者导入私有应用仓库来提供应用模板。 + +KubeSphere 的公共仓库也称作应用商店,企业空间中的每位租户都能访问。[上传应用的 Helm Chart](../../../workspace-administration/upload-helm-based-application/) 后,您可以部署应用来测试它的功能,并提交审核。最终待应用审核通过后,您可以选择将它发布至应用商店。有关更多信息,请参见[应用程序生命周期管理](../../../application-store/app-lifecycle-management/)。 + +对于私有仓库,只有拥有必要权限的用户才能在企业空间中[添加私有仓库](../../../workspace-administration/app-repository/import-helm-repository/)。一般来说,私有仓库基于对象存储服务构建,例如 MinIO。这些私有仓库在导入 KubeSphere 后会充当应用程序池,提供应用模板。 + +{{< notice note >}} + +对于 KubeSphere 中[作为 Helm Chart 上传的单个应用](../../../workspace-administration/upload-helm-based-application/),待审核通过并发布后,会和内置应用一同显示在应用商店中。此外,当您从私有应用仓库中选择应用模板时,在下拉列表中也可以看到**当前企业空间**,其中存储了这些作为 Helm Chart 上传的单个应用。 + +{{}} + +KubeSphere 基于 [OpenPitrix](https://github.com/openpitrix/openpitrix)(一个[可插拔组件](../../../pluggable-components/app-store/))部署应用仓库服务。 + +## 为什么选用应用模板 + +应用模板使用户能够以可视化的方式部署并管理应用。对内,应用模板作为企业为团队内部协调和合作而创建的共享资源(例如,数据库、中间件和操作系统)发挥着重要作用。对外,应用模板设立了构建和交付的行业标准。在不同场景中,用户可以通过一键部署来利用应用模板满足他们的自身需求。 + +此外,KubeSphere 集成了 OpenPitrix 来提供应用程序全生命周期管理,平台上的 ISV、开发者和普通用户都可以参与到管理流程中。基于 KubeSphere 的多租户体系,每位租户只负责自己的部分,例如应用上传、应用审核、发布、测试以及版本管理。最终,企业可以通过自定义的标准来构建自己的应用商店并丰富应用程序池,同时也能以标准化的方式来交付应用。 + +有关如何使用应用模板的更多信息,请参见[使用应用模板部署应用](../../../project-user-guide/application/deploy-app-from-template/)。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/project-user-guide/application/compose-app.md b/content/zh/docs/v3.4/project-user-guide/application/compose-app.md 
new file mode 100644 index 000000000..01709fc9a --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/application/compose-app.md @@ -0,0 +1,96 @@ +--- +title: "构建基于微服务的应用" +keywords: 'KubeSphere, Kubernetes, 服务网格, 微服务' +description: '了解如何从零开始构建基于微服务的应用程序。' +linkTitle: "构建基于微服务的应用" +weight: 10140 +--- + +由于每个微服务都在处理应用的一部分功能,因此一个应用可以被划分为不同的组件。这些组件彼此独立,具有各自的职责和局限。在 KubeSphere 中,这类应用被称为**自制应用**,用户可以通过新创建的服务或者现有服务来构建自制应用。 + +本教程演示了如何创建基于微服务的应用 Bookinfo(包含四种服务),以及如何设置自定义域名以访问该应用。 + +## 准备工作 + +- 您需要为本教程创建一个企业空间、一个项目以及一个用户 (`project-regular`)。该用户需要被邀请至项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 +- `project-admin` 需要[设置项目网关](../../../project-administration/project-gateway/),以便 `project-regular` 能在创建应用时定义域名。 + +## 构建自制应用的微服务 + +1. 登录 KubeSphere 的 Web 控制台,导航到项目**应用负载**中的**应用**。在**自制应用**选项卡中,点击**创建**。 + +2. 设置应用名称(例如 `bookinfo`)并点击**下一步**。 + +3. 在**服务设置**页面,您需要构建自制应用的微服务。点击**创建服务**,选择**无状态服务**。 + +4. 设置服务名称(例如 `productpage`)并点击**下一步**。 + + {{< notice note >}} + + 您可以直接在面板上创建服务,或者启用右上角的**编辑 YAML**以编辑 YAML 文件。 + + {{}} + +5. 点击**容器**下的**添加容器**,在搜索栏中输入 `kubesphere/examples-bookinfo-productpage-v1:1.13.0` 以使用 Docker Hub 镜像。 + + {{< notice note >}} + + 输入镜像名称之后,必须按下键盘上的**回车**键。 + + {{}} + +6. 点击**使用默认端口**。有关更多镜像设置的信息,请参见[容器组设置](../../../project-user-guide/application-workloads/container-image-settings/)。点击右下角的 **√** 和**下一步**以继续操作。 + +7. 在**存储设置**页面,[添加持久卷声明](../../../project-user-guide/storage/volumes/)或点击**下一步**以继续操作。 + +8. 在**高级设置**页面,直接点击**创建**。 + +9. 同样,为该应用添加其他三个微服务。以下是相应的镜像信息: + + | 服务 | 名称 | 镜像 | + | ---------- | --------- | ------------------------------------------------ | + | 无状态服务 | `details` | `kubesphere/examples-bookinfo-details-v1:1.13.0` | + | 无状态服务 | `reviews` | `kubesphere/examples-bookinfo-reviews-v1:1.13.0` | + | 无状态服务 | `ratings` | `kubesphere/examples-bookinfo-ratings-v1:1.13.0` | + +10. 添加微服务完成后,点击**下一步**。 + +11. 在**路由设置**页面,点击**添加路由规则**。在**指定域名**选项卡中,为您的应用设置域名(例如 `demo.bookinfo`)并在**协议**字段选择 `HTTP`。在`路径`一栏,选择服务 `productpage` 以及端口 `9080`。点击**确定**以继续操作。 + + {{< notice note >}} + +若未设置项目网关,则无法看见**添加路由规则**按钮。 + +{{}} + +12. 您可以添加更多规则或点击**创建**以完成创建过程。 + +13. 等待应用达到**就绪**状态。 + + +## 访问应用 + +1. 在为应用设置域名时,您需要在 hosts (`/etc/hosts`) 文件中添加一个条目。 例如,添加如下所示的 IP 地址和主机名: + + ```txt + 192.168.0.9 demo.bookinfo + ``` + + {{< notice note >}} + + 您必须添加**自己的** IP 地址和主机名。 + + {{}} + +2. 在**自制应用**中,点击刚才创建的应用。 + +3. 在**资源状态**中,点击**路由**下的**访问服务**以访问该应用。 + + {{< notice note >}} + + 请确保在您的安全组中打开端口。 + + {{}} + +4. 
分别点击 **Normal user** 和 **Test user** 以查看其他**服务**。 + diff --git a/content/zh/docs/v3.4/project-user-guide/application/deploy-app-from-appstore.md b/content/zh/docs/v3.4/project-user-guide/application/deploy-app-from-appstore.md new file mode 100644 index 000000000..3fe6a97b0 --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/application/deploy-app-from-appstore.md @@ -0,0 +1,62 @@ +--- +title: "从应用商店部署应用" +keywords: 'Kubernetes, Chart, Helm, KubeSphere, 应用, 应用商店' +description: '了解如何从应用商店中部署应用程序。' +linkTitle: "从应用商店部署应用" +weight: 10130 +--- + +[应用商店](../../../application-store/)是平台上的公共应用仓库。平台上的每个租户,无论属于哪个企业空间,都可以查看应用商店中的应用。应用商店包含 16 个精选的企业就绪的容器化应用,以及平台上不同企业空间的租户发布的应用。任何经过身份验证的用户都可以从应用商店部署应用。这与私有应用仓库不同,访问私有应用仓库的租户必须属于私有应用仓库所在的企业空间。 + +本教程演示如何从基于 [OpenPitrix](https://github.com/openpitrix/openpitrix) 的 KubeSphere 应用商店快速部署 [NGINX](https://www.nginx.com/),并通过 NodePort 访问其服务。 + +## 准备工作 + +- 您需要启用 [OpenPitrix (App Store)](../../../pluggable-components/app-store/)。 +- 您需要创建一个企业空间、一个项目和一个用户(例如 `project-regular`)。该用户必须被邀请至该项目,并具有 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 动手实验 + +### 步骤 1:从应用商店部署 NGINX + +1. 以 `project-regular` 身份登录 KubeSphere Web 控制台,点击左上角的**应用商店**。 + + {{< notice note >}} + + 您也可以在您的项目中前往**应用负载**下的**应用**页面,点击**创建**,并选择**来自应用商店**进入应用商店。 + + {{}} + +2. 找到并点击 NGINX,在**应用信息**页面点击**安装**。请确保在**安装须知**对话框中点击**同意**。 + +3. 设置应用的名称和版本,确保 NGINX 部署的位置,点击**下一步**。 + +4. 在**应用设置**页面,设置应用部署的副本数,根据需要启用或禁用 Ingress,然后点击**安装**。 + + {{< notice note >}} + + 如需为 NGINX 设置更多的参数, 可点击 **YAML** 后的切换开关打开应用的 YAML 配置文件,并在配置文件中设置相关参数。 + + {{}} + +5. 等待应用创建完成并开始运行。 + +### 步骤 2:访问 NGINX + +要从集群外访问 NGINX,您需要先用 NodePort 暴露该应用。 + +1. 在已创建的项目中打开**服务**页面并点击 NGINX 的服务名称。 + +2. 在服务详情页面,点击**更多操作**,在下拉菜单中选择**编辑外部访问**。 + +3. 将**访问方式**设置为 **NodePort** 并点击**确定**。有关更多信息,请参见[项目网关](../../../project-administration/project-gateway/)。 + +4. 在**端口**区域查看暴露的端口。 + +5. 用 `:` 地址访问 NGINX。 + + {{< notice note >}} + + 取决于您的 Kubernetes 集群的部署位置,您可能需要在安全组中放行端口并配置相关的端口转发规则。 + + {{}} \ No newline at end of file diff --git a/content/zh/docs/v3.4/project-user-guide/application/deploy-app-from-template.md b/content/zh/docs/v3.4/project-user-guide/application/deploy-app-from-template.md new file mode 100644 index 000000000..fef0b82f2 --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/application/deploy-app-from-template.md @@ -0,0 +1,93 @@ +--- +title: "从应用模板部署应用" +keywords: 'Kubernetes, Chart, Helm, KubeSphere, 应用程序, 应用模板' +description: '了解如何使用基于 Helm 的模板部署应用程序。' +linkTitle: "从应用模板部署应用" + +weight: 10120 +--- + +部署应用时,您可选择使用应用商店。应用商店包含了 KubeSphere 的内置应用和[以 Helm Chart 形式上传的应用](../../../workspace-administration/upload-helm-based-application/)。此外,您还可以使用应用模板。应用模板可由添加至 KubeSphere 的私有应用仓库提供。 + +本教程演示如何使用私有应用仓库中的应用模板快速部署 [Grafana](https://grafana.com/)。该私有应用仓库基于 QingStor 对象存储。 + +## 准备工作 + +- 您需要启用 [OpenPitrix (App Store)](../../../pluggable-components/app-store/)。 +- 您需要先完成[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)教程。您必须创建一个企业空间、一个项目和两个用户(`ws-admin ` 和 `project-regular`)。`ws-admin` 必须被授予企业空间中的 `workspace-admin` 角色, `project-regular` 必须被授予项目中的 `operator` 角色。 + +## 动手实验 + +### 步骤 1:添加应用仓库 + +1. 以 `ws-admin` 用户登录 KubeSphere Web 控制台。在您的企业空间中,进入**应用管理**下的**应用仓库**页面,并点击**添加**。 + +2. 在弹出的对话框中,将应用仓库名称设置为 `test-repo`,将应用仓库的 URL 设置为 `https://charts.kubesphere.io/main`。点击**验证**对 URL 进行验证,根据您的需要设置**同步间隔**,再点击**确定**。 + +3. 
应用仓库导入成功后会显示在列表中。 + + {{< notice note >}} + + 有关添加私有仓库时的更多参数信息,请参见[导入 Helm 仓库](../../../workspace-administration/app-repository/import-helm-repository/)。 + + {{}} + +### 步骤 2:从应用模板部署应用 + +1. 登出 KubeSphere 并以 `project-regular` 用户重新登录。在您的项目中,进入**应用负载**下的**应用**页面,再点击**创建**。 + +2. 在弹出的对话框中选择**从应用模板**。 + + **从应用商店**:选择内置的应用和以 Helm Chart 形式单独上传的应用。 + + **从应用模板**:从私有应用仓库和企业空间应用池选择应用。 + +3. 从下拉列表中选择之前添加的私有应用仓库 `test-repo`。 + + {{< notice note >}} + + 下拉列表中的**当前企业空间**选项表示企业空间应用池,包含以 Helm Chart 形式上传的应用。这些应用也属于应用模板。 + + {{}} + +4. 在搜索框中输入 `grafana` 找到该应用,点击搜索结果进行部署。 + + {{< notice note >}} + + 本教程使用的应用仓库与 Google Helm 仓库同步。由于其中的 Helm Chart 由不同的组织维护,部分应用可能无法部署成功。 + + {{}} + +5. 可以查看其应用信息和配置文件,在**版本**下拉列表中选择版本,然后点击**安装**。 + +6. 设置应用名称,确认应用版本和部署位置,点击**下一步**。 + +7. 在**应用设置**页面,手动编辑清单文件或直接点击**安装**。 + +8. 等待 Grafana 创建完成并开始运行。 + +### 步骤 3:暴露 Grafana 服务 + +要从集群外访问 Grafana,您需要先用 NodePort 暴露该应用。 + +1. 打开**服务**页面,点击 Grafana 的服务名称。 + +2. 点击**更多操作**,在下拉菜单中选择**编辑外部访问**。 + +3. 将**访问方式**设置为 **NodePort** 并点击**确定**。有关更多信息,请参见[项目网关](../../../project-administration/project-gateway/)。 + +4. 在**端口**区域查看暴露的端口。 + +### 步骤 4:访问 Grafana + +1. 您需要获取用户名和密码才能登录 Grafana 主页。前往**配置**下的**保密字典**页面,点击与应用名称相同的条目。 + +2. 在详情页面,点击眼睛图标查看用户名和密码。 + +3. 用 `:` 地址访问 Grafana。 + + {{< notice note >}} + + 取决于您的 Kubernetes 集群的部署位置,您可能需要在安全组中放行端口并配置相关的端口转发规则。 + + {{}} \ No newline at end of file diff --git a/content/zh/docs/v3.4/project-user-guide/configuration/_index.md b/content/zh/docs/v3.4/project-user-guide/configuration/_index.md new file mode 100644 index 000000000..388170555 --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/configuration/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "配置" +weight: 10400 + +_build: + render: false +--- diff --git a/content/zh/docs/v3.4/project-user-guide/configuration/configmaps.md b/content/zh/docs/v3.4/project-user-guide/configuration/configmaps.md new file mode 100644 index 000000000..768824948 --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/configuration/configmaps.md @@ -0,0 +1,71 @@ +--- +title: "配置字典" +keywords: 'KubeSphere, Kubernetes, ConfigMap' +description: '了解如何在 KubeSphere 中创建配置字典。' +linkTitle: "配置字典" +weight: 10420 +--- + +Kubernetes [配置字典(ConfigMap)](https://kubernetes.io/docs/concepts/configuration/configmap/) 以键值对的形式存储配置数据。配置字典资源可用于向容器组中注入配置数据。配置字典对象中存储的数据可以被 `ConfigMap` 类型的卷引用,并由容器组中运行的容器化应用使用。配置字典通常用于以下场景: + +- 设置环境变量的值。 +- 设置容器中的命令参数。 +- 在卷中创建配置文件。 + +本教程演示如何在 KubeSphere 中创建配置字典。 + +## 准备工作 + +您需要创建一个企业空间、一个项目和一个用户(例如 `project-regular`)。该用户必须已邀请至该项目,并具有 `operator` 角色。有关更多信息,请参阅[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 创建配置字典 + +1. 以 `project-regular` 用户登录控制台并进入项目,在左侧导航栏中选择**配置**下的**配置字典**,然后点击**创建**。 + +2. 在弹出的对话框中,设置配置字典的名称(例如 `demo-configmap`),然后点击**下一步**。 + + {{< notice tip >}} + +您可以在对话框右上角启用**编辑 YAML** 来查看配置字典的 YAML 清单文件,并通过直接编辑清单文件来创建配置字典。您也可以继续执行后续步骤在控制台上创建配置字典。 + +{{}} + +3. 在**数据设置**选项卡,点击**添加数据**以配置键值对。 + +4. 输入一个键值对。下图为示例: + + {{< notice note >}} + +- 配置的键值对会显示在清单文件中的 `data` 字段下。 + +- 目前 KubeSphere 控制台只支持在配置字典中配置键值对。未来版本将会支持添加配置文件的路径来创建配置字典。 + +{{}} + +5. 点击对话框右下角的 **√** 以保存配置。您可以再次点击**添加数据**继续配置更多键值对。 +6. 点击**创建**以生成配置字典。 + +## 查看配置字典详情 + +1. 配置字典创建后会显示在**配置字典**页面。您可以点击右侧的 ,并从下拉菜单中选择操作来修改配置字典。 + + - **编辑**:查看和编辑基本信息。 + - **编辑 YAML**:查看、上传、下载或更新 YAML 文件。 + - **编辑设置**:修改配置字典键值对。 + - **删除**:删除配置字典。 + +2. 点击配置字典名称打开其详情页面。在**数据**选项卡,您可以查看配置字典的所有键值对。 + +3. 点击**更多操作**对配置字典进行其他操作。 + + - **编辑 YAML**:查看、上传、下载或更新 YAML 文件。 + - **编辑设置**:修改配置字典键值对。 + - **删除**:删除配置字典并返回配置字典列表页面。 + +4. 
点击**编辑信息**来查看和编辑配置字典的基本信息。 + + +## 使用配置字典 + +在创建工作负载、[服务](../../../project-user-guide/application-workloads/services/)、[任务](../../../project-user-guide/application-workloads/jobs/)或[定时任务](../../../project-user-guide/application-workloads/cronjobs/)时,您可以用配置字典为容器添加环境变量。您可以在**添加容器**页面勾选**环境变量**,点击**引用配置字典或保密字典**,然后从下拉列表中选择一个配置字典。 + diff --git a/content/zh/docs/v3.4/project-user-guide/configuration/image-registry.md b/content/zh/docs/v3.4/project-user-guide/configuration/image-registry.md new file mode 100644 index 000000000..5bbd26352 --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/configuration/image-registry.md @@ -0,0 +1,104 @@ +--- +title: "镜像仓库" +keywords: 'KubeSphere, Kubernetes, Docker, 保密字典' +description: '了解如何在 KubeSphere 中创建镜像仓库。' +linkTitle: "镜像仓库" +weight: 10430 +--- + +Docker 镜像是一个只读的模板,可用于部署容器服务。每个镜像都有一个唯一标识符(即`镜像名称:标签`)。例如,一个镜像可以包含只安装有 Apache 和几个应用的完整的 Ubuntu 操作系统软件包。镜像仓库可用于存储和分发 Docker 镜像。 + +本教程演示如何为不同的镜像仓库创建保密字典。 + +## 准备工作 + +您需要创建一个企业空间、一个项目和一个用户(例如 `project-regular`)。该用户必须已邀请至该项目,并具有 `operator` 角色。有关更多信息,请参阅[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 创建保密字典 + +创建工作负载、[服务](../../../project-user-guide/application-workloads/services/)、[任务](../../../project-user-guide/application-workloads/jobs/)或[定时任务](../../../project-user-guide/application-workloads/cronjobs/)时,除了从公共仓库选择镜像,您还可以从私有仓库选择镜像。要使用私有仓库中的镜像,您必须先为私有仓库创建保密字典,以便在 KubeSphere 中集成该私有仓库。 + +### 步骤 1:进入保密字典页面 + +以 `project-regular` 用户登录 KubeSphere Web 控制台并进入项目,在左侧导航栏中选择**配置**下的**保密字典**,然后点击**创建**。 + +### 步骤 2:配置基本信息 + +设置保密字典的名称(例如 `demo-registry-secret`),然后点击**下一步**。 + +{{< notice tip >}} + +您可以在对话框右上角启用**编辑 YAML** 来查看保密字典的 YAML 清单文件,并通过直接编辑清单文件来创建保密字典。您也可以继续执行后续步骤在控制台上创建保密字典。 + +{{}} + +### 步骤 3:配置镜像服务信息 + +将**类型**设置为 **镜像服务信息**。要在创建应用负载时使用私有仓库中的镜像,您需要配置以下字段: + +- **仓库地址**:镜像仓库的地址,其中包含创建应用负载时需要使用的镜像。 +- **用户名**:登录镜像仓库所需的用户名。 +- **密码**:登录镜像仓库所需的密码。 +- **邮箱**(可选):您的邮箱地址。 + +#### 添加 Docker Hub 仓库 + +1. 在 [Docker Hub](https://hub.docker.com/) 上添加镜像仓库之前,您需要注册一个 Docker Hub 帐户。在**保密字典设置**页面,将**仓库地址**设置为 `docker.io`,将**用户名**和**密码**分别设置为您的 Docker ID 和密码,然后点击**验证**以检查地址是否可用。 + +2. 点击**创建**。保密字典创建后会显示在**保密字典**界面。有关保密字典创建后如何编辑保密字典,请参阅[查看保密字典详情](../../../project-user-guide/configuration/secrets/#查看保密字典详情)。 + +#### 添加 Harbor 镜像仓库 + +[Harbor](https://goharbor.io/) 是一个开源的可信云原生仓库项目,用于对内容进行存储、签名和扫描。通过增加用户经常需要的功能,例如安全、身份验证和管理,Harbor 扩展了开源的 Docker Distribution。Harbor 使用 HTTP 和 HTTPS 为仓库请求提供服务。 + +**HTTP** + +1. 您需要修改集群中所有节点的 Docker 配置。例如,如果外部 Harbor 仓库的 IP 地址为 `http://192.168.0.99`,您需要在 `/etc/systemd/system/docker.service.d/docker-options.conf` 文件中增加 `--insecure-registry=192.168.0.99` 标签。 + + ```bash + [Service] + Environment="DOCKER_OPTS=--registry-mirror=https://registry.docker-cn.com --insecure-registry=10.233.0.0/18 --data-root=/var/lib/docker --log-opt max-size=50m --log-opt max-file=5 \ + --insecure-registry=192.168.0.99" + ``` + + {{< notice note >}} + + - 请将镜像仓库的地址替换成实际的地址。 + + - 有关 `Environment` 字段中的标签,请参阅 [Dockerd Options](https://docs.docker.com/engine/reference/commandline/dockerd/)。 + + - Docker 守护进程需要 `--insecure-registry` 标签才能与不安全的仓库通信。有关该标签的更多信息,请参阅 [Docker 官方文档](https://docs.docker.com/engine/reference/commandline/dockerd/#insecure-registries)。 + + {{}} + +2. 重新加载配置文件并重启 Docker。 + + ```bash + sudo systemctl daemon-reload + ``` + + ```bash + sudo systemctl restart docker + ``` + +3. 在 KubeSphere 控制台上进入创建保密字典的**数据设置**页面,将**类型**设置为**镜像服务信息**,将**仓库地址**设置为您的 Harbor IP 地址,并设置用户名和密码。 + + {{< notice note >}} + + 如需使用 Harbor 域名而非 IP 地址,您需要在集群中配置 CoreDNS 和 nodelocaldns。 + + {{}} + +4. 
点击**创建**。保密字典创建后会显示在**保密字典**页面。有关保密字典创建后如何编辑保密字典,请参阅[查看保密字典详情](../../../project-user-guide/configuration/secrets/#查看保密字典详情)。 + +**HTTPS** + +有关如何集成基于 HTTPS 的 Harbor 仓库,请参阅 [Harbor 官方文档](https://goharbor.io/docs/1.10/install-config/configure-https/)。请确保您已使用 `docker login` 命令连接到您的 Harbor 仓库。 + +## 使用镜像仓库 + +如果您已提前创建了私有镜像仓库的保密字典,您可以选择私有镜像仓库中的镜像。例如,创建[部署](../../../project-user-guide/application-workloads/deployments/)时,您可以在**添加容器**页面点击**镜像**下拉列表选择一个仓库,然后输入镜像名称和标签使用镜像。 + +如果您使用 YAML 文件创建工作负载且需要使用私有镜像仓库,需要在本地 YAML 文件中手动添加 `kubesphere.io/imagepullsecrets` 字段,并且取值是 JSON 格式的字符串(其中 `key` 为容器名称,`value` 为保密字典名),以保证 `imagepullsecrets` 字段不被丢失,如下示例图所示。 + +![kubesphere-ecosystem](/images/docs/v3.3/project-user-guide/configurations/image-pull-secrets.png) diff --git a/content/zh/docs/v3.4/project-user-guide/configuration/secrets.md b/content/zh/docs/v3.4/project-user-guide/configuration/secrets.md new file mode 100644 index 000000000..45641802f --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/configuration/secrets.md @@ -0,0 +1,121 @@ +--- +title: "保密字典" +keywords: 'KubeSphere, Kubernetes, 保密字典' +description: '了解如何在 KubeSphere 中创建保密字典。' +linkTitle: "保密字典" +weight: 10410 +--- + +Kubernetes [保密字典 (Secret)](https://kubernetes.io/zh/docs/concepts/configuration/secret/) 可用于存储和管理密码、OAuth 令牌和 SSH 保密字典等敏感信息。容器组可以通过[三种方式](https://kubernetes.io/zh/docs/concepts/configuration/secret/#overview-of-secrets)使用保密字典: + +- 作为挂载到容器组中容器化应用上的卷中的文件。 +- 作为容器组中容器使用的环境变量。 +- 作为 kubelet 为容器组拉取镜像时的镜像仓库凭证。 + +本教程演示如何在 KubeSphere 中创建保密字典。 + +## 准备工作 + +您需要创建一个企业空间、一个项目和一个用户(例如 `project-regular`)。该用户必须已邀请至该项目,并具有 `operator` 角色。有关更多信息,请参阅[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 创建保密字典 + +### 步骤 1:进入保密字典页面 + +以 `project-regular` 用户登录控制台并进入项目,在左侧导航栏中选择**配置**下的**保密字典**,然后点击**创建**。 + +### 步骤 2:配置基本信息 + +设置保密字典的名称(例如 `demo-secret`),然后点击**下一步**。 + +{{< notice tip >}} + +您可以在对话框右上角启用**编辑 YAML** 来查看保密字典的 YAML 清单文件,并通过直接编辑清单文件来创建保密字典。您也可以继续执行后续步骤在控制台上创建保密字典。 + +{{}} + +### 步骤 3:设置保密字典 + +1. 在**数据设置**选项卡,从**类型**下拉列表中选择保密字典类型。您可以在 KubeSphere 中创建以下保密字典,类型对应 YAML 文件中的 `type` 字段。 + + {{< notice note >}} + + 对于所有的保密字典类型,配置在清单文件中 `data` 字段的所有键值对的值都必须是 base64 编码的字符串。KubeSphere 会自动将您在控制台上配置的值转换成 base64 编码并保存到 YAML 文件中。例如,保密字典类型为**默认**时,如果您在**添加数据**页面将**键**和**值**分别设置为 `password` 和 `hello123`,YAML 文件中显示的实际值为 `aGVsbG8xMjM=`(即 `hello123` 的 base64 编码,由 KubeSphere 自动转换)。 + + {{}} + + - **默认**:对应 Kubernetes 的 [Opaque](https://kubernetes.io/zh/docs/concepts/configuration/secret/#opaque-secret) 保密字典类型,同时也是 Kubernetes 的默认保密字典类型。您可以用此类型保密字典创建任意自定义数据。点击**添加数据**为其添加键值对。 + + - **TLS 信息**:对应 Kubernetes 的 [kubernetes.io/tls](https://kubernetes.io/zh/docs/concepts/configuration/secret/#tls-secret) 保密字典类型,用于存储证书及其相关保密字典。这类数据通常用于 TLS 场景,例如提供给应用路由 (Ingress) 资源用于终结 TLS 链接。使用此类型的保密字典时,您必须为其指定**凭证**和**私钥**,分别对应 YAML 文件中的 `tls.crt` 和 `tls.key` 字段。 + + - **镜像服务信息**:对应 Kubernetes 的 [kubernetes.io/dockerconfigjson](https://kubernetes.io/zh/docs/concepts/configuration/secret/#docker-config-secrets) 保密字典类型,用于存储访问 Docker 镜像仓库所需的凭证。有关更多信息,请参阅[镜像仓库](../image-registry/)。 + + - **用户名和密码**:对应 Kubernetes 的 [kubernetes.io/basic-auth](https://kubernetes.io/zh/docs/concepts/configuration/secret/#basic-authentication-secret) 保密字典类型,用于存储基本身份认证所需的凭证。使用此类型的保密字典时,您必须为其指定**用户名**和**密码**,分别对应 YAML 文件中的 `username` 和 `password` 字段。 + +2. 本教程以默认类型为例。点击**添加数据**,将**键**设置为 `MYSQL_ROOT_PASSWORD` 并将**值**设置为 `123456`,为 MySQL 设置保密字典。 + +3. 
点击对话框右下角的 **√** 以确认配置。您可以继续为保密字典添加键值对或点击**创建**完成操作。有关保密字典使用的更多信息,请参阅[创建并发布 WordPress](../../../quick-start/wordpress-deployment/#任务-3创建应用程序)。 + +## 查看保密字典详情 + +1. 保密字典创建后会显示在如图所示的列表中。您可以点击右边的 ,并从下拉菜单中选择操作来修改保密字典。 + + - **编辑信息**:查看和编辑基本信息。 + - **编辑 YAML**:查看、上传、下载或更新 YAML 文件。 + - **编辑设置**:修改保密字典键值对。 + - **删除**:删除保密字典。 + +2. 点击保密字典名称打开保密字典详情页面。在**数据**选项卡,您可以查看保密字典的所有键值对。 + + {{< notice note >}} + +如上文所述,KubeSphere 自动将键值对的值转换成对应的 base64 编码。您可以点击右边的 查看解码后的值。 + +{{}} + +3. 点击**更多操作**对保密字典进行其他操作。 + + - **编辑 YAML**:查看、上传、下载或更新 YAML 文件。 + - **编辑保密字典**:修改保密字典键值对。 + - **删除**:删除保密字典并返回保密字典列表页面。 + + +## 使用保密字典 + +通常情况下,在创建工作负载、[服务](../../../project-user-guide/application-workloads/services/)、[任务](../../../project-user-guide/application-workloads/jobs/)或[定时任务](../../../project-user-guide/application-workloads/cronjobs/)时,您需要使用保密字典。例如,您可以为代码仓库选择保密字典。有关更多信息,请参阅[镜像仓库](../image-registry/)。 + +此外,您还可以用保密字典为容器添加环境变量。您可以在**容器镜像**页面勾选**环境变量**,点击**引用配置文件或保密字典**,然后从下拉列表中选择一个保密字典。 + +## 创建常用保密字典 + +本节介绍如何为 Docker Hub 帐户和 GitHub 帐户创建保密字典。 + +### 创建 Docker Hub 保密字典 + +1. 以 `project-regular` 用户登录 KubeSphere 并进入您的项目。在左侧导航栏中选择**配置**下的**保密字典**,然后在页面右侧点击**创建**。 + +2. 设置保密字典名称(例如 `dockerhub-id`)并点击**下一步**。在**数据设置**页面,设置以下参数,然后点击**验证**以检查设置的信息是否有效。 + + **类型**:选择**镜像服务信息**。 + + **仓库地址**:输入您的 Docker Hub 仓库地址,例如 `docker.io`。 + + **用户名**:输入您的 Docker ID。 + + **密码**:输入您的 Docker Hub 密码。 + +3. 点击**创建**完成操作。 + +### 创建 GitHub 保密字典 + +1. 以 `project-regular` 用户登录 KubeSphere 并进入您的项目。在左侧导航栏中选择**配置**下的**保密字典**,然后在页面右侧点击**创建**。 + +2. 设置保密字典名称(例如 `github-id`)并点击**下一步**。在**数据设置**页面,设置以下参数。 + + **类型**:选择**用户名和密码**。 + + **用户名**:输入您的 GitHub 帐户。 + + **密码**:输入您的 GitHub 密码。 + +3. 点击**创建**完成操作。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/project-user-guide/configuration/serviceaccounts.md b/content/zh/docs/v3.4/project-user-guide/configuration/serviceaccounts.md new file mode 100644 index 000000000..42bab0ec1 --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/configuration/serviceaccounts.md @@ -0,0 +1,51 @@ +--- + +title: "服务帐户" +keywords: 'Kubesphere,Kubernetes,服务帐户' +description: '学习如何在 Kubesphere 上创建服务帐户。' +linkTitle: "服务帐户" +weight: 10440 +--- + +[服务帐户](https://kubernetes.io/zh/docs/tasks/configure-pod-container/configure-service-account/) 为 Pod 中运行的进程提供了标识。当用户访问集群时,API 服务器将用户认证为特定的用户帐户。当这些进程与 API 服务器联系时,Pod 里容器的进程将被验证为特定的服务帐户。 + +该文档描述了如何在 KubeSphere 上创建服务帐户。 + +## 前提条件 + +你已经创建一个企业空间、项目和用户(`project-regular`),将该用户邀请至创建的项目,并为其分配 `operator` 角色。有关更多信息,请参阅 [创建企业空间、项目、用户和平台角色](https://kubesphere.io/zh/docs/quick-start/create-workspace-and-project/)。 + +## 创建服务帐户 + +1. 以 `project-regular` 用户登录到 KubeSphere 控制台,点击**项目**。 + +1. 选择您想要创建服务账户的项目。 + +1. 在左侧导航栏,单击**配置** > **服务账户**。您会在**服务账户**页面看到一个名为 `default` 的服务帐户。该账户是在创建项目时自动创建的。 + + {{< notice note >}} + + 如果在项目中创建工作负载时未指定服务帐户,则将自动分配同一项目中的 `default`服务帐户。 + + {{}} + +2. 单击**创建**。在显示的**创建服务账户**对话框中,您可以设置以下参数: + +- **名称**(必填项):服务帐户的唯一标识符。 +- **别名**:服务帐户的别名,以帮助你更好地识别服务帐户。 +- **简介**:服务帐户简介。 +- **项目角色**:从服务帐户的下拉列表中选择一个项目角色。在一个项目中,不同的项目角色有[不同的权限](../../../project-administration/role-and-member-management/#built-in-roles)。 + +5. 完成参数设置后,单击**创建**。 + +## 查看服务帐户详情页 + +1. 在左侧导航栏,单击**配置** > **服务账户**。单击创建的服务帐户以转到其详细页。 + +2. 单击**编辑信息**以编辑服务账户基本信息,或单击**更多操作**执行下列任一操作: + - **编辑 YAML**:查看、更新或下载 YAML 文件。 + - **修改角色**:修改服务帐户的项目角色。 + - **删除**:删除服务帐户。 + +3. 
在右侧的**资源状态**页签,查看服务账户的保密字典和 kubeconfig 详情。 + diff --git a/content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/_index.md b/content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/_index.md new file mode 100644 index 000000000..04b6a22ad --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "自定义应用监控" +weight: 10800 + +_build: + render: false +--- diff --git a/content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/examples/_index.md b/content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/examples/_index.md new file mode 100644 index 000000000..98fbf0bfe --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/examples/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "示例" +weight: 10811 + +_build: + render: false +--- diff --git a/content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/examples/monitor-mysql.md b/content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/examples/monitor-mysql.md new file mode 100644 index 000000000..cb0c6689c --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/examples/monitor-mysql.md @@ -0,0 +1,72 @@ +--- +title: "监控 MySQL" +keywords: '监控, Prometheus, MySQL, MySQL Exporter' +description: '部署 MySQL 和 MySQL Exporter 并为该应用创建监控面板。' +linkTitle: "监控 MySQL" +weight: 10812 +--- +通过[介绍](../../../../project-user-guide/custom-application-monitoring/introduction/#间接暴露)一文,您了解到无法直接将 Prometheus 指标接入 MySQL。若要以 Prometheus 格式暴露 MySQL 指标,您需要先部署 MySQL Exporter。 + +本教程演示如何监控 MySQL 指标并将其可视化。 + +## 准备工作 + +- 请确保已[启用应用商店](../../../../pluggable-components/app-store/)。MySQL 和 MySQL Exporter 将通过应用商店来部署。 +- 您需要创建一个企业空间、一个项目和一个用户 (`project-regular`)。该用户需要在该项目中具有 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../../quick-start/create-workspace-and-project/)。 + +## 步骤 1:部署 MySQL + +首先,请[从应用商店部署 MySQL](../../../../application-store/built-in-apps/mysql-app/)。 + +1. 前往您的项目,点击左上角的**应用商店**。 + +2. 点击 **MySQL** 进入其详情页面,点击**应用信息**选项卡中的**部署**。 + + {{< notice note >}} + +MySQL 是 KubeSphere 应用商店中的内置应用,应用商店启用后可以直接部署和使用 MySQL。 + +{{}} + +3. 在**基本信息**下,设置**名称**并选择**版本**。在**位置**下,选择要部署该应用的项目,然后点击**下一步**。 + +4. 在**应用设置**下,取消 `mysqlRootPassword` 字段的注解,并设置 root 密码,然后点击**安装**。 + +5. 等待 MySQL 启动并运行。 + +## 步骤 2:部署 MySQL Exporter + +您需要在同一个集群上的同一个项目中部署 MySQL Exporter。MySQL Exporter 负责查询 MySQL 状态并以 Prometheus 格式报告数据。 + +1. 前往**应用商店**,点击 **MySQL Exporter**。 + +2. 在详情页面,点击**安装**。 + +3. 在**基本信息**下,设置**名称**并选择**版本**。在**位置**下,选择要部署该应用的项目(须和部署 MySQL 的项目相同),然后点击**下一步**。 + +4. 请确保 `serviceMonitor.enabled` 设为 `true`。内置 MySQL Exporter 默认将其设置为 `true`,故您无需手动修改 `serviceMonitor.enabled`。 + + {{< notice warning >}} +如果您使用外部 Exporter 的 Helm Chart,请务必启用 ServiceMonitor CRD。此类 Chart 通常默认禁用 ServiceMonitor,需要手动修改。 + {{}} + +5. 修改 MySQL 连接参数。MySQL Exporter 需要连接到目标 MySQL。在本教程中,MySQL 以服务名 `mysql-dh3ily` 进行安装。在配置文件的 `mysql` 部分,将 `host` 设置为 `mysql-dh3ily`,`pass` 设置为 `testing`, `user` 设置为 `root`,如下所示。请注意,您 MySQL 服务的**名称可能不同**。编辑完成后,点击**安装**。 + +6. 等待 MySQL Exporter 启动并运行。 + +## 步骤 3:创建监控面板 + +您可以为 MySQL 创建监控面板,并将指标实时可视化。 + +1. 在同一项目中,选择侧边栏中**监控告警**下的**自定义监控**,点击**创建**。 + +2. 在弹出的对话框中,为监控面板设置名称(例如,`mysql-overview`)并选择 MySQL 模板。点击**下一步**继续。 + +3. 
点击右上角的**保存模板**保存该模板。新创建的监控面板会显示在**自定义监控面板**页面。 + + {{< notice note >}} + +- 内置 MySQL 模板由 KubeSphere 提供,以便您监控 MySQL 的各项指标。您也可以按需在监控面板上添加更多指标。 + +- 有关监控面板上各属性的更多信息,请参见[可视化](../../../../project-user-guide/custom-application-monitoring/visualization/overview/)。 + {{}} \ No newline at end of file diff --git a/content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/examples/monitor-sample-web.md b/content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/examples/monitor-sample-web.md new file mode 100644 index 000000000..1e62377a5 --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/examples/monitor-sample-web.md @@ -0,0 +1,72 @@ +--- +title: "监控示例 Web 应用程序" +keywords: '监控, prometheus, prometheus operator' +description: '使用 Helm Chart 来部署示例 Web 应用程序并为该应用程序创建监控面板。' +linkTitle: "监控示例 Web 应用程序" +weight: 10813 +--- + +本教程演示如何监控示例 Web 应用程序。该应用程序在代码中已接入 Prometheus Go 客户端,因此可以直接暴露指标,无需使用导出器 (Exporter)。 + +## 准备工作 + +- 请确保[已启用 OpenPitrix 系统](../../../../pluggable-components/app-store/)。 +- 您需要创建一个企业空间、一个项目和一个用户。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../../quick-start/create-workspace-and-project/)。该用户需要是平台普通用户,邀请至该企业空间中并赋予 `self-provisioner` 角色。故请创建一个 `workspace-self-provisioner` 用户,赋予 `self-provisioner` 角色,并使用该用户创建一个项目(例如 `test`)。在本教程中,您以 `workspace-self-provisioner` 身份登录控制台,并在 `demo-workspace` 企业空间的 `test` 项目中进行操作。 + +- 了解 Helm Chart 和 [PromQL](https://prometheus.io/docs/prometheus/latest/querying/examples/)。 + +## 动手实验 + +### 步骤 1:准备示例 Web 应用程序的镜像 + +示例 Web 应用程序暴露一个名为 `myapp_processed_ops_total` 的用户定义指标。这是一个计数器型指标,计算已处理操作的数量。计数器每 2 秒自动增加 1。 + +该示例应用程序通过 Endpoint `http://localhost:2112/metrics` 暴露应用具体指标。 + +在本教程中,您可以使用现成的镜像 `kubespheredev/promethues-example-app`。源代码请见 [kubesphere/prometheus-example-app](https://github.com/kubesphere/prometheus-example-app)。您也可以按照 Prometheus 官方文档 [Instrument A Go Application For Prometheus](https://prometheus.io/docs/guides/go-application/) 进行操作。 + +### 步骤 2:将应用程序打包成 Helm Chart + +将部署、服务和 ServiceMonitor 的 YAML 模板打包成一个 Helm Chart 用来复用。在部署和服务模板中,您可以为指标 Endpoint 定义示例 Web 容器和端口。ServiceMonitor 是由 Prometheus Operator 定义和使用的自定义资源,它连接您的应用程序和 KubeSphere 监控引擎 (Prometheus),以便监控引擎知道抓取指标的位置和方式。KubeSphere 在未来发布版本中将提供易于操作的图形用户界面。 + +源代码请见 [kubesphere/prometheus-example-app](https://github.com/kubesphere/prometheus-example-app) 的 `helm` 文件夹。Helm Chart 包已准备完成并命名为 `prometheus-example-app-0.1.0.tgz`。请下载该 .tgz 文件,用于下面的步骤。 + +### 步骤 3:上传 Helm Chart + +1. 在 `demo-workspace` 企业空间的**概览**页面上转到**应用管理**下的**应用模板**。 + +2. 点击**创建**,上传 `prometheus-example-app-0.1.0.tgz`。 + +### 步骤 4:部署示例 Web 应用程序 + +您需要将示例 Web 应用程序部署至 `test` 项目,可以简单运行一个测试部署用于演示。 + +1. 点击 `prometheus-example-app`。 + +2. 展开菜单,点击**安装**。 + +3. 请确保将示例 Web 应用程序部署至 `test` 项目,点击**下一步**。 + +4. 请确保将 `serviceMonitor.enabled` 设置为 `true`,点击**安装**。 + +5. 在 `test` 项目的**工作负载**下,稍等片刻待示例 Web 应用程序启动并运行。 + +### 步骤 5:创建监控面板 + +该部分演示如何从零创建监控面板。您需要创建一个显示已处理操作总数的文本图表和一个显示操作率的折线图。 + +1. 转到**自定义监控**,点击**创建**。 + +2. 设置名称(例如 `sample-web`),点击**下一步**。 + +3. 在左上角输入标题(例如 `示例 Web 概览`)。 + +4. 点击左列的 icon,创建文本图表。 + +5. 在**监控指标**字段输入 PromQL 表达式 `myapp_processed_ops_total`,并设置图表名称(例如 `操作数`)。点击右下角的 **√** 继续。 + +6. 点击**添加监控项**,选择**折线图**,然后点击**确认**。 + +7. 在**监控指标**中输入 PromQL 表达式 `irate(myapp_processed_ops_total[3m])` 并将图表命名为 `操作率`。要改进外观,可以将**图例名称**设置为 `{{service}}`。它会用图例标签 `service` 的值命名每一段折线。然后将**精确位**设置为 `2`,以便将结果保留两位小数。点击右下角的 **√** 继续。 + +8. 
点击**保存模板**进行保存。 diff --git a/content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/introduction.md b/content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/introduction.md new file mode 100644 index 000000000..927154800 --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/introduction.md @@ -0,0 +1,54 @@ +--- +title: "介绍" +keywords: '监控, Prometheus, Prometheus Operator' +description: '介绍 KubeSphere 自定义监控功能和指标暴露,包括暴露方法和 ServiceMonitor CRD。' + +linkTitle: "介绍" +weight: 10810 +--- + +您可以使用 KubeSphere 的自定义监控功能以可视化的形式监控自定义应用指标。应用可以是第三方应用,例如 MySQL、Redis 和 Elasticsearch,也可以是您自己的应用。本文介绍自定义监控功能的使用流程。 + +KubeSphere 的监控引擎基于 Prometheus 和 Prometheus Operator。总体而言,要在 KubeSphere 中集成自定义应用指标,您需要执行以下步骤: + +- 为您的应用[暴露 Prometheus 格式的指标](#步骤-1暴露-prometheus-格式的指标)。 +- [应用 ServiceMonitor CRD](#步骤-2应用-servicemonitor-crd) 将应用程序与监控目标挂钩。 +- [实现指标可视化](#步骤-3实现指标可视化)从而在监控面板上查看自定义指标趋势。 + +### 步骤 1:暴露 Prometheus 格式的指标 + +首先,您的应用必须暴露 Prometheus 格式的指标。Prometheus 暴露格式已经成为云原生监控领域事实上的标准格式。Prometheus 使用[基于文本的暴露格式](https://prometheus.io/docs/instrumenting/exposition_formats/)。取决于您的应用和使用场景,您可以采用以下两种方式暴露指标。 + +#### 直接暴露 + +直接暴露 Prometheus 格式的应用指标是云原生应用的常用方式。这种方式需要开发者在代码中导入 Prometheus 客户端库并在特定的端点 (Endpoint) 暴露指标。许多应用,例如 etcd、CoreDNS 和 Istio,都采用这种方式。 + +Prometheus 社区为大多数编程语言提供了客户端库。您可以在 [Prometheus Client Libraries](https://prometheus.io/docs/instrumenting/clientlibs/) 页面查看支持的语言。使用 Go 语言的开发者可参阅 [Instrumenting a Go Application for Prometheus](https://prometheus.io/docs/guides/go-application/) 了解如何编写符合 Prometheus 规范的应用程序。 + +[示例 Web 应用](../examples/monitor-sample-web/)演示了如何直接暴露 Prometheus 格式的应用指标。 + +#### 间接暴露 + +如果您不希望修改代码,或者由于应用由第三方提供您无法修改代码,您可以部署一个导出器作为代理,用于抓取指标数据并将其转换成 Prometheus 格式。 + +Prometheus 社区为大多数第三方应用,例如 MySQL,提供了生产就绪的导出器。您可以在 [Exporters and Integrations](https://prometheus.io/docs/instrumenting/exporters/) 页面查看可用的导出器。在 KubeSphere 中,建议[启用 OpenPitrix](../../../pluggable-components/app-store/) 并从应用商店部署导出器。应用商店中内置了面向 MySQL、Elasticsearch 和 Redis 的导出器。 + +请参阅[监控 MySQL](../examples/monitor-mysql/) 了解如何部署 MySQL 导出器并监控 MySQL 指标。 + +编写导出器与用 Prometheus 客户端库对应用进行监测类似,唯一的不同在于导出器需要连接应用并将应用指标转换成 Prometheus 格式。 + +### 步骤 2:应用 ServiceMonitor CRD + +在上一步,您已经在 Kubernetes Service 对象中暴露了指标端点。在此步骤中您需要将这些新变更告知 KubeSphere 监控引擎。 + +ServiceMonitor CRD 由 [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator) 定义。ServiceMonitor 包含指标端点的信息。KubeSphere 监控引擎通过 ServiceMonitor 对象获知从何处以及如何抓取指标。对于每一个监控目标,您需要应用一个 ServiceMonitor 对象以使应用程序(或导出器)与 KubeSphere 挂钩。 + +在 KubeSphere v3.0.0,您需要将 ServiceMonitor 和应用(或导出器)打包到 Helm Chart 中以便重复使用。在未来的版本中,KubeSphere 将提供图形化界面以方便操作。 + +请参阅[监控示例 Web 应用](../examples/monitor-sample-web/)了解如何打包 ServiceMonitor 和应用。 + +### 步骤 3:实现指标可视化 + +大约两分钟后,KubeSphere 监控引擎开始抓取和存储指标,随后您可以使用 PromQL 查询指标并设计操作面板和监控面板。 + +请参阅[查询](../visualization/querying/)了解如何编写 PromQL 表达式。有关监控面板功能的更多信息,请参阅[可视化](../visualization/overview/)。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/_index.md b/content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/_index.md new file mode 100644 index 000000000..855978bd8 --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "可视化" +weight: 10814 + +_build: + render: false +--- diff --git a/content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/overview.md 
b/content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/overview.md new file mode 100644 index 000000000..b9def6f17 --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/overview.md @@ -0,0 +1,71 @@ +--- +title: "概述" +keywords: '监控, Prometheus, Prometheus Operator' +description: '了解创建监控仪表板的一般步骤及其布局。' +linkTitle: "概述" +weight: 10815 +--- + +本节介绍监控面板功能。您将会学习如何在 KubeSphere 中为您的自定义应用实现指标数据的可视化。如果您不知道如何将应用指标集成至 KubeSphere 的监控系统,请先参阅[介绍](../../introduction/)。 + +## 创建监控面板 + +您可以在项目的**监控告警**下的**自定义监控**页面为应用指标创建监控面板。共有三种方式可创建监控面板:使用内置模板创建、使用空白模板进行自定义或者使用 YAML 文件创建。 + +内置模板包括 MySQL、Elasticsearch、Redis等。这些模板仅供演示使用,并会根据 KubeSphere 新版本的发布同步更新。此外,您还可以创建自定义监控面板。 + +KubeSphere 自定义监控面板可以视作为一个 YAML 配置文件。数据模型主要基于 [Grafana](https://github.com/grafana/grafana)(一个用于监控和可观测性的开源工具)创建,您可以在 [kubesphere/monitoring-dashboard](https://github.com/kubesphere/monitoring-dashboard) 中找到 KubeSphere 监控面板数据模型的设计。该配置文件便捷,可进行分享,欢迎您通过 [Monitoring Dashboards Gallery](https://github.com/kubesphere/monitoring-dashboard/tree/master/contrib/gallery) 对 KubeSphere 社区贡献面板模板。 + +### 使用内置模板 + +KubeSphere 为 MySQL、Elasticsearch 和 Redis 提供内置模板方便您快速创建监控面板。如果您想使用内置模板,请选择一种并点击**下一步**。 + +### 使用空白模板 + +若要使用空白模板,请直接点击**下一步**。 + +### 使用 YAML 文件 + +打开右上角的**编辑 YAML** 并粘贴您的面板 YAML 文件。 + +## 面板布局 + +监控面板包括四个部分,全局设置位于页面顶部,最左侧栏以文本图表的形式显示当前指标的数值,中间栏包括多个图表,显示指标在一段时间内的变化,最右侧栏显示图表中的详细信息。 + +### 顶部栏 + +在顶部栏中,您可以配置以下设置:名称、主题、时间范围和刷新间隔。 + +### 文本图表栏 + +您可以在最左侧栏中添加新的文本图表。 + +### 图表显示栏 + +您可以在中间栏中查看图表。 + +### 详情栏 + +您可以在最右侧栏中查看图表详情,包括一段时间内指标的 **max**、**min**、**avg** 和 **last** 等数值。 + +## 编辑监控面板 + +您可以在右上角点击**编辑模板**以修改当前模板。 + +### 添加图表 + +若要添加文本图表,点击左侧栏中的 ➕。若要在中间栏添加图表,点击右下角的**添加监控项**。 + +### 添加监控组 + +若要将监控项分组,您可以点击 icon 将右侧的项目拖放至目标组。若要添加新的分组,点击**添加监控组**。如果您想修改监控组的位置,请将鼠标悬停至监控组上并点击右侧的 。 + +{{< notice note >}} + +监控组在右侧所显示的位置和中间栏图表的位置一致。换言之,如果您修改监控组在右侧的顺序,其所对应的图表位置也会随之变化。 + +{{}} + +## 面板模板 + +您可以在 [Monitoring Dashboards Gallery](https://github.com/kubesphere/monitoring-dashboard/tree/master/contrib/gallery) 中找到并分享面板模板,KubeSphere 社区用户可以在这里贡献他们模板设计。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/panel.md b/content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/panel.md new file mode 100644 index 000000000..457b7b14d --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/panel.md @@ -0,0 +1,34 @@ +--- +title: "图表" +keywords: '监控, Prometheus, Prometheus Operator' +description: '探索仪表板属性和图表指标。' +linkTitle: "图表" +weight: 10816 +--- + +KubeSphere 当前支持两种图表:文本图表和图形图表。 + +## 文本图表 + +文本图表适合显示单个指标的数值。文本图表的编辑窗口包括两部分,上半部分显示指标的实时数值,下半部分可进行编辑。您可以输入 PromQL 表达式以获取单个指标的数值。 + +- **图表名称**:该文本图表的名称。 +- **单位**:指标数据的单位。 +- **精确位**:支持整数。 +- **监控指标**:从包含可用 Prometheus 指标的下拉列表中指定一个监控指标。 + +## 图形图表 + +图形图表适合显示多个指标的数值。图形图表的编辑窗口包括三部分,上半部分显示指标的实时数值,左侧栏用于设置图表主题,右侧栏用于编辑指标和图表描述。 + +- **图表类型**:支持折线图和柱状图。 +- **图例类型**:支持基础图和堆叠图。 +- **图表配色**:修改图表各个指标的颜色。 +- **图表名称**:图表的名称。 +- **描述信息**:图表描述。 +- **添加**:新增查询编辑器。 +- **图例名称**:图表中线条的图例名称,支持参数。例如 `{{pod}}` 表示使用 Prometheus 指标标签 `pod` 来给图表中的线条命名。 +- **间隔**:两个数据点间的步骤值 (Step Value)。 +- **监控指标**:包含可用的 Prometheus 指标。 +- **单位**:指标数据的单位。 +- **精确位**:支持整数。 diff --git a/content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/querying.md b/content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/querying.md new file mode 100644 index 000000000..aed99b5e0 --- /dev/null +++ 
b/content/zh/docs/v3.4/project-user-guide/custom-application-monitoring/visualization/querying.md @@ -0,0 +1,13 @@ +--- +title: "查询" +keywords: '监控, Prometheus, Prometheus Operator, 查询' +description: '了解如何指定监控指标。' +linkTitle: "查询" +weight: 10817 +--- + +在查询编辑器中,在**监控指标**中输入 PromQL 表达式以处理和获取指标。若要了解如何编写 PromQL,请参阅 [Query Examples](https://prometheus.io/docs/prometheus/latest/querying/examples/)。 + +![查询编辑器-文本图表](/images/docs/v3.3/zh-cn/project-user-guide/custom-application-monitoring/visualization/querying/text-chart-edit.png) + +![查询编辑器-图形图表](/images/docs/v3.3/zh-cn/project-user-guide/custom-application-monitoring/visualization/querying/graph-chart-edit.png) \ No newline at end of file diff --git a/content/zh/docs/v3.4/project-user-guide/grayscale-release/_index.md b/content/zh/docs/v3.4/project-user-guide/grayscale-release/_index.md new file mode 100644 index 000000000..83875c1cc --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/grayscale-release/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "灰度发布" +weight: 10500 + +_build: + render: false +--- diff --git a/content/zh/docs/v3.4/project-user-guide/grayscale-release/blue-green-deployment.md b/content/zh/docs/v3.4/project-user-guide/grayscale-release/blue-green-deployment.md new file mode 100644 index 000000000..3b027f3bf --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/grayscale-release/blue-green-deployment.md @@ -0,0 +1,74 @@ +--- +title: "蓝绿部署" +keywords: 'KubeSphere, Kubernetes, 服务网格, istio, 发布, 蓝绿部署' +description: '了解如何在 KubeSphere 中发布蓝绿部署。' + +linkTitle: "蓝绿部署" +weight: 10520 +--- + + +蓝绿发布提供零宕机部署,即在保留旧版本的同时部署新版本。在任何时候,只有其中一个版本处于活跃状态,接收所有流量,另一个版本保持空闲状态。如果运行出现问题,您可以快速回滚到旧版本。 + +![blue-green-0](/images/docs/v3.3/zh-cn/project-user-guide/grayscale-release/blue-green-deployment/blue-green-0.PNG) + + +## 准备工作 + +- 您需要启用 [KubeSphere 服务网格](../../../pluggable-components/service-mesh/)。 +- 您需要创建一个企业空间、一个项目和一个用户 (`project-regular`),务必邀请该用户到项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要启用**应用治理**并有一个可用应用,以便您可以实现该应用的蓝绿部署。本教程使用示例应用 Bookinfo。有关更多信息,请参见[部署 Bookinfo 和管理流量](../../../quick-start/deploy-bookinfo-to-k8s/)。 + +## 创建蓝绿部署任务 + +1. 以 `project-regular` 身份登录 KubeSphere,前往**灰度发布**页面,在**发布模式**选项卡下,点击**蓝绿部署**右侧的**创建**。 + +2. 输入名称然后点击**下一步**。 + +3. 在**服务设置**选项卡,从下拉列表选择您的应用以及想实现蓝绿部署的服务。如果您也使用示例应用 Bookinfo,请选择 **reviews** 并点击**下一步**。 + +4. 在**新版本设置**选项卡,添加另一个版本(例如 `kubesphere/examples-bookinfo-reviews-v2:1.16.2`),然后点击**下一步**。 + +5. 在**策略设置**选项卡,要让应用版本 `v2` 接管所有流量,请选择**接管**,然后点击**创建**。 + +6. 蓝绿部署任务创建后,会显示在**任务状态**选项卡下。点击可查看详情。 + +7. 稍等片刻后,您可以看到所有流量都流向 `v2` 版本。 + +8. 新的**部署**也已创建。 + +9. 您可以执行以下命令直接获取虚拟服务来查看权重: + + ```bash + kubectl -n demo-project get virtualservice -o yaml + ``` + + {{< notice note >}} + + - 当您执行上述命令时,请将 `demo-project` 替换成您自己的项目(即命名空间)名称。 + - 如果您想使用 KubeSphere 控制台上的 Web Kubectl 来执行命令,则需要使用 `admin` 帐户。 + + {{}} + +10. 预期输出结果: + + ```yaml + ... + spec: + hosts: + - reviews + http: + - route: + - destination: + host: reviews + port: + number: 9080 + subset: v2 + weight: 100 + ... 
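+        # 注释说明(示例标注,非命令实际输出):选择“接管”后,route 中仅保留 subset: v2 且 weight 为 100,
+        # 即所有流量均由新版本 v2 处理;如需回滚,可再次调整发布任务,使 destination 指回 subset: v1。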
+ ``` + +## 下线任务 + +待您实现蓝绿部署并且结果满足您的预期,您可以点击**删除**来移除 `v1` 版本,从而下线任务。 + diff --git a/content/zh/docs/v3.4/project-user-guide/grayscale-release/canary-release.md b/content/zh/docs/v3.4/project-user-guide/grayscale-release/canary-release.md new file mode 100644 index 000000000..f69777572 --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/grayscale-release/canary-release.md @@ -0,0 +1,127 @@ +--- +title: "金丝雀发布" +keywords: 'KubeSphere, Kubernetes, 金丝雀发布, istio, 服务网格' +description: '了解如何在 KubeSphere 中部署金丝雀服务。' +linkTitle: "金丝雀发布" +weight: 10530 +--- + +KubeSphere 基于 [Istio](https://istio.io/) 向用户提供部署金丝雀服务所需的控制功能。在金丝雀发布中,您可以引入服务的新版本,并向其发送一小部分流量来进行测试。同时,旧版本负责处理其余的流量。如果一切顺利,您就可以逐渐增加向新版本发送的流量,同时逐步停用旧版本。如果出现任何问题,您可以用 KubeSphere 更改流量比例来回滚至先前版本。 + +该方法能够高效地测试服务性能和可靠性,有助于在实际环境中发现潜在问题,同时不影响系统整体稳定性。 + +![canary-release-0](/images/docs/v3.3/zh-cn/project-user-guide/grayscale-release/canary-release/canary-release-0.png) + +## 视频演示 + + + +## 准备工作 + +- 您需要启用 [KubeSphere 服务网格](../../../pluggable-components/service-mesh/)。 +- 您需要启用 [KubeSphere 日志系统](../../../pluggable-components/logging/)以使用 Tracing 功能。 +- 您需要创建一个企业空间、一个项目和一个用户 (`project-regular`)。请务必邀请该用户至项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要开启**应用治理**并有一个可用应用,以便实现该应用的金丝雀发布。本教程中使用的示例应用是 Bookinfo。有关更多信息,请参见[部署 Bookinfo 和管理流量](../../../quick-start/deploy-bookinfo-to-k8s/)。 + +## 步骤 1:创建金丝雀发布任务 + +1. 以 `project-regular` 身份登录 KubeSphere 控制台,转到**灰度发布**页面,在**发布模式**选项卡下,点击**金丝雀发布**右侧的**创建**。 + +2. 设置任务名称,点击**下一步**。 + +3. 在**服务设置**选项卡,从下拉列表中选择您的应用和要实现金丝雀发布的服务。如果您同样使用示例应用 Bookinfo,请选择 **reviews** 并点击**下一步**。 + +4. 在**新版本设置**选项卡,添加另一个版本(例如 `kubesphere/examples-bookinfo-reviews-v2:1.16.2`;将 `v1` 改为 `v2`)并点击**下一步**。 + +5. 您可以使用具体比例或者使用请求内容(例如 `Http Header`、`Cookie` 和 `URI`)分别向这两个版本(`v1` 和 `v2`)发送流量。选择**指定流量分配**,并拖动中间的滑块来更改向这两个版本分别发送的流量比例(例如设置为各 50%)。操作完成后,点击**创建**。 + +## 步骤 2:验证金丝雀发布 + +现在您有两个可用的应用版本,请访问该应用以验证金丝雀发布。 + +1. 访问 Bookinfo 网站,重复刷新浏览器。您会看到 **Book Reviews** 板块以 50% 的比例在 v1 版本和 v2 版本之间切换。 + +2. 金丝雀发布任务创建后会显示在**任务状态**选项卡下。点击该任务查看详情。 + +3. 您可以看到每个版本分别收到一半流量。 + +4. 新的部署也已创建。 + +5. 您可以执行以下命令直接获取虚拟服务来识别权重: + + ```bash + kubectl -n demo-project get virtualservice -o yaml + ``` + + {{< notice note >}} + +- 当您执行上述命令时,请将 `demo-project` 替换为您自己项目(即命名空间)的名称。 +- 如果您想在 KubeSphere 控制台使用 Web kubectl 执行命令,则需要使用 `admin` 帐户登录。 + +{{}} + +6. 预期输出: + + ```bash + ... + spec: + hosts: + - reviews + http: + - route: + - destination: + host: reviews + port: + number: 9080 + subset: v1 + weight: 50 + - destination: + host: reviews + port: + number: 9080 + subset: v2 + weight: 50 + ... + ``` + +## 步骤 3:查看网络拓扑 + +1. 在运行 KubeSphere 的机器上执行以下命令引入真实流量,每 0.5 秒模拟访问一次 Bookinfo。 + + ```bash + watch -n 0.5 "curl http://productpage.demo-project.192.168.0.2.nip.io:32277/productpage?u=normal" + ``` + + {{< notice note >}} + 请确保将以上命令中的主机名和端口号替换成您自己环境的。 + {{}} + +2. 在**流量监控**中,您可以看到不同服务之间的通信、依赖关系、运行状态及性能。 + +3. 点击组件(例如 **reviews**),在右侧可以看到流量监控信息,显示**流量**、**成功率**和**持续时间**的实时数据。 + +## 步骤 4:查看链路追踪详情 + +KubeSphere 提供基于 [Jaeger](https://www.jaegertracing.io/) 的分布式追踪功能,用来对基于微服务的分布式应用程序进行监控及故障排查。 + +1. 在**链路追踪**选项卡中,可以清楚地看到请求的所有阶段及内部调用,以及每个阶段的调用耗时。 + +2. 点击任意条目,可以深入查看请求的详细信息及该请求被处理的位置(在哪个机器或者容器)。 + +## 步骤 5:接管所有流量 + +如果一切运行顺利,则可以将所有流量引入新版本。 + +1. 在**任务状态**中,点击金丝雀发布任务。 + +2. 在弹出的对话框中,点击 **reviews v2** 右侧的 icon,选择**接管**。这代表 100% 的流量将会被发送到新版本 (v2)。 + + {{< notice note >}} + 如果新版本出现任何问题,可以随时回滚到之前的 v1 版本。 + {{}} + +3. 
再次访问 Bookinfo,多刷新几次浏览器,您会发现页面只会显示 **reviews v2** 的结果(即带有黑色星标的评级)。 + + diff --git a/content/zh/docs/v3.4/project-user-guide/grayscale-release/overview.md b/content/zh/docs/v3.4/project-user-guide/grayscale-release/overview.md new file mode 100644 index 000000000..e87963f51 --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/grayscale-release/overview.md @@ -0,0 +1,39 @@ +--- +title: "概述" +keywords: 'Kubernetes, KubeSphere, 灰度发布, 概述, 服务网格' +description: '了解灰度发布的基本概念。' +linkTitle: "概述" +weight: 10510 +--- + +现代云原生应用程序通常由一组可独立部署的组件组成,这些组件也称作微服务。在微服务架构中,每个服务网络执行特定功能,开发者能够非常灵活地对应用做出调整,不会影响服务网络。这种组成应用程序的微服务网络也称作**服务网格**。 + +KubeSphere 服务网格基于开源项目 [Istio](https://istio.io/) 构建,可以控制应用程序不同部分之间的通信方式。其中,灰度发布策略为用户在不影响微服务之间通信的情况下测试和发布新的应用版本发挥了重要作用。 + +## 灰度发布策略 + +当您在 KubeSphere 中升级应用至新版本时,灰度发布可以确保平稳过渡。采用的具体策略可能不同,但最终目标相同,即提前识别潜在问题,避免影响在生产环境中运行的应用。这样不仅可以将版本升级的风险降到最低,还能测试应用新构建版本的性能。 + +KubeSphere 为用户提供三种灰度发布策略。 + +### [蓝绿部署](../blue-green-deployment/) + +蓝绿部署会创建一个相同的备用环境,在该环境中运行新的应用版本,从而为发布新版本提供一个高效的方式,不会出现宕机或者服务中断。通过这种方法,KubeSphere 将所有流量路由至其中一个版本,即在任意给定时间只有一个环境接收流量。如果新构建版本出现任何问题,您可以立刻回滚至先前版本。 + +### [金丝雀发布](../canary-release/) + +金丝雀部署缓慢地向一小部分用户推送变更,从而将版本升级的风险降到最低。具体来讲,您可以在高度响应的仪表板上进行定义,选择将新的应用版本暴露给一部分生产流量。另外,您执行金丝雀部署后,KubeSphere 会监控请求,为您提供实时流量的可视化视图。在整个过程中,您可以分析新的应用版本的行为,选择逐渐增加向它发送的流量比例。待您对构建版本有把握后,便可以把所有流量路由至该构建版本。 + +### [流量镜像](../traffic-mirroring/) + +流量镜像复制实时生产流量并发送至镜像服务。默认情况下,KubeSphere 会镜像所有流量,您也可以指定一个值来手动定义镜像流量的百分比。常见用例包括: + +- 测试新的应用版本。您可以对比镜像流量和生产流量的实时输出。 +- 测试集群。您可以将实例的生产流量用于集群测试。 +- 测试数据库。您可以使用空数据库来存储和加载数据。 + +{{< notice note >}} + +当前版本的 KubeSphere 暂不支持为多集群应用创建灰度发布策略。 + +{{}} \ No newline at end of file diff --git a/content/zh/docs/v3.4/project-user-guide/grayscale-release/traffic-mirroring.md b/content/zh/docs/v3.4/project-user-guide/grayscale-release/traffic-mirroring.md new file mode 100644 index 000000000..57f8998a9 --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/grayscale-release/traffic-mirroring.md @@ -0,0 +1,81 @@ +--- +title: "流量镜像" +keywords: 'KubeSphere, Kubernetes, 流量镜像, Istio' +description: '了解如何在 KubeSphere 中执行流量镜像任务。' +linkTitle: "流量镜像" +weight: 10540 +--- + +流量镜像 (Traffic Mirroring),也称为流量影子 (Traffic Shadowing),是一种强大的、无风险的测试应用版本的方法,它将实时流量的副本发送给被镜像的服务。采用这种方法,您可以搭建一个与原环境类似的环境以进行验收测试,从而提前发现问题。由于镜像流量存在于主服务关键请求路径带外,终端用户在测试全过程不会受到影响。 + +## 准备工作 + +- 您需要启用 [KubeSphere 服务网络](../../../pluggable-components/service-mesh/)。 +- 您需要创建一个企业空间、一个项目和一个用户(例如 `project-regular`)。该用户必须已邀请至该项目,并具有 `operator` 角色。有关更多信息,请参阅[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要启用**应用治理**,并有可用的应用,以便为该应用进行流量镜像。本教程以 Bookinfo 为例。有关更多信息,请参阅[部署 Bookinfo 和管理流量](../../../quick-start/deploy-bookinfo-to-k8s/)。 + +## 创建流量镜像任务 + +1. 以 `project-regular` 用户登录 KubeSphere 并进入项目。前往**灰度发布**页面,在页面右侧点击**流量镜像**右侧的**创建**。 + +2. 设置发布任务的名称并点击**下一步**。 + +3. 在**服务设置**选项卡,从下拉列表中选择需要进行流量镜像的应用和对应的服务(本教程以 Bookinfo 应用的 reviews 服务为例),然后点击**下一步**。 + +4. 在**新版本设置**选项卡,为应用添加另一个版本(例如 `kubesphere/examples-bookinfo-reviews-v2:1.16.2`;将 `v1` 改为 `v2`),然后点击**下一步**。 + +5. 在**策略设置**选项卡,点击**创建**。 + +6. 新建的流量镜像任务显示在**任务状态**页面。点击该任务查看详情。 + +7. 在详情页面,您可以看到流量被镜像至 `v2` 版本,同时折线图中显示实时流量。 + +8. 新建的部署也显示在**工作负载**下的**部署**页面。 + +9. 您可以执行以下命令查看虚拟服务的 `mirror` 和 `weight` 字段。 + + ```bash + kubectl -n demo-project get virtualservice -o yaml + ``` + + {{< notice note >}} + + - 请将上述命令中的 `demo-project` 修改成实际的项目(即命名空间)名称。 + - 您需要以 `admin` 用户重新登录才能在 KubeSphere 控制台的 Web kubectl 页面执行上述命令。 + + {{}} + +10. 预期输出结果: + + ```bash + ... 
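+    # 注释说明(示例标注,非命令实际输出):下方 route 将 100% 流量发送至 v1,
+    # mirror 字段则把流量副本发送给 v2 镜像服务,其响应会被丢弃。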
+ spec: + hosts: + - reviews + http: + - route: + - destination: + host: reviews + port: + number: 9080 + subset: v1 + weight: 100 + mirror: + host: reviews + port: + number: 9080 + subset: v2 + ... + ``` + + 此路由规则将 100% 流量发送至 `v1`。`mirror` 部分的字段指定将流量镜像至 `reviews v2` 服务。当流量被镜像时,发送至镜像服务的请求的 Host/Authority 头部会附带 `-shadow` 标识。例如, `cluster-1` 会变成 `cluster-1-shadow`。 + + {{< notice note >}} + +这些请求以 Fire and Forget 方式镜像,亦即请求的响应会被丢弃。您可以指定 `weight` 字段来只镜像一部分而不是全部流量。如果该字段缺失,为与旧版本兼容,所有流量都会被镜像。有关更多信息,请参阅 [Mirroring](https://istio.io/v1.5/pt-br/docs/tasks/traffic-management/mirroring/)。 + +{{}} + +## 下线任务 + +您可以点击**删除**移除流量镜像任务。此操作不会影响当前的应用版本。 diff --git a/content/zh/docs/v3.4/project-user-guide/image-builder/_index.md b/content/zh/docs/v3.4/project-user-guide/image-builder/_index.md new file mode 100644 index 000000000..d05c53dda --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/image-builder/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "镜像构建器" +weight: 10600 + +_build: + render: false +--- diff --git a/content/zh/docs/v3.4/project-user-guide/image-builder/binary-to-image.md b/content/zh/docs/v3.4/project-user-guide/image-builder/binary-to-image.md new file mode 100644 index 000000000..d08c5daf6 --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/image-builder/binary-to-image.md @@ -0,0 +1,148 @@ +--- +title: "Binary to Image:发布制品到 Kubernetes" +keywords: "KubeSphere, Kubernetes, Docker, B2I, Binary-to-Image" +description: "如何使用 Binary-to-Image 发布制品到 Kubernetes。" +linkTitle: "Binary to Image:发布制品到 Kubernetes" +weight: 10620 +--- + +Binary-to-Image (B2I) 是一个工具箱和工作流,用于从二进制可执行文件(例如 Jar、War 和二进制包)构建可再现容器镜像。更确切地说,您可以上传一个制品并指定一个目标仓库,例如 Docker Hub 或者 Harbor,用于推送镜像。如果一切运行成功,会推送您的镜像至目标仓库,并且如果您在工作流中创建服务 (Service),也会自动部署应用程序至 Kubernetes。 + +在 B2I 工作流中,您不需要编写 Dockerfile。这不仅能降低学习成本,也能提升发布效率,使用户更加专注于业务。 + +本教程演示在 B2I 工作流中基于制品构建镜像的两种不同方式。最终,镜像会发布至 Docker Hub。 + +以下是一些示例制品,用于演示和测试,您可以用来实现 B2I 工作流: + +| 制品包 | GitHub 仓库 | +| ------------------------------------------------------------ | ------------------------------------------------------------ | +| [b2i-war-java8.war](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-war-java8.war) | [spring-mvc-showcase](https://github.com/spring-projects/spring-mvc-showcase) | +| [b2i-war-java11.war](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-war-java11.war) | [springmvc5](https://github.com/kubesphere/s2i-java-container/tree/master/tomcat/examples/springmvc5) | +| [b2i-binary](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-binary) | [devops-go-sample](https://github.com/runzexia/devops-go-sample) | +| [b2i-jar-java11.jar](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-jar-java11.jar) | [ java-maven-example](https://github.com/kubesphere/s2i-java-container/tree/master/java/examples/maven) | +| [b2i-jar-java8.jar](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-jar-java8.jar) | [devops-maven-sample](https://github.com/kubesphere/devops-maven-sample) | + +## 视频演示 + + + +## 准备工作 + +- 您已启用 [KubeSphere DevOps 系统](../../../pluggable-components/devops/)。 +- 您需要创建一个 [Docker Hub](http://www.dockerhub.com/) 帐户,也支持 GitLab 和 Harbor。 +- 您需要创建一个企业空间、一个项目和一个用户 (`project-regular`),请务必邀请该用户至项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 +- 设置一个 CI 专用节点用于构建镜像。该操作不是必需,但建议开发和生产环境进行设置,专用节点会缓存依赖项并缩短构建时间。有关更多信息,请参见[为缓存依赖项设置 CI 
节点](../../../devops-user-guide/how-to-use/devops-settings/set-ci-node/)。 + +## 使用 Binary-to-Image (B2I) 创建服务 + +下图中的步骤展示了如何在 B2I 工作流中通过创建服务来上传制品、构建镜像并将其发布至 Kubernetes。 + +![服务构建](/images/docs/v3.3/zh-cn/project-user-guide/image-builder/binary-to-image/service-build.png) + +### 步骤 1:创建 Docker Hub 保密字典 + +您必须创建 Docker Hub 保密字典,以便将通过 B2I 创建的 Docker 镜像推送至 Docker Hub。以 `project-regular` 身份登录 KubeSphere,转到您的项目并创建一个 Docker Hub 保密字典。有关更多信息,请参见[创建常用保密字典](../../../project-user-guide/configuration/secrets/#创建常用保密字典)。 + +### 步骤 2:创建服务 + +1. 在该项目中,转到**应用负载**下的**服务**,点击**创建**。 + +2. 下拉至**通过制品构建服务**,选择 **WAR**。本教程使用 [spring-mvc-showcase](https://github.com/spring-projects/spring-mvc-showcase) 项目作为示例并上传 WAR 制品至 KubeSphere。设置一个名称,例如 `b2i-war-java8`,点击**下一步**。 + +3. 在**构建设置**页面,请提供以下相应信息,并点击**下一步**。 + + **服务类型**:本示例选择**无状态服务**。有关不同服务的更多信息,请参见[服务类型](../../../project-user-guide/application-workloads/services/#服务类型)。 + + **制品文件**:上传 WAR 制品 ([b2i-war-java8](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-war-java8.war))。 + + **构建环境**:选择 **kubesphere/tomcat85-java8-centos7:v2.1.0**。 + + **镜像名称**:输入 `/` 或 `/` 作为镜像名称。 + + **镜像标签**:镜像标签,请输入 `latest`。 + + **目标镜像仓库**:镜像会推送至 Docker Hub,故请选择 Docker Hub 保密字典。 + +4. 在**容器组设置**页面,下拉至**端口设置**,为容器设置访问策略。**协议**选择 **HTTP**,自定义名称(例如 `http-port`),**容器端口**和**服务端口**都输入 `8080`。点击**下一步**继续。 + + {{< notice note >}} + + 有关如何在**容器设置**页面设置其他参数的更多信息,请参见[容器组设置](../../../project-user-guide/application-workloads/container-image-settings/)。 + + {{}} + +5. 在**存储设置**页面,您可以为容器添加持久卷。有关更多信息,请参见[持久卷](../../../project-user-guide/storage/volumes/)。 + +6. 在**高级设置**页面,选中**外部访问**并选择 **NodePort** 作为访问方式。点击**创建**完成整个操作过程。 + +7. 点击左侧导航栏的**镜像构建器**,您可以看到正在构建示例镜像。 + +### 步骤 3:查看结果 + +1. 稍等片刻,您可以看到镜像构建器状态变为**成功**。 + +2. 点击该镜像前往其详情页面。在**任务记录**下,点击记录右侧的 查看构建日志。如果一切运行正常,您可以在日志末尾看到 `Build completed successfully`。 + +3. 回到**服务**、**部署**和**任务**页面,您可以看到该镜像相应的服务、部署和任务都已成功创建。 + +4. 在您的 Docker Hub 仓库,您可以看到 KubeSphere 已经向仓库推送了带有预期标签的镜像。 + +### 步骤 4:访问 B2I 服务 + +1. 在**服务**页面,请点击 B2I 服务前往其详情页面,您可以查看暴露的端口号。 + +2. 通过 `http://://` 访问服务。 + + {{< notice note >}} + + 取决于您的部署环境,您可能需要在安全组中放行端口并配置端口转发规则。 + + {{}} + +## 使用 Image Builder 构建镜像 + +前述示例通过创建服务来实现整个 B2I 工作流。此外,您也可以直接使用镜像构建器基于制品构建镜像,但这个方式不会将镜像发布至 Kubernetes。 + +![build-binary](/images/docs/v3.3/zh-cn/project-user-guide/image-builder/binary-to-image/build-binary.png) + +{{< notice note >}} + +请确保您已经创建了 Docker Hub 保密字典。有关更多信息,请参见[创建常用保密字典](../../../project-user-guide/configuration/secrets/#创建常用保密字典)。 + +{{}} + +### 步骤 1:上传制品 + +1. 以 `project-regular` 身份登录 KubeSphere,转到您的项目。 + +2. 在左侧导航栏中选择**镜像构建器**,然后点击**创建**。 + +3. 在弹出的对话框中,选择 **二进制** 并点击**下一步**。 + +4. 在**构建设置**页面,请提供以下相应信息,然后点击**创建**。 + + **上传制品**:下载 [b2i-binary](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-binary) 并上传至 KubeSphere。 + + **构建环境**:选择 **kubesphere/s2i-binary:v2.1.0**。 + + **镜像名称**:自定义镜像名称。 + + **镜像标签**:镜像标签,请输入 `latest`。 + + **目标镜像仓库**:镜像会推送至 Docker Hub,故请选择 Docker Hub 保密字典。 + +5. 在**镜像构建器**页面,您可以看到正在构建镜像。 + +### 步骤 2:检查结果 + +1. 稍等片刻,您可以看到镜像构建器状态变为**成功**。 + +2. 点击该镜像构建器前往其详情页面。在**任务记录**下,点击记录右侧的 查看构建日志。如果一切运行正常,您可以在日志末尾看到 `Build completed successfully`。 + +3. 前往**任务**页面,您可以看到该镜像相应的任务已成功创建。 + +4. 
在您的 Docker Hub 仓库,您可以看到 KubeSphere 已经向仓库推送了带有预期标签的镜像。 + + diff --git a/content/zh/docs/v3.4/project-user-guide/image-builder/s2i-and-b2i-webhooks.md b/content/zh/docs/v3.4/project-user-guide/image-builder/s2i-and-b2i-webhooks.md new file mode 100644 index 000000000..b35e6c0fe --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/image-builder/s2i-and-b2i-webhooks.md @@ -0,0 +1,84 @@ +--- +title: "配置 S2I 和 B2I Webhooks" +keywords: 'KubeSphere, Kubernetes, S2I, Source-to-Image, B2I, Binary-to-Image, Webhook' +description: '学习如何配置 S2I 和 B2I webhooks。' +linkTitle: "配置 S2I 和 B2I Webhooks" +weight: 10650 + +--- + +KubeSphere 提供 Source-to-Image (S2I) 和 Binary-to-Image (B2I) 功能,以自动化镜像构建、推送和应用程序部署。在 KubeSphere 3.3 中,您可以配置 S2I 和 B2I Webhook,以便当代码仓库中存在任何相关活动时,自动触发镜像构建器。 + +本教程演示如何配置 S2I 和 B2I webhooks。 + +## 准备工作 + +- 您需要启用 [KubeSphere DevOps 系统](../../../pluggable-components/devops/),该系统已集成 S2I。 +- 您需要创建一个创建企业空间,一个项目 (`demo-project`) 和一个用户 (`project-regular`)。`project-regular` 需要被邀请到项目中,并赋予 `operator` 角色。有关详细信息,请参考[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/#step-1-create-an-account)。 +- 您需要创建一个 S2I 镜像构建器和 B2I 镜像构建器。有关更多信息,请参见 [Source to Image:无需 Dockerfile 发布应用](../source-to-image/)和[Binary to Image:发布制品到 Kubernetes](../binary-to-image/)。 + +## 配置 S2I Webhook + +### 步骤 1: 暴露 S2I trigger 服务 + +1. 以 `admin` 身份登录 KubeSphere Web 控制台。在左上角点击**平台管理**,然后选择**集群管理**。 + +2. 前往**应用负载**下的**服务**,从下拉框中选择 **kubesphere-devops-system**,然后点击 **s2ioperator-trigger-service** 进入详情页面。 + +3. 点击**更多操作**,选择**编辑外部访问**。 + +4. 在弹出的对话框中,从**访问方式**的下拉菜单中选择 **NodePort**,然后点击**确定**。 + + {{< notice note >}} + + 本教程出于演示目的选择 **NodePort**。根据您的需要,您也可以选择 **LoadBalancer**。 + + {{}} + +5. 在详情界面可以查看 **NodePort**。S2I webhook URL 中将包含此 NodePort。 + +### 步骤 2:配置 S2I webhook + +1. 登出 KubeSphere 并以 `project-regular` 用户登回。然后转到 `demo-project`。 + +2. 在**镜像构建器**中,点击 S2I 镜像构建器,进入详情页面。 + +3. 您可以在**远程触发器**中看到自动生成的链接。复制 `/s2itrigger/v1alpha1/general/namespaces/demo-project/s2ibuilders/felixnoo-s2i-sample-latest-zhd/`,S2I webhook URL 中将包含这个链接。 + +4. 登录您的 GitHub 帐户,转到用于 S2I 镜像构建器的源代码仓库。转到 **Settings** 下的 **Webhooks**,然后点击 **Add webhook**。 + +5. 在 **Payload URL**,输入 `http://:/s2itrigger/v1alpha1/general/namespaces/demo-project/s2ibuilders/felixnoo-s2i-sample-latest-zhd/`。您可以按需选择触发事件,然后点击 **Add webhook**。本教程出于演示目的,选择 **Just the push event**。 + + {{< notice note >}} + + `` 是您自己的 IP 地址,`` 是您在第一步中获得的 NodePort。`/s2itrigger/v1alpha1/general/namespaces/demo-project/s2ibuilders/felixnoo-s2i-sample-latest-zhd/` 来自 S2I 的远程触发器链接。确保您用的是您自己的 IP 地址、Service NodePort 和 S2I 远程触发器链接。根据您 Kubernetes 集群的部署位置,您可能还需要配置必要的端口转发规则并在安全组中打开端口。 + + {{}} + +6. 添加 webhook 后,您可以点击 webhook 查看 **Recent Deliveries** 中的交付详细信息。如果有效负载 URL 有效,您可以看到绿色的勾号。 + +7. 完成上述所有操作后,如果源代码仓库中存在推送事件,则会自动触发 S2I 镜像构建器。 + +## 配置 B2I Webhook + +您可以按照相同的步骤配置 B2I webhook。 + +1. 暴露 S2I 触发器服务。 + +2. 在 B2I 镜像构建器的详细信息页面中查看**远程触发器**。 + +3. 在源代码仓库中添加有效负载 URL。B2I 有效负载 URL 格式与 S2I 有效负载 URL 格式相同。 + + {{< notice note >}} + + 根据您 Kubernetes 集群的部署位置,您可能需要配置必要的端口转发规则并在安全组中打开端口。 + + {{}} + +4. 
如果源代码仓库发生相关事件,B2I 镜像构建器将自动触发。 + + + + + + diff --git a/content/zh/docs/v3.4/project-user-guide/image-builder/s2i-introduction.md b/content/zh/docs/v3.4/project-user-guide/image-builder/s2i-introduction.md new file mode 100644 index 000000000..142e758b9 --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/image-builder/s2i-introduction.md @@ -0,0 +1,39 @@ +--- +title: "S2I 工作流程和逻辑" +keywords: 'KubeSphere, Kubernetes, Docker, S2I, Source-to-Image' +description: '了解 S2I 的工作原理及其为何按照预期工作。' +linkTitle: "S2I 工作流程和逻辑" +weight: 10630 +--- + +Source-to-Image (S2I) 是一个将源代码构建成镜像的自动化工具。S2I 将源代码注入负责编译的镜像构建器 (Image Builder) 中,然后自动将编译后的代码打包成 Docker 镜像。 + +有关如何在 KubeSphere 中使用 S2I 的更多信息,请参考 [Source to Image:无需 Dockerfile 发布应用](../source-to-image/)。此外,您还可以参考代码仓库 [S2IOperator](https://github.com/kubesphere/s2ioperator#source-to-image-operator) 和 [S2IRun](https://github.com/kubesphere/s2irun#s2irun) 查看更多详细信息。 + +## S2I 工作流程和逻辑 + +### 镜像构建器 + +对于 Python 和 Ruby 等解释型语言,程序的构建环境和运行时环境通常是相同的。例如,基于 Ruby 的镜像构建器通常包含 Bundler、Rake、Apache、GCC 以及其他构建运行时环境所需的安装包。构建的工作流程如下图所示: + +![s2i-builder](/images/docs/v3.3/zh-cn/project-user-guide/image-builder/s2i-intro/s2i-builder.png) + +### S2I 工作原理 + +S2I 执行以下步骤: + +1. 根据镜像构建器运行容器,并将应用程序的源代码注入到指定目录中。 +2. 执行镜像构建器中的 `assemble` 脚本,通过安装依赖项以及将源代码转移到工作目录下,将源代码构建成可直接运行的应用程序。 +3. 将镜像构建器中提供的 `run` 脚本设置为启动容器的镜像入口点,然后提交新的镜像作为供用户使用的应用程序镜像。 + +S2I 流程图如下: + +![s2i-flow](/images/docs/v3.3/zh-cn/project-user-guide/image-builder/s2i-intro/s2i-flow.png) + +### 运行时镜像 + +对于 Go、C、C++、Java 等编译型语言,编译时所需的依赖项会增加最终镜像的大小。为构建更轻量的镜像,S2I 实行分阶段构建,并从镜像中移除非必要的文件。镜像构建器完成构建后会导出制品,制品可能是 Jar 文件或二进制文件等可执行文件,然后会将制品注入运行时镜像 (Runtime Image) 用于执行。 + +构建的工作流程如下: + +![s2i-runtime-build](/images/docs/v3.3/zh-cn/project-user-guide/image-builder/s2i-intro/s2i-runtime-build.png) \ No newline at end of file diff --git a/content/zh/docs/v3.4/project-user-guide/image-builder/s2i-templates.md b/content/zh/docs/v3.4/project-user-guide/image-builder/s2i-templates.md new file mode 100644 index 000000000..39c322e7c --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/image-builder/s2i-templates.md @@ -0,0 +1,321 @@ +--- +title: "自定义 S2I 模板" +keywords: 'KubeSphere, Kubernetes, Docker, S2I, Source-to-Image' +description: '学习如何自定义 S2I 模板,并理解不同的模板参数。' +linkTitle: "自定义 S2I 模板" +weight: 10640 + +--- + +当您了解了 Source-to-Image (S2I) 的工作流和逻辑,就可以根据您的项目自定义镜像构建器模板(即 S2I / B2I 模板),以扩展 S2I 功能。KubeSphere 提供了几种常见的镜像构建器模板,如 [Python ](https://github.com/kubesphere/s2i-python-container/)和 [Java](https://github.com/kubesphere/s2i-java-container/)。 + +本教程演示如何创建包含 Nginx 服务的镜像构建器。如果需要在项目中使用运行时镜像,请参阅[本文档](https://github.com/kubesphere/s2irun/blob/master/docs/runtime_image.md)以了解有关如何创建运行时镜像的更多信息。 + +## 准备工作 + +S2I 模板自定义分成两部分。 + +- 第一部分:S2I 自定义镜像构建 + - assemble(必需):从源代码构建应用程序制品的脚本 `assemble`。 + - run(必需):用于运行应用程序的脚本。 + - save-artifacts(可选):管理增量构建过程中的所有依赖。 + - usage(可选):提供说明的脚本。 + - test (可选):用于测试的脚本。 +- 第二部分:S2I 模板定义 + +您需要提前准备好 S2I 模板定制所需的元素。 + +{{< notice note >}} + +与 OpenShift 的镜像构建器兼容,您可以在 KubeSphere 中重用它。有关 S2I 镜像构建器的更多信息,请参见 [S2IRun](https://github.com/kubesphere/s2irun/blob/master/docs/builder_image.md#s2i-builder-image-requirements)。 + +{{}} + +## 创建镜像构建器 + +### 步骤 1:准备 S2I 目录 + +1. 
[S2I 命令行工具](https://github.com/openshift/source-to-image/releases)提供了一个易于使用的命令来初始化构建器所需的基本目录结构。运行以下命令以安装S2I CLI。 + + ```bash + $ wget https://github.com/openshift/source-to-image/releases/download/v1.1.14/source-to-image-v1.1.14-874754de-linux-386.tar.gz + $ tar -xvf source-to-image-v1.1.14-874754de-linux-386.tar.gz + $ ls + s2i source-to-image-v1.1.14-874754de-linux-386.tar.gz sti + $ cp s2i /usr/local/bin + ``` + +2. 本教程使用 `nginx-centos7` 作为镜像构建器的名称。运行 `s2i create` 命令初始化基本目录结构。 + + ```bash + s2i create nginx-centos7 s2i-builder-docs + ``` + +3. 目录结构初始化如下。 + + ``` + s2i-builder-docs/ + Dockerfile - a standard Dockerfile to define the Image Builder + Makefile - a script for testing and building the Image Builder + test/ + run - a script that runs the application to test whether the Image Builder is working properly + test-app/ - directory of the test application + s2i/bin + assemble - a script that builds the application + run - a script that runs the application + usage - a script that prints the usage of the Image Builder + ``` + +### 步骤 2:修改 Dockerfile + +Dockerfile 安装用于构建和运行应用程序的的所有必要工具和库。Dockerfile 还将 S2I 脚本复制到输出镜像中。 + +按如下所示修改 Dockerfile 以定义镜像构建器。 + +#### Dockerfile + +``` +# nginx-centos7 +FROM kubespheredev/s2i-base-centos7:1 + +# Here you can specify the maintainer for the image that you're building +LABEL maintainer="Runze Xia " + +# Define the current version of the application +ENV NGINX_VERSION=1.6.3 + +# Set the labels that are used for KubeSphere to describe the Image Builder. +LABEL io.k8s.description="Nginx Webserver" \ + io.k8s.display-name="Nginx 1.6.3" \ + io.kubesphere.expose-services="8080:http" \ + io.kubesphere.tags="builder,nginx,html" + +# Install the nginx web server package and clean the yum cache +RUN yum install -y epel-release && \ + yum install -y --setopt=tsflags=nodocs nginx && \ + yum clean all + +# Change the default port for nginx +RUN sed -i 's/80/8080/' /etc/nginx/nginx.conf +RUN sed -i 's/user nginx;//' /etc/nginx/nginx.conf + +# Copy the S2I scripts to /usr/libexec/s2i in the Image Builder +COPY ./s2i/bin/ /usr/libexec/s2i + +RUN chown -R 1001:1001 /usr/share/nginx +RUN chown -R 1001:1001 /var/log/nginx +RUN chown -R 1001:1001 /var/lib/nginx +RUN touch /run/nginx.pid +RUN chown -R 1001:1001 /run/nginx.pid +RUN chown -R 1001:1001 /etc/nginx + +USER 1001 + +# Set the default port for applications built using this image +EXPOSE 8080 + +# Modify the usage script in your application dir to inform the user how to run this image. +CMD ["/usr/libexec/s2i/usage"] +``` + +{{< notice note >}} + +S2I 脚本将使用 Dockerfile 中定义的标志作为参数。如果您需要使用与 KubeSphere 提供的基础镜像不同的基础镜像,请参见 [S2I Scripts](https://github.com/kubesphere/s2irun/blob/master/docs/builder_image.md#s2i-scripts)。 + +{{}} + +### 步骤 3:创建 S2I 脚本 + +1. 创建一个 `assemble` 脚本,如下所示,将配置文件和静态内容复制到目标容器中。 + + ```bash + #!/bin/bash -e + + if [[ "$1" == "-h" ]]; then + exec /usr/libexec/s2i/usage + fi + + echo "---> Building and installing application from source..." + if [ -f /tmp/src/nginx.conf ]; then + mv /tmp/src/nginx.conf /etc/nginx/nginx.conf + fi + + if [ "$(ls -A /tmp/src)" ]; then + mv /tmp/src/* /usr/share/nginx/html/ + fi + ``` + + {{< notice note >}} + + 默认情况下,`s2i build` 将应用程序源代码放在 `/tmp/src`。上述命令将应用程序源代码复制到由 `kubespheredev/s2i-base-centos7:1` 定义的工作目录 `/opt/app-root/src`。 + + {{}} + +2. 
创建一个 `run` 脚本,如下所示。在本教程中,它只启动 `nginx` 服务器。 + + ```bash + #!/bin/bash -e + + exec /usr/sbin/nginx -g "daemon off;" + ``` + + {{< notice note >}} + 本教程使用 `exec` 命令执行 `nginx` 服务器主机进程,让 `nginx` 接收从 `docker` 发送的所有信号,而 `nginx` 可以使用容器的标准输入和输出流。此外,`save-artifacts` 脚本允许新的构建重用应用程序早期版本镜像内容。`save-artifacts` 脚本可以删除,因为本教程不实现增量构建。 + {{}} + +3. 创建一个 `usage` 脚本,如下所示,它会打印出镜像使用说明。 + + ```bash + #!/bin/bash -e + cat < 48f8574c05df + Step 2/17 : LABEL maintainer="Runze Xia " + ---> Using cache + ---> d60ebf231518 + Step 3/17 : ENV NGINX_VERSION=1.6.3 + ---> Using cache + ---> 5bd34674d1eb + Step 4/17 : LABEL io.k8s.description="Nginx Webserver" io.k8s.display-name="Nginx 1.6.3" io.kubesphere.expose-services="8080:http" io.kubesphere.tags="builder,nginx,html" + ---> Using cache + ---> c837ad649086 + Step 5/17 : RUN yum install -y epel-release && yum install -y --setopt=tsflags=nodocs nginx && yum clean all + ---> Running in d2c8fe644415 + + ………… + ………… + ………… + + Step 17/17 : CMD ["/usr/libexec/s2i/usage"] + ---> Running in c24819f6be27 + Removing intermediate container c24819f6be27 + ---> c147c86f2cb8 + Successfully built c147c86f2cb8 + Successfully tagged kubespheredev/nginx-centos7-s2ibuilder-sample:latest + ``` + +3. 在创建镜像构建器后,运行以下命令创建应用程序镜像。 + + ```bash + $ s2i build ./test/test-app kubespheredev/nginx-centos7-s2ibuilder-sample:latest sample-app + ---> Building and installing application from source... + Build completed successfully + ``` + + {{< notice note >}} + 按照 `assemble` 脚本中定义的逻辑,S2I 使用镜像构建器作为基础创建应用程序镜像,并从 `test/test-app` 目录注入源代码。 + {{}} + +4. 运行以下命令以运行应用程序镜像。 + + ```bash + docker run -p 8080:8080 sample-app + ``` + + 您可以在此位置访问 Nginx 应用程序:`http://localhost:8080`。 + +### 步骤 5:推送镜像与创建 S2I 模板 + +在本地完成 S2I 镜像构建器测试后,可以将镜像推送到自定义镜像仓库。您还需要创建一个 YAML 文件作为 S2I 构建器模板,如下所示。 + +#### s2ibuildertemplate.yaml + +```yaml +apiVersion: devops.kubesphere.io/v1alpha1 +kind: S2iBuilderTemplate +metadata: + labels: + controller-tools.k8s.io: "1.0" + builder-type.kubesphere.io/s2i: "s2i" + name: nginx-demo +spec: + containerInfo: + - builderImage: kubespheredev/nginx-centos7-s2ibuilder-sample + codeFramework: nginx # type of code framework + defaultBaseImage: kubespheredev/nginx-centos7-s2ibuilder-sample # default Image Builder (can be replaced by a customized image) + version: 0.0.1 # Builder template version + description: "This is an S2I builder template for NGINX builds whose result can be run directly without any further application server." # Builder template description +``` + +### 步骤 6:在 KubeSphere 使用 S2I 模板 + +1. 运行以下命令将上面创建的 S2I 模板提交至 KubeSphere。 + + ```bash + $ kubectl apply -f s2ibuildertemplate.yaml + s2ibuildertemplate.devops.kubesphere.io/nginx created + ``` + +2. 
在 KubeSphere 上创建 S2I 构建时,可以在**构建环境**中找到自定义 S2I 模板。 + +## S2I 模板参数定义 + +有关 S2I 模板标签作为参数传递给前端分类的详细说明,请参见下表 + +| 标签名称 | 选项 | 定义 | +| ------------------------------------- | -------------------- | ------------------------------------------------------------ | +| builder-type.kubesphere.io/s2i: "s2i" | "s2i" | 模板类型为 S2I,基于应用程序源代码构建镜像。 | +| builder-type.kubesphere.io/b2i | "b2i" | 模板类型为 B2I,基于二进制文件或其他制品构建镜像。 | +| binary-type.kubesphere.io | "jar","war","binary" | 该类型为 B2I 类型的补充,在选择 B2I 类型时需要。例如,当提供 Jar 包时,选择 "jar" 类型。在 KubeSphere v2.1.1 及更高版本,允许自定义 B2I 模板。 | + +参见以下 S2I 模板参数的详细说明。必需参数用星号标记。 + +| 参数 | 类型 | 定义 | +| ------------------------------------------ | -------- | ------------------------------------------------------------ | +| *containerInfo | []struct | 关于镜像构建器的信息。 | +| *containerInfo.builderImage | string | S2I 镜像构建器,如:kubesphere/java-8-centos7:v2.1.0. | +| containerInfo.runtimeImage | string | S2I 运行时镜像,如:kubesphere/java-8-runtime:v2.1.0. | +| containerInfo.buildVolumes | []string | 关于挂载卷的信息。格式为 "volume_name:mount_path", 如:"s2i_java_cache:/tmp/artifacts","test_cache:test_path"]。 | +| containerInfo.runtimeArtifacts | []struct | 输出制品的原始路径和目标路径;仅在分阶段构建中添加。 | +| containerInfo.runtimeArtifacts.source | string | 制品在镜像构建器的原始路径。 | +| containerInfo.runtimeArtifacts.destination | string | 运行时镜像中制品的目标路径。 | +| containerInfo.runtimeArtifacts.keep | bool | 是否将数据保留在输出镜像中。 | +| *defaultBaseImage | string | 默认镜像构建器。 | +| *codeFramework | string | 代码框架类型,如:Java、Ruby。 | +| environment | []struct | 构建过程中的环境变量列表。 | +| environment.key | string | 环境变量的名称。 | +| environment.type | string | 环境变量键的类型。 | +| environment.description | string | 环境变量的描述。 | +| environment.optValues | []string | 环境变量的参数列表。 | +| environment.required | bool | 是否需要设置环境变量。 | +| environment.defaultValue | string | 环境变量的默认值。 | +| environment.value | string | 环境变量的值。 | +| iconPath | string | 应用名称。 | +| version | string | S2I 模板版本。 | +| description | string | 模板功能和用法的说明。 | + diff --git a/content/zh/docs/v3.4/project-user-guide/image-builder/source-to-image.md b/content/zh/docs/v3.4/project-user-guide/image-builder/source-to-image.md new file mode 100644 index 000000000..5f6bb5155 --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/image-builder/source-to-image.md @@ -0,0 +1,119 @@ +--- +title: "Source to Image:无需 Dockerfile 发布应用" +keywords: 'KubeSphere, Kubernetes, Docker, S2I, Source-to-Image' +description: '如何使用 Source-to-Image 发布应用程序。' +linkTitle: "Source to Image:无需 Dockerfile 发布应用" +weight: 10610 +--- + +Source-to-Image (S2I) 是一个工具箱和工作流,用于从源代码构建可再现容器镜像。S2I 通过将源代码注入容器镜像,自动将编译后的代码打包成镜像。KubeSphere 集成 S2I 来自动构建镜像,并且无需任何 Dockerfile 即可发布到 Kubernetes。 + +本教程演示如何通过创建服务 (Service) 使用 S2I 将 Java 示例项目的源代码导入 KubeSphere。KubeSphere Image Builder 将基于源代码创建 Docker 镜像,将其推送至目标仓库,并发布至 Kubernetes。 + +![构建流程](/images/docs/v3.3/zh-cn/project-user-guide/image-builder/source-to-image/build-process.png) + +## 视频演示 + + + +## 准备工作 + +- 您需要启用 [KubeSphere DevOps 系统](../../../pluggable-components/devops/),该系统已集成 S2I。 +- 您需要创建一个 [GitHub](https://github.com/) 帐户和一个 [Docker Hub](http://www.dockerhub.com/) 帐户,也支持 GitLab 和 Harbor。本教程使用 Github 仓库提供源代码,用于构建镜像并推送至 Docker Hub。 +- 您需要创建一个企业空间、一个项目和一个用户 (`project-regular`),请务必邀请该用户至项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 +- 设置一个 CI 专用节点用于构建镜像。该操作不是必需,但建议开发和生产环境进行设置,专用节点会缓存依赖项并缩短构建时间。有关更多信息,请参见[为缓存依赖项设置 CI 节点](../../../devops-user-guide/how-to-use/devops-settings/set-ci-node/)。 + +## 使用 Source-to-Image (S2I) + +### 步骤 1:Fork 示例仓库 + +登录 GitHub 并 Fork GitHub 仓库 
[devops-maven-sample](https://github.com/kubesphere/devops-maven-sample) 至您的 GitHub 个人帐户。 + +### 步骤 2:创建保密字典 + +以 `project-regular` 身份登录 KubeSphere 控制台,转到您的项目,分别为 Docker Hub 和 GitHub 创建保密字典。有关更多信息,请参见[创建常用保密字典](../../../project-user-guide/configuration/secrets/#创建常用保密字典)。 + +{{< notice note >}} + +如果您 Fork 的是公开仓库,则不需要创建 GitHub 保密字典。 + +{{}} + +### 步骤 3:创建服务 + +1. 在该项目中,转到**应用负载**下的**服务**,点击**创建**。 + +2. 选中**通过代码创建服务**下的 **Java**,将其命名为 `s2i-demo` 并点击**下一步**。 + + {{< notice note >}} + + KubeSphere 已集成常用的 S2I 模板,例如 Java、Node.js 和 Python。如果您想使用其他语言或自定义 S2I 模板,请参见[自定义 S2I 模板](../s2i-templates/)。 + + {{}} + +3. 在**构建设置**页面,请提供以下相应信息,并点击**下一步**。 + + **服务类型**:本示例选择**无状态服务**。有关不同服务的更多信息,请参见[服务类型](../../../project-user-guide/application-workloads/services/#服务类型)。 + + **构建环境**:选择 **kubesphere/java-8-centos7:v2.1.0**。 + + **代码仓库 URL**:源代码仓库地址(目前支持 Git)。您可以指定代码分支和在源代码终端的相对路径。URL 支持 HTTP 和 HTTPS。在该字段粘贴已 Fork 仓库的 URL(您自己仓库的地址)。 + + **代码仓库分支**:分支用于构建镜像。本教程中在此输入 `master`。您可以输入 `dependency` 进行缓存测试。 + + **代码仓库密钥**:您不需要为公共仓库提供保密字典。如果您想使用私有仓库,请选择 GitHub 保密字典。 + + **镜像名称**:自定义镜像名称。本教程会向 Docker Hub 推送镜像,故请输入 `dockerhub_username/s2i-sample`。`dockerhub_username` 是您的 Docker ID,请确保该 ID 有权限推送和拉取镜像。 + + **镜像标签**:镜像标签,请输入 `latest`。 + + **目标镜像仓库**:镜像会推送至 Docker Hub,故请选择 Docker Hub 保密字典。 + + **高级设置**:您可以定义代码相对路径。该字段请使用默认的 `/`。 + +4. 在**容器组设置**页面,下拉至**端口设置**,为容器设置访问策略。**协议**选择 **HTTP**,自定义名称(例如 `http-1`),**容器端口**和**服务端口**都输入 `8080`。 + +5. 下拉至**健康检查**并选中,填写以下参数设置**就绪检查**。探针设置完成后点击 **√**,然后点击**下一步**继续。 + + **HTTP 请求**:选择 **HTTP** 作为协议,输入 `/` 作为路径(本教程中的根路径),输入 `8080` 作为暴露端口。 + + **初始延迟(s)**:容器启动后,存活探针启动之前等待的秒数。本字段输入 `30`。 + + **超时时间(s)**:探针超时的秒数。本字段输入 `10`。 + + 其他字段请直接使用默认值。有关如何在**容器设置**页面配置探针和设置其他参数的更多信息,请参见[容器组设置](../../../project-user-guide/application-workloads/container-image-settings/)。 + +6. 在**存储设置**页面,您可以为容器添加持久卷。有关更多信息,请参见[持久卷](../../../project-user-guide/storage/volumes/)。点击**下一步**继续。 + +7. 在**高级设置**页面,选中**外部访问**并选择 **NodePort** 作为访问方式。点击**创建**完成整个操作过程。 + +8. 点击左侧导航栏的**镜像构建器**,您可以看到正在构建示例镜像。 + +### 步骤 4:查看结果 + +1. 稍等片刻,您可以看到镜像构建器状态变为**成功**。 + +2. 点击该镜像构建器前往其详情页面。在**任务记录**下,点击记录右侧的 icon 查看构建日志。如果一切运行正常,您可以在日志末尾看到 `Build completed successfully`。 + +3. 回到**服务**、**部署**和**任务**页面,您可以看到该镜像相应的服务、部署和任务都已成功创建。 + +4. 在您的 Docker Hub 仓库,您可以看到 KubeSphere 已经向仓库推送了带有预期标签的镜像。 + +### 步骤 5:访问 S2I 服务 + +1. 在**服务**页面,请点击 S2I 服务前往其详情页面。 + +2. 要访问该服务,您可以执行 `curl` 命令使用 Endpoint 或者访问 `:`。例如: + + ```bash + $ curl 10.10.131.44:8080 + Really appreciate your star, that is the power of our life. 
+ ``` + + {{< notice note >}} + + 如果您想从集群外访问该服务,可能需要根据您的部署环境在安全组中放行端口并配置端口转发规则。 + + {{}} \ No newline at end of file diff --git a/content/zh/docs/v3.4/project-user-guide/storage/_index.md b/content/zh/docs/v3.4/project-user-guide/storage/_index.md new file mode 100644 index 000000000..b8c401d7c --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/storage/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "持久卷声明管理" +weight: 10300 + +_build: + render: false +--- diff --git a/content/zh/docs/v3.4/project-user-guide/storage/volume-snapshots.md b/content/zh/docs/v3.4/project-user-guide/storage/volume-snapshots.md new file mode 100644 index 000000000..a69de9396 --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/storage/volume-snapshots.md @@ -0,0 +1,73 @@ +--- +title: "卷快照" +keywords: 'KubeSphere, Kubernetes, 持久卷声明, 快照' +description: '了解如何在 KubeSphere 中管理持久卷声明的快照。' +linkTitle: "卷快照" +weight: 10330 +--- + +许多存储系统都支持为持久卷创建快照。快照是持久卷声明的一个即时副本,可用于供应新持久卷声明(预先在其中填充快照数据)或将现有持久卷声明恢复到创建快照时的状态。有关更多信息,请参阅 [Kubernetes 官方文档](https://kubernetes.io/zh/docs/concepts/storage/volume-snapshots/)。 + +本教程演示如何创建和使用卷快照。 + +## 准备工作 + +- 您需要创建一个企业空间、一个项目和一个用户(例如 `project-regular`)。该用户必须已邀请至该项目,并具有 `operator` 角色。有关更多信息,请参阅[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +- 您需要确保 Kubernetes 版本为 1.17 或更新版本。 + +- 您需要确保底层存储插件支持快照。 + +- 您需要准备一个可用的持久卷声明以便为其创建快照。有关更多信息,请参阅[持久卷声明](../volumes/)。 + +- 您需要创建一个[卷快照类](../../../cluster-administration/snapshotclass/)。 + +## 创建持久卷声明快照 + +您可以通过两种方式创建卷快照。 +### 方法 1:卷快照页面 + +1. 以 `project-regular` 用户登录 KubeSphere Web 控制台并进入项目。在左侧导航栏选择**存储**下的**卷快照**。 + +2. 在右侧的**卷快照**页面,点击**创建**。 + +3. 在弹出的**创建快照**对话框中,选择支持创建快照的持久卷声明,输入卷快照名称并选择卷快照类,点击**确定**完成创建。在**卷快照**列表查看新建的快照。 + +4. 点击**卷快照内容**查看卷快照状态、容量、卷快照类等信息。 +### 方法 2:持久卷声明页面 + +1. 以 `project-regular` 用户登录 KubeSphere Web 控制台并进入项目。在左侧导航栏选择**存储**下的**持久卷声明**,在页面右侧选择需要创建快照的持久卷声明。 + +2. 在持久卷声明详情页面,点击 **更多操作 > 创建快照**。 + +3. 在弹出的对话框中,设置快照的名称并选择快照类型,然后点击**确定**完成创建。快照的名称将作为快照的唯一标识符。在**卷快照**列表查看新建的快照。 + + +## 用快照创建持久卷声明 + +您可以通过两种方式用快照创建持久卷声明。 +### 方法1:快照详情页面 + +1. 以 `project-regular` 用户登录 KubeSphere Web 控制台,进入快照详情页面,然后点击**创建卷**来使用快照。其他步骤与直接创建持久卷声明基本相同。 + +2. 在弹出的对话框中设置持久卷声明的名称,然后点击**下一步**。 + + {{< notice note >}} + + 您将创建一个 PersistentVolumeClaim (PVC) 资源。 + + {{}} + +3. 在**存储设置**页签,选择访问模式,然后点击**下一步**。 + +4. 在**高级设置**页签,为持久卷声明添加元数据,点击**创建**。 + +### 方法2:持久卷声明页面 + +1. 以 `project-regular` 用户登录 KubeSphere Web 控制台并进入项目。在左侧导航栏选择**存储**下的**持久卷声明**,然后点击**创建**。 + +2. 在弹出的对话框中设置持久卷声明的名称,然后点击**下一步**。 + +3. 在**存储设置**选项卡中,将**创建方式**设置为**通过卷快照创建**,选择一个卷快照和访问模式,然后点击**下一步**。 + +4. 
在**高级设置**选项卡中,为持久卷添加元数据,然后点击**创建**。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/project-user-guide/storage/volumes.md b/content/zh/docs/v3.4/project-user-guide/storage/volumes.md new file mode 100644 index 000000000..84f5dabc4 --- /dev/null +++ b/content/zh/docs/v3.4/project-user-guide/storage/volumes.md @@ -0,0 +1,240 @@ +--- +title: "持久卷声明" +keywords: 'Kubernetes, 持久卷, 持久卷申领, 克隆, 快照, 扩容, PV, PVC' +description: '了解如何在 KubeSphere 中创建、编辑和挂载持久卷声明。' +linkTitle: "持久卷声明" +weight: 10310 +--- + + 在项目中创建应用负载时,您可以为应用负载创建[持久卷声明(PVC)](https://kubernetes.io/zh/docs/concepts/storage/persistent-volumes/)。PVC 可用于创建存储请求,从而进一步为应用提供持久化存储。更具体地说,持久卷(PV)资源可用于管理持久化存储。 + +集群管理员需要用存储类型 (Storage Class) 配置持久卷。也就是说,要在项目中创建 PVC,您的集群中必须要有可用的存储类型。如果在安装 KubeSphere 时没有配置自定义存储类型,集群中将默认安装 [OpenEBS](https://openebs.io/) 以提供本地持久化存储。然而,OpenEBS 不支持动态为 PVC 动态供应持久卷。在生产环境中,建议您提前配置存储类型从而为应用提供持久化存储服务。 + +本教程介绍如何创建 PVC、挂载 PVC 和使用 PVC。 + +## 准备工作 + +- 您需要创建一个企业空间、一个项目和一个用户(例如 `project-regular`)。该用户必须已邀请至该项目,并具有 `operator` 角色。有关更多信息,请参阅[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +- 如需使用动态卷供应,您需要配置一个支持动态供应的[存储类](../../../cluster-administration/storageclass/)。 + + + +## 创建持久性声明 + +KubeSphere 将 PVC 绑定到满足您设定的请求条件(例如容量和访问模式)的 PV。在创建应用负载时,您可以选择所需的 PVC 并将其挂载到负载。 + +1. 以 `project-regular` 身份登录 KubeSphere Web 控制台并进入项目,在左侧导航栏中点击**存储**下的**持久卷声明**。页面上显示所有已挂载至项目工作负载的持久卷声明。 + +2. 在**持久卷声明**页面,点击**创建**以创建持久卷声明。 + +3. 在弹出的对话框设置持久卷声明的名称(例如 `demo-volume`),选择项目,然后点击**下一步**。 + + + {{< notice note >}} + + 您可以在对话框右上角启用**编辑 YAML** 来查看持久卷声明的 YAML 清单文件,并通过直接编辑清单文件来创建持久卷声明。您也可继续执行后续步骤在控制台上创建持久卷声明。 + + {{}} + +4. 在**存储设置**页面,选择创建持久卷声明的方式。 + + - **通过存储类创建**:您可以在 KubeSphere [安装前](../../../installing-on-linux/persistent-storage-configurations/understand-persistent-storage/)或[安装后](../../../cluster-administration/storageclass/)配置存储类。 + + - **通过卷快照创建**:如需通过快照创建持久卷声明,您必须先创建卷快照。 + + 选择**通过存储类创建**。有关通过卷快照创建持久卷声明的更多信息,请参阅[卷快照](../volume-snapshots/)。 + +5. 从下拉列表中选择存储类。本教程以青云QingCloud 平台提供的 `csi-standard` 标准存储类为例。您可以根据需要选择其他存储类。 + +6. 选择所需的访问模式。由于一些 PV 只支持特定的访问模式,页面上显示的访问模式会因您选择的存储类而不同。访问模式一共有三种: + + - **ReadWriteOnce**:持久卷声明以单节点读写的形式挂载。 + - **ReadOnlyMany**:持久卷声明以多节点只读的形式挂载。 + - **ReadWriteMany**:持久卷声明以多节点读写的形式挂载。 + +7. 在**卷容量**区域,设置持久卷声明的大小,然后点击**下一步**。 + +8. 在**高级设置**页面,您可以为持久卷声明添加元数据,例如**标签**和**注解**。元数据可用作搜索和调度资源的标识符。 + +9. 
点击**创建**。新建的持久卷声明会显示在项目的**持久卷声明**页面。持久卷声明挂载至工作负载后,**挂载状态**列会显示为**已挂载**。 + + {{< notice note >}} + +- 新建的持久卷声明也会显示在**集群管理**中的**持久卷声明**页面。集群管理员需要查看和跟踪项目中创建的持久卷声明。另一方面,集群管理员在**集群管理**中为项目创建的持久卷声明也会显示在项目的**持久卷声明**页面。 + +- 一些持久卷声明是动态供应的持久卷声明,它们的状态会在创建后立刻从**等待中**变为**已绑定**。其他仍处于**等待中**的持久卷声明会在挂载至工作负载后变为**已绑定**。持久卷声明是否支持动态供应取决于其存储类。例如,如果您使用默认的存储类型 (OpenEBS) 安装 KubeSphere,您只能创建不支持动态供应的本地持久卷声明。这类持久卷声明的绑定模式由 YAML 文件中的 `VolumeBindingMode: WaitForFirstConsumer` 字段指定。 + +- 一些持久卷声明是动态供应的持久卷声明,它们的状态会在创建后立刻从**等待中**变为**已绑定**。其他仍处于**等待中**的持久卷声明会在挂载至工作负载后变为**已绑定**。持久卷声明是否支持动态供应取决于其存储类。例如,如果您使用默认的存储类型 (OpenEBS) 安装 KubeSphere,您只能创建不支持动态供应的本地持久卷声明。这类持久卷声明的绑定模式由 YAML 文件中的 `VolumeBindingMode: WaitForFirstConsumer` 字段指定。 + + {{}} + +## 挂载持久卷 + +创建[部署](../../../project-user-guide/application-workloads/deployments/)、[有状态副本集](../../../project-user-guide/application-workloads/statefulsets/)和[守护进程集](../../../project-user-guide/application-workloads/daemonsets/)等应用负载时,您可以为它们挂载持久卷声明。 + +{{< notice note >}} + +关于如何创建应用负载,请参阅[应用负载](../../application-workloads/deployments/)中的相关指南。 + +{{}} + +在**存储**页面,您可以为工作负载挂载不同的持久卷声明。 + +- **添加持久卷声明模板**(仅对[有状态副本集](../../../project-user-guide/application-workloads/statefulsets/)可用):持久卷声明模板用于动态创建 PVC。您需要设置 PVC 名称前缀、存储类、访问模式、卷容量和挂载路径(以上参数都由 `volumeClaimTemplates` 字段指定),以便将对应 StorageClass 的 PVC 挂载至容器组。 + +- **挂载卷**:支持 emptyDir 卷和 PVC。 + + **挂载卷**页面支持以下三种模式: + + - **持久卷**:使用 PVC 挂载。 + + 持久卷可用于保存用户的持久数据。您需要提前创建持久卷声明(PVC),持久卷声明创建后会显示在列表中供选择。 + + - **临时卷**:用 emptyDir 卷挂载。 + + 临时卷即 [emptyDir](https://kubernetes.io/zh/docs/concepts/storage/volumes/#emptydir) 卷,它在容器组分配到节点时创建,并且只要容器组在节点上运行就会一直存在。emptyDir 卷提供了一个空目录,可由容器组中的容器读写。取决于您的部署环境,emptyDir 卷可以存放在节点所使用的任何介质上,例如机械硬盘或 SSD。当容器组由于某些原因从节点上移除时,emptyDir 卷中的数据也会被永久删除。 + + - **HostPath 卷**:用 `hostPath` 卷挂载。 + + `hostPath` 卷将主机节点文件系统中的文件或目录挂载至容器组。大多数容器组可能不需要这类卷,但它可以为一些应用提供了强大的逃生舱 (Escape Hatch)。有关更多信息,请参阅 [Kubernetes 官方文档](https://kubernetes.io/zh/docs/concepts/storage/volumes/#hostpath)。 + +- **挂载配置字典或保密字典**:支持[配置字典](../../../project-user-guide/configuration/configmaps/)或[保密字典](../../../project-user-guide/configuration/secrets/)键值对。 + + [保密字典](https://kubernetes.io/zh/docs/concepts/storage/volumes/#secret)卷用于为容器组提供密码、OAuth 凭证、SSH 密钥等敏感信息。该卷由 tmpfs(基于 RAM 的文件系统)支持,所以数据不会写入非易失性存储中。 + + [配置字典](https://kubernetes.io/zh/docs/concepts/storage/volumes/#configmap)卷以键值对的形式存放配置数据。ConfigMap 资源可用于向容器组中注入配置数据。存放在 ConfigMap 对象中的数据可以由 `configMap` 类型的卷引用,并由容器组中运行的容器化应用使用。ConfigMap 通常用于以下场景: + + - 设置环境变量。 + - 设置容器中的命令参数。 + - 创建卷中的配置文件。 + +## 查看和管理持久性声明 + +持久性声明创建后,您可以查看持久性声明的详情、编辑持久性声明和使用持久性声明功能。 +### 查看持久性声明详情 + +在**持久性声明**页面,点击一个持久性声明名称可打开持久性声明详情页面。 + +1. 点击**资源状态**页签,查看持久卷用量和挂载的容器组。 + +2. 点击**元数据**页签,查看持久卷声明的标签和注解。 + +3. 点击**事件**页签,查看持久卷声明的事件。 + +4. 
点击**快照**页签,查看卷快照。 +### 编辑持久性声明 + +在持久性声明详情页面,您可以点击**编辑信息**修改持久性声明的基本信息。点击**更多操作**可编辑 YAML 文件或删除持久性声明。 + +如需删除持久性声明,请确保该持久性声明未挂载至任何工作负载。如需卸载工作负载的持久性声明,请进入该工作负载的详情页面,点击**更多操作**,从下拉菜单中选择**编辑设置**,在弹出的对话框中选择**存储**,然后点击垃圾桶图标卸载该持久性声明。 + +在您点击**删除**后,如果持久性声明长时间处于**删除中**状态,请使用以下命令手动删除: + +```bash +kubectl patch pvc -p '{"metadata":{"finalizers":null}}' +``` + +### 使用持久性声明功能 + +**更多操作**下拉菜单提供了其它额外功能,这些功能基于 KubeSphere 的底层存储插件 `Storage Capability`。具体如下: + +- **克隆**:创建一个相同的持久性声明。 +- **创建快照**:创建一个持久性声明快照,可用于创建其他持久性声明。有关更多信息,请参阅[卷快照](../volume-snapshots/)。 +- **扩容**:增加持久性声明的容量。请注意,您无法在控制台上减少持久性声明的容量,因为数据可能会因此丢失。 + +有关 `Storage Capability` 的更多信息,请参阅[设计文档](https://github.com/kubesphere/community/blob/master/sig-storage/concepts-and-designs/storage-capability-interface.md)。 + +{{< notice note >}} + +`Storage Capability` 可能尚未覆盖一些树内 (in-tree) 或特殊的 CSI 插件。如果某些功能在 KubeSphere 集群中没有正确显示,您可以按照[此文档](https://github.com/kubesphere/kubesphere/issues/2986)修改设置。 + +{{}} + +### 监控持久性声明 + +KubeSphere 从 Kubelet 获取 `Filesystem` 模式的 PVC 的指标数据(包括容量使用情况和 inode 使用情况),从而对持久性声明进行监控。 + +有关持久性声明监控的更多信息,请参阅 [Research on Volume Monitoring](https://github.com/kubesphere/kubesphere/issues/2921)。 +## 查看持久卷列表并管理持久卷 + +### 查看持久卷列表 + +1. 在**持久卷声明**页面,点击**持久卷**页签,可以查看以下信息: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+   | 参数 | 描述 |
+   | --- | --- |
+   | Name | 持久卷名称,在该持久卷的清单文件中由 `.metadata.name` 字段指定。 |
+   | 状态 | 持久卷的当前状态,在该持久卷的清单文件中由 `.status.phase` 字段指定,包括:<br />• 可用:持久卷可用,尚未绑定至持久卷声明。<br />• 已绑定:持久卷已绑定至持久卷声明。<br />• 删除中:正在删除持久卷。<br />• 失败:持久卷不可用。 |
+   | 容量 | 持久卷的容量,在该持久卷的清单文件中由 `.spec.capacity.storage` 字段指定。 |
+   | 访问模式 | 持久卷的访问模式,在该持久卷的清单文件中由 `.spec.accessModes` 字段指定,包括:<br />• RWO:持久卷可挂载为单个节点读写。<br />• ROX:持久卷可挂载为多个节点只读。<br />• RWX:持久卷可挂载为多个节点读写。 |
+   | 回收策略 | 持久卷的回收策略,在该持久卷的清单文件中由 `.spec.persistentVolumeReclaimPolicy` 字段指定,包括:<br />• Retain:删除持久卷声明后,保留该持久卷,需要手动回收。<br />• Delete:删除该持久卷,同时从卷插件的基础设施中删除所关联的存储设备。<br />• Recycle:清除持久卷的数据,使该持久卷可供新的持久卷声明使用。 |
+   | 创建时间 | 持久卷的创建时间。 |
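+
+   上表中的字段都可以直接在持久卷的清单文件中查看。如果您习惯使用 kubectl,也可以参考以下示例命令进行核对(其中 `<pv-name>` 为占位名称,请替换为实际的持久卷名称):
+
+   ```bash
+   # 列出集群中的所有持久卷,输出中包含容量、访问模式、回收策略和状态
+   kubectl get pv
+
+   # 查看单个持久卷清单中与上表对应的字段
+   kubectl get pv <pv-name> -o jsonpath='{.status.phase} {.spec.capacity.storage} {.spec.accessModes} {.spec.persistentVolumeReclaimPolicy}{"\n"}'
+   ```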
+ +2. 点击持久卷实例右侧的 并在下拉菜单中选择一项操作: + - **编辑信息**:编辑持久卷信息。 + - **编辑 YAML**:编辑持久卷的 YAML 文件。 + - **删除**:删除持久卷。处于已绑定状态的持久卷不可删除。 + +### 查看持久卷实例详情页面 + +1. 点击持久卷的名称,进入其详情页面。 + +2. 在详情页面,点击**编辑信息**以编辑持久卷的基本信息。 + +3. 点击**更多操作**,在下拉菜单中选择一项操作: + + - **查看 YAML**:查看持久卷的 YAML 文件。 + - **删除**:删除持久卷并返回列表页面。处于已绑定状态的持久卷不可删除。 + +4. 点击**资源状态**页签,查看持久卷所绑定的持久卷声明。 + +5. 点击**元数据**页签,查看持久卷的标签和注解。 + +6. 点击**事件**页签,查看持久卷的事件。 diff --git a/content/zh/docs/v3.4/quick-start/_index.md b/content/zh/docs/v3.4/quick-start/_index.md new file mode 100644 index 000000000..547c3cc4b --- /dev/null +++ b/content/zh/docs/v3.4/quick-start/_index.md @@ -0,0 +1,14 @@ +--- +title: "快速入门" +description: "通过详细的图文帮助您更好地了解 KubeSphere" +layout: "second" + +linkTitle: "快速入门" + +weight: 2000 + +icon: "/images/docs/v3.3/docs.svg" + +--- + +快速入门中包括六个动手实验操作,能够帮助您快速了解 KubeSphere。强烈建议您仔细阅读这些教程,探索 KubeSphere 的基本功能。 diff --git a/content/zh/docs/v3.4/quick-start/all-in-one-on-linux.md b/content/zh/docs/v3.4/quick-start/all-in-one-on-linux.md new file mode 100644 index 000000000..b1a20679e --- /dev/null +++ b/content/zh/docs/v3.4/quick-start/all-in-one-on-linux.md @@ -0,0 +1,261 @@ +--- +title: "在 Linux 上以 All-in-One 模式安装 KubeSphere" +keywords: 'KubeSphere, Kubernetes, All-in-One, 安装' +description: '了解在 Linux 上如何使用最小安装包安装 KubeSphere。本教程为您理解容器平台提供了基础入门知识,为学习以下指南奠定基础。' +linkTitle: "在 Linux 上以 All-in-One 模式安装 KubeSphere" +weight: 2100 +--- + +对于刚接触 KubeSphere 并想快速上手该[容器平台](https://kubesphere.io/)的用户,All-in-One 安装模式是最佳的选择,它能够帮助您零配置快速部署 KubeSphere 和 Kubernetes。 + +## 视频演示 + + + +## 步骤 1:准备 Linux 机器 + +若要以 All-in-One 模式进行安装,您仅需参考以下对机器硬件和操作系统的要求准备一台主机。 + +### 硬件推荐配置 + + + + + + + + + + + + + + + + + + + + + + + + + +
+| 操作系统 | 最低配置 |
+| --- | --- |
+| Ubuntu 16.04, 18.04, 20.04, 22.04 | 2 核 CPU,4 GB 内存,40 GB 磁盘空间 |
+| Debian Buster, Stretch | 2 核 CPU,4 GB 内存,40 GB 磁盘空间 |
+| CentOS 7.x | 2 核 CPU,4 GB 内存,40 GB 磁盘空间 |
+| Red Hat Enterprise Linux 7 | 2 核 CPU,4 GB 内存,40 GB 磁盘空间 |
+| SUSE Linux Enterprise Server 15/openSUSE Leap 15.2 | 2 核 CPU,4 GB 内存,40 GB 磁盘空间 |
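+
+安装前,可以参考以下命令快速确认机器是否满足上述最低配置(仅为示例,输出会因环境而异):
+
+```bash
+# 查看 CPU 核数
+nproc
+# 查看内存
+free -h
+# 查看根分区可用磁盘空间
+df -h /
+```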
+ +{{< notice note >}} + +以上的系统要求和以下的教程适用于没有启用任何可选组件的默认最小化安装。如果您的机器至少有 8 核 CPU 和 16 GB 内存,则建议启用所有组件。有关更多信息,请参见[启用可插拔组件](../../pluggable-components/)。 + +{{}} + +### 节点要求 + +- 节点必须能够通过 `SSH` 连接。 +- 节点上可以使用 `sudo`/`curl`/`openssl`/`tar` 命令。 + +### 容器运行时 + +您的集群必须有一个可用的容器运行时。如果您使用 KubeKey 搭建集群,KubeKey 会默认安装最新版本的 Docker。或者,您也可以在创建集群前手动安装 Docker 或其他容器运行时。 + + + + + + + + + + + + + + + + + + + + + + +
+| 支持的容器运行时 | 版本 |
+| --- | --- |
+| Docker | 19.3.8 + |
+| containerd | 最新版 |
+| CRI-O(试验版,未经充分测试) | 最新版 |
+| iSula(试验版,未经充分测试) | 最新版 |
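+
+如果您选择自行安装容器运行时,可以参考以下命令确认其版本是否满足要求(以 Docker 和 containerd 为例,仅供参考):
+
+```bash
+# 查看 Docker 服务端版本
+docker version --format '{{.Server.Version}}'
+# 查看 containerd 版本
+containerd --version
+```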
+ + +### 依赖项要求 + +KubeKey 可以将 Kubernetes 和 KubeSphere 一同安装。针对不同的 Kubernetes 版本,需要安装的依赖项可能有所不同。您可以参考以下列表,查看是否需要提前在节点上安装相关的依赖项。 + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| 依赖项 | Kubernetes 版本 ≥ 1.18 | Kubernetes 版本 < 1.18 |
+| --- | --- | --- |
+| socat | 必须 | 可选但建议 |
+| conntrack | 必须 | 可选但建议 |
+| ebtables | 可选但建议 | 可选但建议 |
+| ipset | 可选但建议 | 可选但建议 |
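+
+以 Ubuntu 和 CentOS 为例,可以参考以下命令在节点上提前安装这些依赖项(仅为示例,请根据实际操作系统和软件源调整):
+
+```bash
+# Ubuntu / Debian
+sudo apt-get update && sudo apt-get install -y socat conntrack ebtables ipset
+
+# CentOS / RHEL
+sudo yum install -y socat conntrack ebtables ipset
+```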
+ +{{< notice info >}} + +KubeKey 是用 Go 语言开发的一款全新的安装工具,代替了以前基于 ansible 的安装程序。KubeKey 为用户提供了灵活的安装选择,可以分别安装 KubeSphere 和 Kubernetes 或二者同时安装,既方便又高效。 + +{{}} + +### 网络和 DNS 要求 + +{{< content "common/network-requirements.md" >}} + +{{< notice tip >}} + +- 建议您的操作系统处于干净状态(不安装任何其他软件),否则可能会发生冲突。 +- 如果您无法从 `dockerhub.io` 下载容器镜像,建议提前准备仓库的镜像地址(即加速器)。有关更多信息,请参见[为安装配置加速器](../../faq/installation/configure-booster/)。 + +{{}} + +## 步骤 2:下载 KubeKey + +请按照以下步骤下载 KubeKey。 + +{{< tabs >}} + +{{< tab "如果您能正常访问 GitHub/Googleapis" >}} + +从 [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) 下载 KubeKey 或直接使用以下命令(ubuntu使用bash替换sh)。 + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "如果您访问 GitHub/Googleapis 受限" >}} + +先执行以下命令以确保您从正确的区域下载 KubeKey。 + +```bash +export KKZONE=cn +``` + +执行以下命令下载 KubeKey。 + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +在您下载 KubeKey 后,如果您将其传至新的机器,且访问 Googleapis 同样受限,在您执行以下步骤之前请务必再次执行 `export KKZONE=cn` 命令。 + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +执行以上命令会下载最新版 KubeKey,您可以修改命令中的版本号下载指定版本。 + +{{}} + +为 `kk` 添加可执行权限: + +```bash +chmod +x kk +``` + +## 步骤 3:开始安装 + +在本快速入门教程中,您只需执行一个命令即可进行安装,其模板如下所示: + +```bash +./kk create cluster [--with-kubernetes version] [--with-kubesphere version] +``` + +若要同时安装 Kubernetes 和 KubeSphere,可参考以下示例命令: + +```bash +./kk create cluster --with-kubernetes v1.22.12 --with-kubesphere v3.3.2 +``` + +{{< notice note >}} + +- 安装 KubeSphere 3.3 的建议 Kubernetes 版本:v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 v1.21.x。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.23.10。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../installing-on-linux/introduction/kubekey/#支持矩阵)。 + +- 一般来说,对于 All-in-One 安装,您无需更改任何配置。 +- 如果您在这一步的命令中不添加标志 `--with-kubesphere`,则不会部署 KubeSphere,KubeKey 将只安装 Kubernetes。如果您添加标志 `--with-kubesphere` 时不指定 KubeSphere 版本,则会安装最新版本的 KubeSphere。 +- KubeKey 会默认安装 [OpenEBS](https://openebs.io/) 为开发和测试环境提供 LocalPV 以方便新用户。对于其他存储类型,请参见[持久化存储配置](../../installing-on-linux/persistent-storage-configurations/understand-persistent-storage/)。 + +{{}} + +执行该命令后,KubeKey 将检查您的安装环境,结果显示在一张表格中。有关详细信息,请参见[节点要求](#节点要求)和[依赖项要求](#依赖项要求)。输入 `yes` 继续安装流程。 + +## 步骤 4:验证安装结果 + +输入以下命令以检查安装结果。 + +```bash +kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f +``` + +输出信息会显示 Web 控制台的 IP 地址和端口号,默认的 NodePort 是 `30880`。现在,您可以使用默认的帐户和密码 (`admin/P@88w0rd`) 通过 `:30880` 访问控制台。 + +```bash +##################################################### +### Welcome to KubeSphere! ### +##################################################### + +Console: http://192.168.0.2:30880 +Account: admin +Password: P@88w0rd + +NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. 
+ +##################################################### +https://kubesphere.io 20xx-xx-xx xx:xx:xx +##################################################### +``` + +{{< notice note >}} + +您可能需要配置端口转发规则并在安全组中开放端口,以便外部用户访问控制台。 + +{{}} + +登录至控制台后,您可以在**系统组件**中查看各个组件的状态。如果要使用相关服务,您可能需要等待部分组件启动并运行。您也可以使用 `kubectl get pod --all-namespaces` 来检查 KubeSphere 相关组件的运行状况。 + +## 启用可插拔组件(可选) + +本指南仅适用于默认的最小化安装。若要在 KubeSphere 中启用其他组件,请参见[启用可插拔组件](../../pluggable-components/)。 + +## 代码演示 + + diff --git a/content/zh/docs/v3.4/quick-start/create-workspace-and-project.md b/content/zh/docs/v3.4/quick-start/create-workspace-and-project.md new file mode 100644 index 000000000..acfbbd8f4 --- /dev/null +++ b/content/zh/docs/v3.4/quick-start/create-workspace-and-project.md @@ -0,0 +1,239 @@ +--- +title: "创建企业空间、项目、用户和平台角色" +keywords: 'KubeSphere, Kubernetes, 多租户, 企业空间, 帐户, 平台角色, 项目' +description: '了解如何利用 KubeSphere 中的多租户功能在不同级别进行细粒度访问控制。' +linkTitle: "创建企业空间、项目、用户和平台角色" +weight: 2300 +--- + +本快速入门演示如何创建企业空间、用户和平台角色。同时,您将学习如何在企业空间中创建项目和 DevOps 项目,用于运行工作负载。完成本教程后,您将熟悉 KubeSphere 的多租户管理系统,并使用本教程中创建的资源(例如企业空间和帐户等)完成其他教程中的操作。 + +## 准备工作 + +KubeSphere 需要安装在您的机器中。 + +## 架构 + +KubeSphere 的多租户系统分**三个**层级,即集群、企业空间和项目。KubeSphere 中的项目等同于 Kubernetes 的[命名空间](https://kubernetes.io/zh/docs/concepts/overview/working-with-objects/namespaces/)。 + +您需要创建一个新的[企业空间](../../workspace-administration/what-is-workspace/)进行操作,而不是使用系统企业空间,系统企业空间中运行着系统资源,绝大部分仅供查看。出于安全考虑,强烈建议给不同的租户授予不同的权限在企业空间中进行协作。 + +您可以在一个 KubeSphere 集群中创建多个企业空间,每个企业空间下可以创建多个项目。KubeSphere 为每个级别默认设有多个内置角色。此外,您还可以创建拥有自定义权限的角色。KubeSphere 多层次结构适用于具有不同团队或组织以及每个团队中需要不同角色的企业用户。 + +## 动手实验 + +### 步骤 1:创建用户 + +安装 KubeSphere 之后,您需要向平台添加具有不同角色的用户,以便他们可以针对自己授权的资源在不同的层级进行工作。一开始,系统默认只有一个用户 `admin`,具有 `platform-admin` 角色。在本步骤中,您将创建一个示例用户 `ws-manager`。 + +1. 以 `admin` 身份使用默认帐户和密码 (`admin/P@88w0rd`) 登录 Web 控制台。 + + {{< notice tip >}} + 出于安全考虑,强烈建议您在首次登录控制台时更改密码。若要更改密码,在右上角的下拉列表中选择**用户设置**,在**密码设置**中设置新密码,您也可以在**用户设置** > **基本信息**中修改控制台语言。 + {{}} + +2. 点击左上角的**平台管理**,然后选择**访问控制**。在左侧导航栏中,选择**平台角色**。内置角色的描述信息如下表所示。 + + + + + + + + + + + + + + + + + + + + + +
+   | 内置角色 | 描述 |
+   | --- | --- |
+   | platform-self-provisioner | 创建企业空间并成为所创建企业空间的管理员。 |
+   | platform-regular | 平台普通用户,在被邀请加入企业空间或集群之前没有任何资源操作权限。 |
+   | platform-admin | 平台管理员,可以管理平台内的所有资源。 |
+ + {{< notice note >}} + 内置角色由 KubeSphere 自动创建,无法编辑或删除。 + {{}} + +3. 在**用户**中,点击**创建**。在弹出的对话框中,提供所有必要信息(带有*标记)。在**平台角色**下拉列表,选择**platform-self-provisioner**。 + + 完成后,点击**确定**。新创建的用户将显示在**用户**页面。 + + {{< notice note >}} + 如果您在此处未指定**平台角色**,该用户将无法执行任何操作。您需要在创建企业空间后,将该用户邀请至企业空间。 + {{}} + +4. 重复以上的步骤创建新用户,这些用户将在其他的教程中使用。 + + {{< notice tip >}} + - 帐户登出请点击右上角的用户名,然后选择**登出**。 + - 下面仅为示例用户名,请根据实际情况修改。 + {{}} + + + + + + + + + + + + + + + + + + + + + + + +
+   | 用户 | 指定的平台角色 | 用户权限 |
+   | --- | --- | --- |
+   | ws-admin | platform-regular | 被邀请到企业空间后,管理该企业空间中的所有资源(在此示例中,此用户用于邀请新成员加入该企业空间)。 |
+   | project-admin | platform-regular | 创建和管理项目以及 DevOps 项目,并邀请新成员加入项目。 |
+   | project-regular | platform-regular | project-regular 将由 project-admin 邀请至项目或 DevOps 项目。该用户将用于在指定项目中创建工作负载、流水线和其他资源。 |
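+
+   如果您更习惯使用命令行,也可以通过 kubectl 查看已创建的用户。以下命令假设集群中已存在 KubeSphere 的 `users.iam.kubesphere.io` 自定义资源,仅供参考:
+
+   ```bash
+   # 列出平台上的所有 KubeSphere 用户
+   kubectl get users.iam.kubesphere.io
+   ```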
+ +5. 在**用户**页面,查看创建的用户。 + + {{< notice note >}} + + 您可以点击用户名称后的 图标选择启用或禁用某个用户。您也可以勾选多个用户进行批量操作。 + + {{}} +### 步骤 2:创建企业空间 + +作为管理项目、DevOps 项目和组织成员的基本逻辑单元,企业空间是 KubeSphere 多租户系统的基础。 + +1. 在左侧导航栏,选择**企业空间**。企业空间列表中已列出默认企业空间 **system-workspace**,该企业空间包含所有系统项目。其中运行着与系统相关的组件和服务,您无法删除该企业空间。 + +2. 在企业空间列表页面,点击**创建**,输入企业空间的名称(例如 **demo-workspace**),并将用户 `ws-admin` 设置为企业空间管理员。完成后,点击**创建**。 + + {{< notice note >}} + + 如果您已启用[多集群功能](../../multicluster-management/),您需要为企业空间[分配一个或多个可用集群](../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/#在创建企业空间时选择可用集群),以便项目可以在集群中创建。 + + {{}} + +3. 登出控制台,然后以 `ws-admin` 身份重新登录。在**企业空间设置**中,选择**企业空间成员**,然后点击**邀请**。 + +4. 邀请 `project-admin` 和 `project-regular` 进入企业空间,分别授予 `demo-workspace-self-provisioner` 和 `demo-workspace-viewer` 角色,点击**确定**。 + + {{< notice note >}} +实际角色名称的格式:`-`。例如,在名为 `demo-workspace` 的企业空间中,角色 `viewer` 的实际角色名称为 `demo-workspace-viewer`。 + {{}} + +5. 将 `project-admin` 和 `project-regular` 都添加到企业空间后,点击**确定**。在**企业空间成员**中,您可以看到列出的三名成员。 + + + + + + + + + + + + + + + + + + + + + + + +
+   | 用户 | 分配的企业空间角色 | 角色权限 |
+   | --- | --- | --- |
+   | ws-admin | demo-workspace-admin | 管理指定企业空间中的所有资源(在此示例中,此用户用于邀请新成员加入企业空间)。 |
+   | project-admin | demo-workspace-self-provisioner | 创建和管理项目以及 DevOps 项目,并邀请新成员加入项目。 |
+   | project-regular | demo-workspace-viewer | project-regular 将由 project-admin 邀请至项目或 DevOps 项目。该用户将用于在指定项目中创建工作负载、流水线和其他资源。 |
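+
+   企业空间及其成员的角色绑定在底层以自定义资源的形式保存。以下命令假设相关 CRD 已随 KubeSphere 一同安装,仅作为核对示例:
+
+   ```bash
+   # 查看已创建的企业空间
+   kubectl get workspaces
+   # 查看 demo-workspace 企业空间的成员角色绑定
+   kubectl get workspacerolebindings.iam.kubesphere.io | grep demo-workspace
+   ```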
+ +### 步骤 3:创建项目 + +在此步骤中,您需要使用在上一步骤中创建的帐户 `project-admin` 来创建项目。KubeSphere 中的项目与 Kubernetes 中的命名空间相同,为资源提供了虚拟隔离。有关更多信息,请参见[命名空间](https://kubernetes.io/zh/docs/concepts/overview/working-with-objects/namespaces/)。 + +1. 以 `project-admin` 身份登录 KubeSphere Web 控制台,在**项目**中,点击**创建**。 + +2. 输入项目名称(例如 `demo-project`),点击**确定**。您还可以为项目添加别名和描述。 + +3. 在**项目**中,点击刚创建的项目查看其详情页面。 + +4. 在项目的**概览**页面,默认情况下未设置项目配额。您可以点击**编辑配额**并根据需要指定[资源请求和限制](../../workspace-administration/project-quotas/)(例如:CPU 和内存的限制分别设为 1 Core 和 1000 Gi)。 + +5. 在**项目设置** > **项目成员**中,邀请 `project-regular` 至该项目,并授予该用户 `operator` 角色。 + + {{< notice info >}} + 具有 `operator` 角色的用户是项目维护者,可以管理项目中除用户和角色以外的资源。 + {{}} + +6. 在创建[应用路由](../../project-user-guide/application-workloads/routes/)(即 Kubernetes 中的 [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/))之前,需要启用该项目的网关。网关是在项目中运行的 [NGINX Ingress 控制器](https://github.com/kubernetes/ingress-nginx)。若要设置网关,请转到**项目设置**中的**网关设置**,然后点击**设置网关**。此步骤中仍使用帐户 `project-admin`。 + +7. 选择访问方式 **NodePort**,然后点击**确定**。 + +8. 在**网关设置**下,可以在页面上看到网关地址以及 http/https 的端口。 + + {{< notice note >}} + 如果要使用 `LoadBalancer` 暴露服务,则需要使用云厂商的 LoadBalancer 插件。如果您的 Kubernetes 集群在裸机环境中运行,建议使用 [OpenELB](https://github.com/kubesphere/openelb) 作为 LoadBalancer 插件。 + {{}} + +### 步骤 4:创建角色 + +完成上述步骤后,您已了解可以为不同级别的用户授予不同角色。先前步骤中使用的角色都是 KubeSphere 提供的内置角色。在此步骤中,您将学习如何创建自定义角色以满足工作需求。 + +1. 再次以 `admin` 身份登录 KubeSphere Web 控制台,转到**访问控制**。 + +2. 点击左侧导航栏中的**平台角色**,再点击右侧的**创建**。 + + {{< notice note >}} + + **平台角色**页面的预设角色无法编辑或删除。 + + {{}} + +3. 在**创建平台角色**对话框中,设置角色标识符(例如,`clusters-admin`)、角色名称和描述信息,然后点击**编辑权限**。 + + {{< notice note >}} + + 本示例演示如何创建负责集群管理的角色。 + + {{}} + +4. 在**编辑权限**对话框中,设置角色权限(例如,选择**集群管理**)并点击**确定**。 + + {{< notice note >}} + + * 在本示例中,角色 `clusters-admin` 包含**集群管理**和**集群查看**权限。 + * 一些权限依赖于其他权限,依赖项由每项权限下的**依赖于**字段指定。 + * 选择权限后,将自动选择它所依赖的权限。 + * 若要取消选择权限,则需要首先取消选择其从属权限。 + + {{}} + +5. 在**平台角色**页面,可以点击所创建角色的名称查看角色详情,点击 以编辑角色、编辑角色权限或删除该角色。 + +6. 在**用户**页面,可以在创建帐户或编辑现有帐户时为帐户分配该角色。 + +### 步骤 5:创建 DevOps 项目(可选) + +{{< notice note >}} + +若要创建 DevOps 项目,需要预先启用 KubeSphere DevOps 系统,该系统是个可插拔的组件,提供 CI/CD 流水线、Binary-to-Image 和 Source-to-Image 等功能。有关如何启用 DevOps 的更多信息,请参见 [KubeSphere DevOps 系统](../../pluggable-components/devops/)。 + +{{}} + +1. 以 `project-admin` 身份登录控制台,在 **DevOps 项目**中,点击**创建**。 + +2. 输入 DevOps 项目名称(例如 `demo-devops`),然后点击**确定**,也可以为该项目添加别名和描述。 + +3. 点击刚创建的项目查看其详细页面。 + +4. 
转到 **DevOps 项目设置**,然后选择 **DevOps 项目成员**。点击**邀请**授予 `project-regular` 用户 `operator` 的角色,允许其创建流水线和凭证。 + + +至此,您已熟悉 KubeSphere 的多租户管理系统。在其他教程中,`project-regular` 帐户还将用于演示如何在项目或 DevOps 项目中创建应用程序和资源。 diff --git a/content/zh/docs/v3.4/quick-start/deploy-bookinfo-to-k8s.md b/content/zh/docs/v3.4/quick-start/deploy-bookinfo-to-k8s.md new file mode 100644 index 000000000..d45d32432 --- /dev/null +++ b/content/zh/docs/v3.4/quick-start/deploy-bookinfo-to-k8s.md @@ -0,0 +1,101 @@ +--- +title: "部署并访问 Bookinfo" +keywords: 'KubeSphere, Kubernetes, Bookinfo, Istio' +description: '通过部署示例应用程序 Bookinfo 来探索 KubeSphere 服务网格的基本功能。' +linkTitle: "部署并访问 Bookinfo" +weight: 2400 +--- + +作为开源的服务网格解决方案,[Istio](https://istio.io/) 为微服务提供了强大的流量管理功能。以下是 [Istio](https://istio.io/latest/zh/docs/concepts/traffic-management/) 官方网站上关于流量管理的简介: + +*Istio 的流量路由规则可以让您很容易地控制服务之间的流量和 API 调用。Istio 简化了服务级别属性的配置,比如熔断器、超时、重试等,并且能轻松地设置重要的任务,如 A/B 测试、金丝雀发布、基于流量百分比切分的概率发布等。它还提供了开箱即用的故障恢复特性,有助于增强应用的健壮性,从而更好地应对被依赖的服务或网络发生故障的情况。* + +为了给用户提供管理微服务的一致体验,KubeSphere 在容器平台上集成了 Istio。本教程演示了如何部署由四个独立的微服务组成的示例应用程序 Bookinfo,以及如何通过 NodePort 访问该应用。 + +## 视频演示 + + + +## 准备工作 + +- 您需要启用 [KubeSphere 服务网格](../../pluggable-components/service-mesh/)。 + +- 您需要完成[创建企业空间、项目、用户和角色](../create-workspace-and-project/)中的所有任务。 + +- 您需要启用**链路追踪**。有关更多信息,请参见[设置网关](../../project-administration/project-gateway/#设置网关)。 + + {{< notice note >}} + 您需要启用**链路追踪**以使用追踪功能。启用后若无法访问路由 (Ingress),请检查您的路由是否已经添加注释(例如:`nginx.ingress.kubernetes.io/service-upstream: true`)。 + {{}} + +## 什么是 Bookinfo 应用 + +Bookinfo 应用由以下四个独立的微服务组成,其中 **reviews** 微服务有三个版本。 + +- **productpage** 微服务会调用 **details** 和 **reviews** 用来生成页面。 +- **details** 微服务中包含了书籍的信息。 +- **reviews** 微服务中包含了书籍相关的评论,它还会调用 **ratings** 微服务。 +- **ratings** 微服务中包含了由书籍评价组成的评级信息。 + +这个应用的端到端架构如下所示。有关更多详细信息,请参见 [Bookinfo 应用](https://istio.io/latest/zh/docs/examples/bookinfo/)。 + +![bookinfo](/images/docs/v3.3/zh-cn/quickstart/deploy-bookinfo-to-k8s/bookinfo.png) + +## 动手实验 + +### 步骤 1:部署 Bookinfo + +1. 使用帐户 `project-regular` 登录控制台并访问项目 (`demo-project`)。前往**应用负载**下的**应用**,点击右侧的**部署示例应用**。 + +2. 在出现的对话框中点击**下一步**,其中必填字段已经预先填好,相关组件也已经设置完成。您无需修改设置,只需在最后一页(**路由设置**)点击**创建**。 + + {{< notice note >}} + +KubeSphere 会自动创建主机名。若要更改主机名,请将鼠标悬停在默认路由规则上,然后点击 icon 进行编辑。有关更多信息,请参见[创建基于微服务的应用](../../project-user-guide/application/compose-app/)。 + + {{}} + +3. 在**工作负载**中,确保这四个部署都处于`运行中`状态,这意味着该应用已经成功创建。 + + {{< notice note >}}可能需要等几分钟才能看到部署正常运行。 +{{}} + +### 步骤 2:访问 Bookinfo + +1. 在**应用**中,访问**自制应用**,点击应用 `bookinfo` 查看其详情页面。 + + {{< notice note >}}如果您没有在列表中看到该应用,请刷新页面。 + {{}} + +2. 详情页面中显示了用于访问 Bookinfo 应用的主机名和端口号。 + +3. 由于将通过 NodePort 在集群外访问该应用,因此您需要在安全组中为出站流量开放上图中的端口,并按需设置端口转发规则。 + +4. 在本地 hosts 文件 (`/etc/hosts`) 中添加一个条目将主机名映射到对应的 IP 地址,例如: + + ```bash + 139.198.178.20 productpage.demo-project.192.168.0.2.nip.io + ``` + + {{< notice warning >}} + +请勿直接复制上述内容到本地 hosts 文件,请将其替换成您自己的 IP 地址与主机名。 +{{}} + + +5. 完成后,点击**访问服务**访问该应用。 + +6. 在应用详情页面,点击左下角的 **Normal user**。 + + ![normal-user](/images/docs/v3.3/zh-cn/quickstart/deploy-bookinfo-to-k8s/normal-user.png) + +7. 
在下图中,您可以注意到 **Book Reviews** 板块仅出现 **Reviewer1** 和 **Reviewer2**,并且没有任何评级内容,因为这是当前应用版本的状态。若想探索更多流量管理相关的功能,您可以为该应用执行[金丝雀发布](../../project-user-guide/grayscale-release/canary-release/)。 + + ![ratings-page](/images/docs/v3.3/zh-cn/quickstart/deploy-bookinfo-to-k8s/ratings-page.png) + + {{< notice note >}} + +KubeSphere 基于 Istio 提供了三种灰度策略,包括[蓝绿部署](../../project-user-guide/grayscale-release/blue-green-deployment/),[金丝雀发布](../../project-user-guide/grayscale-release/canary-release/)和[流量镜像](../../project-user-guide/grayscale-release/traffic-mirroring/)。 + {{}} \ No newline at end of file diff --git a/content/zh/docs/v3.4/quick-start/enable-pluggable-components.md b/content/zh/docs/v3.4/quick-start/enable-pluggable-components.md new file mode 100644 index 000000000..1c93773cf --- /dev/null +++ b/content/zh/docs/v3.4/quick-start/enable-pluggable-components.md @@ -0,0 +1,146 @@ +--- +title: "启用可插拔组件" +keywords: 'KubeSphere,Kubernetes,可插拔,组件' +description: '了解如何在 KubeSphere 上启用可插拔组件,以便您全方位地探索 KubeSphere。安装前和安装后均可启用可插拔组件。' +linkTitle: "启用可插拔组件" +weight: 2600 +--- + +本教程演示如何在安装前或安装后启用 KubeSphere 的可插拔组件。请参照下表了解 KubeSphere 的全部可插拔组件。 + +| 配置项 | 功能组件 | 描述 | +| ------------------ | ------------------------------------- | ------------------------------------------------------------ | +| `alerting` | KubeSphere 告警系统 | 可以为工作负载和节点自定义告警策略。告警策略被触发后,告警消息会通过不同的渠道(例如,邮件和 Slack)发送至接收人。 | +| `auditing` | KubeSphere 审计日志系统 | 提供一套与安全相关并按时间顺序排列的记录,记录平台上不同租户的活动。 | +| `devops` | KubeSphere DevOps 系统 | 基于 Jenkins 提供开箱即用的 CI/CD 功能,提供一站式 DevOps 方案、内置 Jenkins 流水线与 B2I & S2I。 | +| `events` | KubeSphere 事件系统 | 提供一个图形化的 Web 控制台,用于导出、过滤和警告多租户 Kubernetes 集群中的 Kubernetes 事件。| +| `logging` | KubeSphere 日志系统 | 在统一的控制台中提供灵活的日志查询、收集和管理功能。可以添加第三方日志收集器,例如 Elasticsearch、Kafka 和 Fluentd。 | +| `metrics_server` | HPA | 根据设定指标对 Pod 数量进行动态伸缩,使运行在上面的服务对指标的变化有一定的自适应能力。| +| `networkpolicy` | 网络策略 | 可以在同一个集群内部之间设置网络策略(比如限制或阻止某些实例 Pod 之间的网络请求)。| +| `kubeedge` | KubeEdge | 为集群添加边缘节点并在这些节点上运行工作负载。 | +| `openpitrix` | KubeSphere 应用商店 | 基于 Helm 的应用程序商店,允许用户管理应用的整个生命周期。| +| `servicemesh` | KubeSphere 服务网格 (基于 Istio) | 提供细粒度的流量治理、可观测性、流量追踪以及可视化流量拓扑图。 | +| `ippool` | 容器组 IP 池 | 创建容器组 IP 池并从 IP 池中分配 IP 地址到 Pod。 | +| `topology` | 服务拓扑图 | 集成 [Weave Scope](https://www.weave.works/oss/scope/) 以查看应用和容器的服务间通信(拓扑图)。 | + +有关每个组件的更多信息,请参见[启用可插拔组件概述](../../pluggable-components/overview/)。 + +{{< notice note >}} + +- `multicluster` 不在本教程中介绍。如果要启用此功能,则需要为 `clusterRole` 设置相应的值。有关详细信息,请参见[多集群管理](../../multicluster-management/)。 +- 在安装前,请确保您的机器符合硬件要求。如果想启用所有的可插拔组件,请参考推荐机器配置:CPU ≥ 8 Core,内存 ≥ 16 G,磁盘空间 ≥ 100 G。 + +{{}} + +## 在安装前启用可插拔组件 + +对于大多数可插拔组件,您可以按照以下步骤来启用。如需启用 [KubeEdge](../../pluggable-components/kubeedge/)、[容器组 IP 池](../../pluggable-components/pod-ip-pools/)以及[服务拓扑图](../../pluggable-components/service-topology/),请直接参照相应的教程。 + +### **在 Linux 上安装** + +在 Linux 上安装 KubeSphere 时,需要创建一个配置文件,该文件列出所有 KubeSphere 组件。 + +1. [在 Linux 上安装 KubeSphere](../../installing-on-linux/introduction/multioverview/) 时,您需要创建一个默认文件名为 `config-sample.yaml` 的配置文件。通过执行以下命令来修改文件: + + ```bash + vi config-sample.yaml + ``` + + {{< notice note >}} +如果采用 [All-in-one 模式安装](../../quick-start/all-in-one-on-linux/),您无需创建 `config-sample.yaml` 文件,因为 all-in-one 模式可以通过一条命令直接创建集群。通常,all-in-one 模式适用于刚接触 KubeSphere 并希望快速上手该系统的用户。如果要在此模式下启用可插拔组件(例如,出于测试目的),请参考[在安装后启用可插拔组件](#在安装后启用可插拔组件)。 + {{}} + +2. 在此文件中,将 `enabled` 的值从 `false` 改为 `true`。这是[完整文件](https://github.com/kubesphere/kubekey/blob/release-2.2/docs/config-example.md)供您参考,修改完成后保存文件。 + +3. 
使用该配置文件创建集群: + + ```bash + ./kk create cluster -f config-sample.yaml + ``` + +### 在 Kubernetes 上安装 + +在已有 Kubernetes 集群上安装 KubeSphere 时,需要部署 [ks-installer](https://github.com/kubesphere/ks-installer/) 的两个 YAML 文件。 + +1. 首先下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml) 文件,然后打开编辑。 + + ```bash + vi cluster-configuration.yaml + ``` + +2. 在该本地文件 `cluster-configuration.yaml` 中,将对应组件 `enabled` 的值从 `false` 改为 `true`。 + +3. 编辑完成后保存文件,执行以下命令开始安装: + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f cluster-configuration.yaml + ``` + +无论是在 Linux 上还是在 Kubernetes 上安装 KubeSphere,安装后都可以在 KubeSphere 的 Web 控制台中检查已启用组件的状态。 + +## 在安装后启用可插拔组件 + +用户可以使用 KubeSphere Web 控制台查看和操作不同的资源。要在安装后启用可插拔组件,只需要在控制台中进行略微调整。对于那些习惯使用 Kubernetes 命令行工具 kubectl 的人来说,由于该工具已集成到控制台中,因此使用 KubeSphere 将毫无困难。 + +{{< notice note >}} + +如需启用 [KubeEdge](../../pluggable-components/kubeedge/)、[容器组 IP 池](../../pluggable-components/pod-ip-pools/)以及[服务拓扑图](../../pluggable-components/service-topology/),请直接参照相应的教程。 + +{{}} + +1. 以 `admin` 身份登录控制台。点击左上角的**平台管理** ,然后选择**集群管理**。 + +2. 点击**定制资源定义**,然后在搜索栏中输入 `clusterconfiguration`,点击搜索结果进入其详情页面。 + + {{< notice info >}} +定制资源定义(CRD)允许用户在不增加额外 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些定制资源。 + {{}} + +3. 在**自定义资源**中,点击 `ks-installer` 右侧的三个点,然后选择**编辑 YAML**。 + +4. 在该配置文件中,将对应组件 `enabled` 的 `false` 更改为 `true`,以启用要安装的组件。完成后,点击**确定**以保存配置。 + + ![启用组件](/images/docs/v3.3/zh-cn/quickstart/enable-pluggable-components/启用组件.png) + +5. 执行以下命令,使用 Web kubectl 来检查安装过程: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + + {{< notice tip >}} +您可以通过点击控制台右下角的锤子图标来找到 Web kubectl 工具。 + {{}} + +6. 如果组件安装成功,输出将显示以下消息。 + + ```yaml + ##################################################### + ### Welcome to KubeSphere! ### + ##################################################### + + Console: http://192.168.0.2:30880 + Account: admin + Password: P@88w0rd + + NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. + + ##################################################### + https://kubesphere.io 20xx-xx-xx xx:xx:xx + ##################################################### + ``` + +7. 
登录 KubeSphere 控制台,在**系统组件**中可以查看不同组件的状态。 + + {{< notice tip >}} + +如果在上图中看不到相关组件,可能是一些 Pod 尚未启动完成,可以通过 kubectl 执行 `kubectl get pod --all-namespaces` 来查看 Pod 的状态。 + {{}} \ No newline at end of file diff --git a/content/zh/docs/v3.4/quick-start/minimal-kubesphere-on-k8s.md b/content/zh/docs/v3.4/quick-start/minimal-kubesphere-on-k8s.md new file mode 100644 index 000000000..dec9cf225 --- /dev/null +++ b/content/zh/docs/v3.4/quick-start/minimal-kubesphere-on-k8s.md @@ -0,0 +1,59 @@ +--- +title: "在 Kubernetes 上最小化安装 KubeSphere" +keywords: 'KubeSphere, Kubernetes, 最小化安装' +description: '了解在现有 Kubernetes 集群上如何使用最小安装包安装 KubeSphere。您可以使用托管在云服务器上或者安装在本地的 Kubernetes 集群。' +linkTitle: "在 Kubernetes 上最小化安装 KubeSphere" +weight: 2200 +--- + +除了在 Linux 机器上安装 KubeSphere 之外,您还可以将其直接部署在现有的 Kubernetes 集群上。本快速入门指南将引导您完成在 Kubernetes 上最小化安装 KubeSphere 的一般性步骤。有关更多信息,请参见[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/)。 + +## 准备工作 + +- 您的 Kubernetes 版本必须为:v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 v1.21.x。 +- 确保您的机器满足最低硬件要求:CPU > 1 核,内存 > 2 GB。 +- 在安装之前,需要配置 Kubernetes 集群中的**默认**存储类型。 + +{{< notice note >}} + +- 当使用 `--cluster-signing-cert-file` 和 `--cluster-signing-key-file` 参数启动时,在 `kube-apiserver` 中会激活 CSR 签名功能。请参见 [RKE 安装问题](https://github.com/kubesphere/kubesphere/issues/1925#issuecomment-591698309)。 +- 有关在 Kubernetes 上安装 KubeSphere 的准备工作,请参见[准备工作](../../installing-on-kubernetes/introduction/prerequisites/)。 + +{{}} + +## 部署 KubeSphere + +确保您的机器满足安装的前提条件之后,可以按照以下步骤安装 KubeSphere。 + +1. 执行以下命令开始安装: + + ```bash + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml + ``` + +2. 检查安装日志: + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f + ``` + +3. 使用 `kubectl get pod --all-namespaces` 查看所有 Pod 是否在 KubeSphere 的相关命名空间中正常运行。如果是,请通过以下命令检查控制台的端口(默认为 `30880`): + + ```bash + kubectl get svc/ks-console -n kubesphere-system + ``` + +4. 确保在安全组中打开了端口 `30880`,并通过 NodePort `(IP:30880)` 使用默认帐户和密码 `(admin/P@88w0rd)` 访问 Web 控制台。 + +5. 
登录控制台后,您可以在**系统组件**中检查不同组件的状态。如果要使用相关服务,可能需要等待某些组件启动并运行。 + + +## 启用可插拔组件(可选) + +本指南仅适用于默认的最小化安装。若要在 KubeSphere 中启用其他组件,请参见[启用可插拔组件](../../pluggable-components/)。 + +## 代码演示 + + diff --git a/content/zh/docs/v3.4/quick-start/wordpress-deployment.md b/content/zh/docs/v3.4/quick-start/wordpress-deployment.md new file mode 100644 index 000000000..15bb545e7 --- /dev/null +++ b/content/zh/docs/v3.4/quick-start/wordpress-deployment.md @@ -0,0 +1,137 @@ +--- +title: "创建并部署 WordPress" +keywords: 'KubeSphere, Kubernetes, 应用, WordPress' +description: '了解在 KubeSphere 中部署示例应用程序的整个流程,包括创建凭证、创建持久卷声明、组件设置等。' +linkTitle: "创建并部署 WordPress" +weight: 2500 +--- + +## WordPress 简介 + +WordPress(使用 PHP 语言编写)是免费、开源的内容管理系统,用户可以使用 WordPress 搭建自己的网站。完整的 WordPress 应用程序包括以下 Kubernetes 对象,由 MySQL 作为后端数据库。 + +![WordPress](/images/docs/v3.3/zh-cn/quickstart/wordpress-deployment/WordPress.png) + +## 目的 + +本教程演示了如何在 KubeSphere 中创建应用程序(以 WordPress 为例)并在集群外进行访问。 + +## 视频演示 + + + +## 准备工作 + +您需要准备一个 `project regular` 帐户,并在一个项目中赋予该帐户 `operator` 角色(该用户已被邀请参加该项目)。有关更多信息,请参见[创建企业空间、项目、用户和角色](../create-workspace-and-project/)。 + +## 预计操作时间 + +大约 15 分钟。 + +## 动手实验 + +### 步骤 1:创建密钥 + +#### 创建 MySQL 密钥 + +环境变量 `WORDPRESS_DB_PASSWORD` 是连接到 WordPress 数据库的密码。在此步骤中,您需要创建一个密钥来保存将在 MySQL Pod 模板中使用的环境变量。 + +1. 使用 `project-regular` 帐户登录 KubeSphere 控制台,访问 `demo-project` 的详情页并导航到**配置**。在**保密字典**中,点击右侧的**创建**。 + +2. 输入基本信息(例如,将其命名为 `mysql-secret`)并点击**下一步**。在下一页中,选择**类型**为 **Opaque(默认)**,然后点击**添加数据**来添加键值对。输入如下所示的键 (Key) `MYSQL_ROOT_PASSWORD` 和值 (Value) `123456`,点击右下角 **√** 进行确认。完成后,点击**创建**按钮以继续。 + + +#### 创建 WordPress 密钥 + +按照以上相同的步骤创建一个名为 `wordpress-secret` 的 WordPress 密钥,输入键 (Key) `WORDPRESS_DB_PASSWORD` 和值 (Value) `123456`。创建的密钥显示在列表中。 + +### 步骤 2:创建持久卷声明 + +1. 访问**存储**下的**持久卷声明**,点击**创建**。 + +2. 输入持久卷声明的基本信息(例如,将其命名为 `wordpress-pvc`),然后点击**下一步**。 + +3. 在**存储设置**中,需要选择一个可用的**存储类**,并设置**访问模式**和**卷容量**。您可以直接使用默认值,点击**下一步**继续。 + +4. 在**高级设置**中,您无需添加额外的配置,点击**创建**完成即可。 + +### 步骤 3:创建应用程序 + +#### 添加 MySQL 后端组件 + +1. 导航到**应用负载**下的**应用**,选择**自制应用** > **创建**。 + +2. 输入基本信息(例如,在应用名称一栏输入 `wordpress`),然后点击**下一步**。 + +3. 在**服务设置**中,点击**创建服务**以在应用中设置组件。 + +4. 设置组件的服务类型为**有状态服务**。 + +5. 输入有状态服务的名称(例如 **mysql**)并点击**下一步**。 + +6. 在**容器组设置**中,点击**添加容器**。 + +7. 在搜索框中输入 `mysql:5.6`,按下**回车键**,然后点击**使用默认端口**。由于配置还未设置完成,请不要点击右下角的 **√** 按钮。 + + {{< notice note >}} +在**高级设置**中,请确保内存限制不小于 1000 Mi,否则 MySQL 可能因内存不足而无法启动。 + {{}} + +8. 向下滚动到**环境变量**,点击**来自保密字典**。输入名称 `MYSQL_ROOT_PASSWORD`,然后选择资源 `mysql-secret` 和前面步骤中创建的密钥 `MYSQL_ROOT_PASSWORD`,完成后点击 **√** 保存配置,最后点击**下一步**继续。 + +9. 选择**存储设置**中的**添加持久卷声明模板**,输入 PVC 名称前缀 (`mysql`) 和**挂载路径**(模式:`读写`,路径:`/var/lib/mysql`)的值。 + + 完成后,点击 **√** 保存设置并点击**下一步**继续。 + +10. 在**高级设置**中,可以直接点击**创建**,也可以按需选择其他选项。 + + +#### 添加 WordPress 前端组件 + +12. 再次点击**创建服务**,选择**无状态服务**。输入名称 `wordpress` 并点击**下一步**。 + +13. 与上述步骤类似,点击**添加容器**,在搜索栏中输入 `wordpress:4.8-apache` 并按下**回车键**,然后点击**使用默认端口**。 + +14. 向下滚动到**环境变量**,点击**来自保密字典**。这里需要添加两个环境变量,请输入以下值: + + - 对于 `WORDPRESS_DB_PASSWORD`,请选择在步骤 1 中创建的 `wordpress-secret` 和 `WORDPRESS_DB_PASSWORD`。 + - 点击**添加环境变量**,分别输入 `WORDPRESS_DB_HOST` 和 `mysql` 作为键 (Key) 和值 (Value)。 + + {{< notice warning >}} +对于此处添加的第二个环境变量,该值必须与步骤 5 中创建 MySQL 有状态服务设置的名称完全相同。否则,WordPress 将无法连接到 MySQL 对应的数据库。 + {{}} + + 点击 **√** 保存配置,再点击**下一步**继续。 + +15. 在**存储设置**中,点击**挂载卷**,并点击**选择持久卷声明**。 + +16. 选择上一步创建的 `wordpress-pvc`,将模式设置为`读写`,并输入挂载路径 `/var/www/html`。点击 **√** 保存,再点击**下一步**继续。 + +17. 在**高级设置**中,可以直接点击**创建**创建服务,也可以按需选择其他选项。 + +18. 现在,前端组件也已设置完成。点击**下一步**继续。 + +19. 
您可以在**路由设置**中设置路由规则(应用路由 Ingress),也可以直接点击**创建**。创建成功后,应用将显示在应用列表中。 + + +### 步骤 4:验证资源 + +在**工作负载**中,分别检查**部署**和**有状态副本集**中 `wordpress-v1` 和 `mysql-v1` 的状态。如果它们的运行状态为**运行中**,就意味着 WordPress 已经成功创建。 + +### 步骤 5:通过 NodePort 访问 WordPress + +1. 若要在集群外访问服务,选择左侧导航栏中的**应用负载 > 服务**。点击 `wordpress` 右侧的三个点后,选择**编辑外部访问**。 + +2. 在**访问方式**中选择 `NodePort`,然后点击**确定**。 + +3. 点击**服务**进入详情页,可以在**端口**处查看暴露的端口。 + +4. 通过 `{Node IP}:{NodePort}` 访问此应用程序,可以看到下图: + + ![wordpress-page](/images/docs/v3.3/zh-cn/quickstart/wordpress-deployment/wordpress-page.png) + + {{< notice note >}} + 在访问服务之前,请确保安全组中的端口已打开。 + {{}} diff --git a/content/zh/docs/v3.4/reference/_index.md b/content/zh/docs/v3.4/reference/_index.md new file mode 100644 index 000000000..a102fe196 --- /dev/null +++ b/content/zh/docs/v3.4/reference/_index.md @@ -0,0 +1,14 @@ +--- +title: "参考" +description: "KubeSphere 使用的词汇表以及如何使用 KubeSphere API 构建您自己的应用程序" +layout: "second" + +linkTitle: "参考" + +weight: 17000 + +icon: "/images/docs/v3.3/docs.svg" + +--- + +本章包含 KubeSphere 中的常用词汇表和有关 KubeSphere API 的信息。 diff --git a/content/zh/docs/v3.4/reference/api-changes/_index.md b/content/zh/docs/v3.4/reference/api-changes/_index.md new file mode 100644 index 000000000..982c4b580 --- /dev/null +++ b/content/zh/docs/v3.4/reference/api-changes/_index.md @@ -0,0 +1,12 @@ +--- +title: "API 变更" +description: "API 变更概述" +layout: "single" + +linkTitle: "API 变更" + +weight: 17300 + +icon: "/images/docs/v3.3/docs.svg" + +--- diff --git a/content/zh/docs/v3.4/reference/api-changes/logging.md b/content/zh/docs/v3.4/reference/api-changes/logging.md new file mode 100644 index 000000000..63ad9b4bb --- /dev/null +++ b/content/zh/docs/v3.4/reference/api-changes/logging.md @@ -0,0 +1,27 @@ +--- +title: "日志系统" +keywords: 'Kubernetes, KubeSphere, API, 日志系统' +description: 'KubeSphere 3.3 中日志系统(服务组件)的 API 变更。' +linkTitle: "日志系统" +weight: 17310 +--- + +KubeSphere 3.3 中**日志系统**(服务组件)的 API 变更。 + +## 时间格式 + +查询参数的时间格式必须是 Unix 时间戳(自 Unix Epoch 以来已经过去的秒数)。不再支持使用毫秒。该变更影响 `start_time` 和 `end_time` 参数。 + +## 已弃用的 API + +下列 API 已移除: + +- GET /workspaces/{workspace} +- GET /namespaces/{namespace} +- GET /namespaces/{namespace}/workloads/{workload} +- GET /namespaces/{namespace}/pods/{pod} +- 整个日志设置 API 组 + +## Fluent Operator + +在 KubeSphere 3.3 中,由于 Fluent Operator 项目已重构且不兼容,整个日志设置 API 已从 KubeSphere 内核中移除。有关如何在 KubeSphere 3.3 中配置日志收集,请参考 [Fluent Operator](https://github.com/kubesphere/fluentbit-operator) 文档。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/reference/api-changes/monitoring.md b/content/zh/docs/v3.4/reference/api-changes/monitoring.md new file mode 100644 index 000000000..af7916da9 --- /dev/null +++ b/content/zh/docs/v3.4/reference/api-changes/monitoring.md @@ -0,0 +1,110 @@ +--- +title: "监控系统" +keywords: 'Kubernetes, KubeSphere, API, 监控系统' +description: 'KubeSphere 3.3 中监控系统(服务组件)的 API 变更。' +linkTitle: "监控系统" +weight: 17320 +--- + +## API 版本 + +监控系统 API 版本已提升至 `v1alpha3`。 + +## 时间格式 + +查询参数的时间格式必须是 Unix 时间戳(自 Unix Epoch 以来已经过去的秒数)。不再支持使用小数。该变更影响 `start`、`end` 和 `time` 参数。 + +## 已弃用的指标 + +在 KubeSphere 3.3 中,下表左侧的指标已重命名为右侧的指标。 + +|V2.0|V3.3| +|---|---| +|workload_pod_cpu_usage | workload_cpu_usage| +|workload_pod_memory_usage| workload_memory_usage| +|workload_pod_memory_usage_wo_cache | workload_memory_usage_wo_cache| +|workload_pod_net_bytes_transmitted | workload_net_bytes_transmitted| +|workload_pod_net_bytes_received | workload_net_bytes_received| + +下列指标已被弃用并移除。 + +|已弃用的指标| +|---| +|cluster_workspace_count| +|cluster_account_count| +|cluster_devops_project_count| 
+|coredns_up_sum| +|coredns_cache_hits| +|coredns_cache_misses| +|coredns_dns_request_rate| +|coredns_dns_request_duration| +|coredns_dns_request_duration_quantile| +|coredns_dns_request_by_type_rate| +|coredns_dns_request_by_rcode_rate| +|coredns_panic_rate| +|coredns_proxy_request_rate| +|coredns_proxy_request_duration| +|coredns_proxy_request_duration_quantile| +|prometheus_up_sum| +|prometheus_tsdb_head_samples_appended_rate| + +KubeSphere 3.3 中引入的新指标。 + +|新指标| +|---| +|kubesphere_workspace_count| +|kubesphere_user_count| +|kubesphere_cluser_count| +|kubesphere_app_template_count| + +## 响应字段 + +在 KubeSphere 3.3 中,已移除响应字段 `metrics_level`、`status` 和 `errorType`。 + +另外,字段名称 `resource_name` 已替换为具体资源类型名称。这些类型是 `node`、`workspace`、`namespace`、`workload`、`pod`、`container` 和 `persistentvolumeclaim`。例如,您将获取 `node: node1`,而不是 `resource_name: node1`。请参见以下示例响应: + +```json +{ + "results":[ + { + "metric_name":"node_cpu_utilisation", + "data":{ + "resultType":"vector", + "result":[ + { + "metric":{ + "__name__":"node:node_cpu_utilisation:avg1m", + "node":"master" + }, + "value":[ + 1588841175.979, + "0.04587499999997817" + ] + }, + { + "metric":{ + "__name__":"node:node_cpu_utilisation:avg1m", + "node":"node1" + }, + "value":[ + 1588841175.979, + "0.06379166666670245" + ] + }, + { + "metric":{ + "__name__":"node:node_cpu_utilisation:avg1m", + "node":"node2" + }, + "value":[ + 1588841175.979, + "0.19008333333367772" + ] + } + ] + } + } + ] +} + +``` diff --git a/content/zh/docs/v3.4/reference/api-docs.md b/content/zh/docs/v3.4/reference/api-docs.md new file mode 100644 index 000000000..3b5f22a88 --- /dev/null +++ b/content/zh/docs/v3.4/reference/api-docs.md @@ -0,0 +1,125 @@ +--- +title: "KubeSphere API" +keywords: 'Kubernetes, KubeSphere, API' +description: 'REST API 是 KubeSphere 的基本结构。本指南向您展示如何访问 KubeSphere API 服务器。' +linkTitle: "KubeSphere API" +weight: 17200 +--- + +## 架构 + +KubeSphere API 服务器为 API 对象验证和配置数据。API 服务器为 REST 操作提供服务,并为集群的共享状态提供前端,其他所有组件通过它进行交互。 + +其中 /kapi 和/kapis 是 KubeSphere 拓展聚合的 API,/api和 /apis开头的都属于 Kubernetes 原生的 API,KubeSphere 把用户对原生 Kubernetes 资源的请求通过 API Server 转发到 Kubernetes API Server 对原生资源进行操作和管理。 + +![ks-apiserver](/images/docs/v3.3/zh-cn/reference/kubesphere-api/ks-apiserver.png) + +## 使用 KubeSphere API + +KubeSphere 3.0 将 **ks-apigateway** 和 **ks-account** 功能移动至 **ks-apiserver** 中,使架构更加紧凑和清晰。要使用 KubeSphere API,您需要将 **ks-apiserver** 暴露给您的客户端。 + + +### 步骤 1:暴露 KubeSphere API 服务 + +如果您要在集群内部访问 KubeSphere,可以跳过以下内容,使用 KubeSphere API 服务器 Endpoint **`http://ks-apiserver.kubesphere-system.svc`** 即可。 + +如果从集群外部访问,您需要先将 KubeSphere API 服务器 Endpoint 暴露给集群外部。 + +暴露 Kubernetes 服务的方式有很多。本示例使用 `NodePort` 来演示。使用以下命令将 `ks-apiserver` 的服务类型变更为 `NodePort`。 + +```bash +$ kubectl -n kubesphere-system patch service ks-apiserver -p '{"spec":{"type":"NodePort"}}' +$ kubectl -n kubesphere-system get svc +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +etcd ClusterIP 10.233.34.220 2379/TCP 44d +ks-apiserver NodePort 10.233.15.31 80:31407/TCP 49d +ks-console NodePort 10.233.3.45 80:30880/TCP 49d +``` + +现在,您可以从集群外部通过 URL(例如 `http://[node ip]:31407`)访问 `ks-apiserver`,其中 `[node ip]` 是您集群中任意节点的 IP 地址。 + +### 步骤 2:生成令牌 + +您需要先验证身份,然后才能向 API 服务器发起调用。下面的示例使用的密码是 `P#$$w0rd`。用户需要发起请求来生成令牌,如下所示: + +```bash +curl -X POST -H 'Content-Type: application/x-www-form-urlencoded' \ + 'http://[node ip]:31407/oauth/token' \ + --data-urlencode 'grant_type=password' \ + --data-urlencode 'username=admin' \ + --data-urlencode 'password=P#$$w0rd' \ + --data-urlencode 'client_id=kubesphere' \ + --data-urlencode 
'client_secret=kubesphere' +``` + +{{< notice note >}} + +将 `[node ip]` 替换为您的实际 IP 地址。你可以在 `ClusterConfiguration` 中配置客户端凭证, 存在一个默认的客户端凭证 `client_id` 和 `client_secret` 的值为 `kubesphere`。 + +{{}} + +如果身份正确,服务器将输出响应,如下所示。`access_token` 是访问 KubeSphere API 服务器的令牌。 + +```json +{ + "access_token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6ImFkbWluIiwidWlkIjoiYTlhNjJmOTEtYWQ2Yi00MjRlLWIxNWEtZTFkOTcyNmUzNDFhIiwidG9rZW5fdHlwZSI6ImFjY2Vzc190b2tlbiIsImV4cCI6MTYwMDg1MjM5OCwiaWF0IjoxNjAwODQ1MTk4LCJpc3MiOiJrdWJlc3BoZXJlIiwibmJmIjoxNjAwODQ1MTk4fQ.Hcyf-CPMeq8XyQQLz5PO-oE1Rp1QVkOeV_5J2oX1hvU", + "token_type": "Bearer", + "refresh_token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6ImFkbWluIiwidWlkIjoiYTlhNjJmOTEtYWQ2Yi00MjRlLWIxNWEtZTFkOTcyNmUzNDFhIiwidG9rZW5fdHlwZSI6InJlZnJlc2hfdG9rZW4iLCJleHAiOjE2MDA4NTk1OTgsImlhdCI6MTYwMDg0NTE5OCwiaXNzIjoia3ViZXNwaGVyZSIsIm5iZiI6MTYwMDg0NTE5OH0.PerssCLVXJD7BuCF3Ow8QUNYLQxjwqC8m9iOkRRD6Tc", + "expires_in": 7200 +} +``` + +### 步骤 3:发起调用 + +如果您访问 KubeSphere API 服务器的准备工作都已做完,请使用上一步中获取的访问令牌来发起调用,以获取节点列表,如下所示: + +```bash +$ curl -X GET -H "Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6ImFkbWluIiwidWlkIjoiYTlhNjJmOTEtYWQ2Yi00MjRlLWIxNWEtZTFkOTcyNmUzNDFhIiwidG9rZW5fdHlwZSI6ImFjY2Vzc190b2tlbiIsImV4cCI6MTYwMDg1MjM5OCwiaWF0IjoxNjAwODQ1MTk4LCJpc3MiOiJrdWJlc3BoZXJlIiwibmJmIjoxNjAwODQ1MTk4fQ.Hcyf-CPMeq8XyQQLz5PO-oE1Rp1QVkOeV_5J2oX1hvU" \ + -H 'Content-Type: application/json' \ + 'http://[node ip]:31407/kapis/resources.kubesphere.io/v1alpha3/nodes' + +{ + "items": [ + { + "metadata": { + "name": "node3", + "selfLink": "/api/v1/nodes/node3", + "uid": "dd8c01f3-76e8-4695-9e54-45be90d9ec53", + "resourceVersion": "84170589", + "creationTimestamp": "2020-06-18T07:36:41Z", + "labels": { + "a": "a", + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/os": "linux", + "gitpod.io/theia.v0.4.0": "available", + "gitpod.io/ws-sync": "available", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "node3", + "kubernetes.io/os": "linux", + "kubernetes.io/role": "new", + "node-role.kubernetes.io/worker": "", + "topology.disk.csi.qingcloud.com/instance-type": "Standard", + "topology.disk.csi.qingcloud.com/zone": "ap2a" + }, + "annotations": { + "csi.volume.kubernetes.io/nodeid": "{\"disk.csi.qingcloud.com\":\"i-icjxhi1e\"}", + "kubeadm.alpha.kubernetes.io/cri-socket": "/var/run/dockershim.sock", + "node.alpha.kubernetes.io/ttl": "0", + .... 
+``` + +{{< notice note >}} + +将 `[node ip]` 替换为您的实际 IP 地址。 + +{{}} + +## API 参考 + +KubeSphere API Swagger JSON 文件可以在 https://github.com/kubesphere/kubesphere/tree/release-3.3/api 仓库中找到。 + +- KubeSphere 已指定 API [Swagger Json](https://github.com/kubesphere/kubesphere/blob/release-3.1/api/ks-openapi-spec/swagger.json) 文件,它包含所有只适用于 KubeSphere 的 API。 +- KubeSphere 已指定 CRD [Swagger Json](https://github.com/kubesphere/kubesphere/blob/release-3.1/api/openapi-spec/swagger.json) 文件,它包含所有已生成的 CRD API 文档,与 Kubernetes API 对象相同。 +- kubernetes API 参考:https://kubernetes.io/docs/concepts/overview/kubernetes-api/ + +您也可以[点击这里](https://kubesphere.io/api/kubesphere)查看 KubeSphere API 文档。 diff --git a/content/zh/docs/v3.4/reference/environment-requirements.md b/content/zh/docs/v3.4/reference/environment-requirements.md new file mode 100644 index 000000000..4b01e0eee --- /dev/null +++ b/content/zh/docs/v3.4/reference/environment-requirements.md @@ -0,0 +1,37 @@ +--- +title: "环境要求" +keywords: 'KubeSphere, Kubernetes, 配置, 要求' +description: '理解使用和安装 KubeSphere 的各项环境要求。' +linkTitle: "环境要求" +weight: 17500 +--- + +本页面归纳了安装和使用 KubeSphere 的部分要求。 + +## 系统要求 + +{{< content "common/system-requirements.md" >}} + +## 依赖项要求 + +{{< content "common/dependency-requirements.md" >}} + +## 容器运行时要求 + +{{< content "common/container-runtime-requirements.md" >}} + +## 网络要求 + +{{< content "common/network-requirements.md" >}} + +## 支持的 Kubernetes 版本 + +{{< content "common/kubernetes-versions.md" >}} + +## 支持的 CSI 插件 + +{{< content "common/csi-plugins.md" >}} + +## KubeSphere Web 控制台支持的浏览器 + +![console-browser](/images/docs/v3.3/reference/environment-requirements/console-browser.png) \ No newline at end of file diff --git a/content/zh/docs/v3.4/reference/glossary.md b/content/zh/docs/v3.4/reference/glossary.md new file mode 100644 index 000000000..1c6af1d78 --- /dev/null +++ b/content/zh/docs/v3.4/reference/glossary.md @@ -0,0 +1,154 @@ +--- +title: "词汇表" +keywords: 'Kubernetes, KubeSphere, DevOps, docker, Helm, Jenkins, Istio, Prometheus, 词汇表' +description: 'KubeSphere 中使用的词汇表。' +linkTitle: "词汇表" +weight: 17100 +--- + +本词汇表包含 KubeSphere 中专有的通用术语和技术术语。 + +## 通用术语 + +- **企业空间**
+ 管理租户的工作负载项目(即 Kubernetes 中的命名空间)和 DevOps 项目的逻辑单位。不同团队的成员在企业空间中有不同的权限,可对资源执行不同的操作并共享信息。 +- **系统企业空间**
管理 KubeSphere、Kubernetes 以及可选组件(例如应用商店、服务网格和 DevOps 等)系统项目的特殊企业空间。 +- **企业空间成员**
邀请至企业空间中工作的用户,拥有特定的权限。 +- **项目**
+ KubeSphere 中的项目对应 Kubernetes 中的命名空间。 +- **多集群项目**
+ 工作负载部署在多个集群上的项目。 +- **项目成员**
+ 邀请至项目中工作的用户,拥有特定的权限。 +- **工作台**
+ 租户的登录页面,会显示租户拥有访问权限的资源,例如企业空间和项目。 +- **持久卷**
+ 持久卷(Persistent Volume,PV)是集群中的一块存储,可以由管理员预设,也可以使用存储类(Storage Class)来动态供应。 +- **持久卷声明**
+ 持久卷声明(Persistent Volume Claim,PVC)定义了存储需求,系统根据持久卷声明创建持久卷。 +- **公开集群**
集群管理员可以设置集群可见性,以便企业空间可以使用所授权的集群。将集群设置为公开集群意味着所有的平台成员都可访问该集群,并在该集群中创建和调度资源。 +- **KubeKey**
+ 以 Go 语言编写的全新安装器,可单独安装 Kubernetes 或同时安装 Kubernetes 和 KubeSphere,并支持在创建集群时部署云原生插件(YAML 或 Chart 格式),亦可用于伸缩和升级集群。 +- **ks-installer**
+ 在已有 Kubernetes 集群上部署 KubeSphere 的安装包。 + +## 应用程序和工作负载 + +- **OpenPitrix**
+ 一个用于打包、部署和管理不同类型应用的开源系统。 + +- **应用模板**
+ 某个应用程序的模板,租户可使用应用模板部署新的应用程序实例。 + +- **应用仓库**
+ 包含不同应用模板的基于 Web 的仓库,创建于独立于 OpenPitrix 的外部存储,例如 [MinIO](https://min.io/) 对象存储、[QingStor 对象存储](https://github.com/qingstor)以及 [AWS 对象存储](https://aws.amazon.com/cn/what-is-cloud-object-storage/)。 + +- **应用商店**
应用商店包含内置应用,平台租户也可在应用商店中分享不同的应用程序。 + +- **部署**
您使用部署描述一个期望状态,Kubernetes 部署控制器会以受控速率将实际状态变更为期望状态。一个部署运行着应用程序的几个副本,它会自动替换宕机或故障的实例。有关更多信息,请参见[部署](https://kubernetes.io/zh/docs/concepts/workloads/controllers/deployment/)。 + +- **有状态副本集**
有状态副本集是用于管理有状态应用程序的工作负载对象,例如 MySQL。有关更多信息,请参见[有状态副本集](https://kubernetes.io/zh/docs/concepts/workloads/controllers/statefulset/)。 + +- **守护进程集**
守护进程集管理多组容器组副本,确保所有(或某些)节点运行一个容器组的副本,例如 Fluentd 和 Logstash。有关更多信息,请参见[守护进程集](https://kubernetes.io/zh/docs/concepts/workloads/controllers/daemonset/)。 + +- **任务**
任务会创建一个或者多个容器组,并确保指定数量的容器组成功结束。有关更多信息,请参见[任务](https://kubernetes.io/zh/docs/concepts/workloads/controllers/job/)。 + +- **定时任务**
定时任务按照特定时间或特定时间间隔运行任务,定时任务对象就像 crontab 文件中的一行。有关更多信息,请参见[定时任务](https://kubernetes.io/zh/docs/concepts/workloads/controllers/cron-jobs/)。 + +- **服务**
Kubernetes 服务是一种抽象对象,定义一组逻辑容器组和访问它们的策略,有时也称为微服务。有关更多信息,请参见[服务](https://kubernetes.io/zh/docs/concepts/services-networking/service/)。 + +## DevOps + +- **DevOps 项目**
DevOps 项目用于创建和管理流水线和凭证。 + +- **SCM**
源控制管理 (Source Control Management),例如 GitHub 和 GitLab。 + +- **In-SCM**
+ 通过 SCM 工具构建基于 Jenkinsfile 的流水线。 + +- **Out-of-SCM**
+ 通过图形编辑面板构建流水线,无需编写 Jenkinsfile。 + +- **CI 节点**
+ 流水线、S2I 和 B2I 任务的专用节点。一般来说,应用程序往往需要在构建过程中拉取多个依赖项,这可能会导致如拉取时间过长、网络不稳定等问题,从而使得构建失败。为了确保流水线正常运行并加快构建速度(通过缓存),您可以配置一个或一组 CI 节点以供 CI/CD 流水线和 S2I/B2I 任务专用。 + +- **B2I**
+ B2I (Binary-to-Image) 是一套从二进制可执行文件(例如 Jar 和 War 等)构建可再现容器镜像的工具和工作流。开发者和运维团队在项目打包成 War 和 Jar 这一类的制品后,可快速将制品或二进制的 Package 打包成 Docker 镜像,并发布到 DockerHub 或 Harbor 等镜像仓库中。 + +- **S2I**
S2I (Source-to-Image) 是一套从源代码构建可再现容器镜像的工具和工作流。通过将源代码注入容器镜像,自动将编译后的代码打包成镜像。KubeSphere 支持使用 S2I 构建镜像,也支持以创建服务的形式,一键将源代码构建为镜像并推送到镜像仓库,同时创建相应的部署和服务,最终自动发布到 Kubernetes 中。 + +## 日志、事件和审计 + +- **精确匹配**
通过完全匹配关键词查找结果的检索方式。 + +- **模糊匹配**
通过部分匹配关键词查找结果的检索方式。 + +- **审计策略**
审计策略定义事件记录和所含数据的一系列规则。 + +- **审计规则**
+ 审计规则定义如何处理审计日志。 + +- **审计 Webhook**
+ Kubernetes 审计日志会发送至审计 Webhook。 + +## 监控、告警和通知 + +- **集群状态监控**
+ 监控集群中的相关指标,如节点状态、组件状态、CPU、内存、网络和硬盘等。 + +- **应用资源监控**
+ 监控平台上的应用程序资源,例如项目和 DevOps 项目的数量,以及特定类型的工作负载和服务的数量。 + +- **已分配 CPU**
+ 该指标根据节点上容器组的总 CPU 请求数计算得出。它表示节点上为工作负载预留的 CPU 资源,工作负载实际正在使用的 CPU 资源可能低于该数值。 + +- **已分配内存**
该指标根据节点上容器组的总内存请求计算得出。它表示节点上为工作负载预留的内存资源,工作负载实际正在使用的内存资源可能低于该数值。 + +- **落盘日志收集**
+ 日志收集功能允许系统收集保存在卷上的容器日志,并将日志发送到标准输出。 + +- **通知接收器**
接收通知的渠道,如电子邮件、钉钉、企业微信、Slack 和 Webhook。 + +## 网络 + +- **应用路由**
+ KubeSphere 应用路由对应 Kubernetes 中的 Ingress。 + +- **网关**
+ 创建应用路由时,您需要启用外网访问网关,将请求转发至对应的后端服务。 + +## 服务网格 + +- **金丝雀发布**
+ 一种优雅的应用程序发布方式,首先您可将一小部分实际流量发送至服务的新版本进行测试。与此同时,老版本会处理剩余流量。如果一切运行正常,您可以逐渐增加发送至新版本的流量,直到最后新版本彻底接管所有流量。如果发生任何问题,您可以立刻调整流量比例,让老版本接管所有流量。 + +- **蓝绿部署**
此方式提供零宕机部署,即在保留旧版本的同时部署新版本。在任何时候,只有其中一个版本处于活跃状态,接收所有流量,另一个版本保持空闲状态。如果运行出现问题,您可以快速回滚到旧版本。 + +- **流量镜像**
+ 一种测试应用版本的零风险方式,将实时流量的副本发送给被镜像的服务,也称为流量影子 (Traffic Shadowing)。 + +- **应用治理**
+ 开启应用治理,以在项目中实现微服务的链路追踪。 + +## 多集群管理 + +- **主集群(H 集群)**
+ 主集群管理成员集群,并提供统一的多集群中央控制平面。 + +- **成员集群(M 集群)**
+ 成员集群在多集群架构中由主集群统一管理。 + +- **直接连接**
+ 当主集群的任意节点均可访问成员集群的 kube-apiserver 地址时可使用此方式直接连接主集群和成员集群。 + +- **代理连接**
+ 当主集群无法直接连接成员集群时可使用代理方式连接主集群和成员集群。 + +- **jwtSecret**
+ 主集群和成员集群之间进行通信所需的密钥。 + +- **Tower**
+ 使用代理连接时,主集群上会安装 proxy 组件而成员集群上会安装 agent,Tower 包含 proxy 和 agent。 + +- **代理服务地址**
+ 使用代理连接时,成员 集群上的 Tower agent 需要获取的主集群的通信服务地址。 diff --git a/content/zh/docs/v3.4/reference/storage-system-installation/_index.md b/content/zh/docs/v3.4/reference/storage-system-installation/_index.md new file mode 100644 index 000000000..aace90856 --- /dev/null +++ b/content/zh/docs/v3.4/reference/storage-system-installation/_index.md @@ -0,0 +1,12 @@ +--- +title: "存储系统安装" +description: "存储系统安装" +layout: "single" + +linkTitle: "存储系统安装" + +weight: 17400 + +icon: "/images/docs/v3.3/docs.svg" + +--- diff --git a/content/zh/docs/v3.4/reference/storage-system-installation/glusterfs-server.md b/content/zh/docs/v3.4/reference/storage-system-installation/glusterfs-server.md new file mode 100644 index 000000000..1d579df71 --- /dev/null +++ b/content/zh/docs/v3.4/reference/storage-system-installation/glusterfs-server.md @@ -0,0 +1,517 @@ +--- +title: "搭建 GlusterFS 服务器" +keywords: 'Kubernetes, KubeSphere, GlusterFS' +description: '如何搭建 GlusterFS 服务器' +linkTitle: "搭建 GlusterFS 服务器" +weight: 17420 +--- + +[GlusterFS](https://kubernetes.io/zh/docs/concepts/storage/volumes/#glusterfs) 是开源的分布式文件系统,您能使用 GlusterFS 将 `glusterfs` 存储卷挂载到 Pod。如果 `glusterfs` 存储卷中预先填充了数据,则可以在 Kubernetes 集群中的 Pod 之间共享这些数据。 + +本教程演示了如何在三台服务器机器上配置 GlusterFS 以及如何安装 [Heketi](https://github.com/heketi/heketi) 来管理 GlusterFS 集群。 + +GlusterFS 和 Heketi 搭建好之后,就可以在客户端机器上安装 GlusterFS,并使用 KubeKey 创建一个存储类型为 GlusterFS 的 KubeSphere 集群。 + +## 准备 GlusterFS 节点 + +本示例中包含三台 Ubuntu 16.04 服务器机器,每台服务器都有一个附带的磁盘。 + +| 主机名 | IP 地址 | 操作系统 | 设备 | +| ------- | ----------- | ----------------------------- | --------------- | +| server1 | 192.168.0.2 | Ubuntu 16.04,4 核,4 GB 内存 | /dev/vdd 300 GB | +| server2 | 192.168.0.3 | Ubuntu 16.04,4 核,4 GB 内存 | /dev/vdd 300 GB | +| server3 | 192.168.0.4 | Ubuntu 16.04,4 核,4 GB 内存 | /dev/vdd 300 GB | + +{{< notice note >}} + +- Heketi 将安装在 `server1` 上,该服务器提供 RESTful 管理接口来管理 GlusterFS 存储卷的生命周期。您也可以将 Heketi 安装在不同的服务器机器上。 + +- 若需要更多存储空间,请在服务器上加装存储磁盘。 +- 数据将保存到 `/dev/vdd`(块设备),必须是没有经过分区或格式化的原始块设备。 + +{{}} + +## 设置无密码 SSH 登录 + +### 配置 root 登录 + +1. 登录 `server1` 并切换到 root 用户。 + + ```bash + sudo -i + ``` + +2. 更改 root 用户密码: + + ```bash + passwd + ``` + + {{< notice note >}} + + +请确保在文件 `/etc/ssh/sshd_config` 中启用了密码认证(`PasswordAuthentication` 的值应该为 `yes`)。 + +{{}} + +3. `server2` 和 `server3` 的 root 用户密码也需要进行更改。 + +### 添加 hosts 文件条目 + +1. 在所有服务器机器上配置 DNS 或编辑 `/etc/hosts` 文件,添加相应的主机名和 IP 地址: + + ```bash + vi /etc/hosts + ``` + + ```txt + # hostname loopback address + 192.168.0.2 server1 + 192.168.0.3 server2 + 192.168.0.4 server3 + ``` + +2. 请确保将以上条目添加到所有服务器机器的 `hosts` 文件中。 + +### 配置无密码 SSH 登录 + +1. 通过运行以下命令在 `server1` 上创建密钥。直接按**回车键**跳过所有提示。 + + ```bash + ssh-keygen + ``` + +2. 将密钥复制到所有 GlusterFS 节点。 + + ```bash + ssh-copy-id root@server1 + ``` + + ```bash + ssh-copy-id root@server2 + ``` + + ```bash + ssh-copy-id root@server3 + ``` + +3. 请验证您可以从 `server1` 通过无密码登录访问所有服务器机器。 + + ```bash + ssh root@server1 + ``` + + ```bash + ssh root@server2 + ``` + + ```bash + ssh root@server3 + ``` + +## 在所有服务器机器上安装 GlusterFS + +1. 运行以下命令在 `server1` 上安装 `software-properties-common`。 + + ```bash + apt-get install software-properties-common + ``` + +2. 添加社区 GlusterFS PPA。 + + ```bash + add-apt-repository ppa:gluster/glusterfs-7 + ``` + +3. 请确保使用的是最新安装包。 + + ```bash + apt-get update + ``` + +4. 安装 GlusterFS 服务器。 + + ```bash + apt-get install glusterfs-server -y + ``` + +5. 
请确保也在 `server2` 和 `server3` 上运行上述命令,并在所有机器上验证安装包版本。 + + ```text + glusterfs -V + ``` + +{{< notice note >}} + +如果您是在 Ubuntu 之外的其他系统上安装 GlusterFS,那么上述命令可能会略有不同。有关更多信息,请参见 [Gluster 文档](https://docs.gluster.org/en/latest/Install-Guide/Install/#installing-gluster)。 + +{{}} + +## 加载内核模块 + +1. 运行以下命令在 `server1` 上加载三个必要的内核模块。 + + ```bash + echo dm_thin_pool | sudo tee -a /etc/modules + ``` + + ```bash + echo dm_snapshot | sudo tee -a /etc/modules + ``` + + ```bash + echo dm_mirror | sudo tee -a /etc/modules + ``` + +2. 安装 `thin-provisioning-tools`。 + + ```bash + apt-get -y install thin-provisioning-tools + ``` + +3. 请确保您也在 `server2` 和 `server3` 上运行以上命令。 + +## 创建 GlusterFS 集群 + +1. 在 `server1` 上运行以下命令添加其他节点并创建集群。 + + ```bash + gluster peer probe server2 + ``` + + ``` + gluster peer probe server3 + ``` + +2. 请验证集群中的所有节点均已成功连接。 + + ```bash + gluster peer status + ``` + +3. 预计输出如下: + + ```bash + Number of Peers: 2 + + Hostname: server2 + Uuid: e1192d6a-b65e-4ce8-804c-72d9425211a6 + State: Peer in Cluster (Connected) + + Hostname: server3 + Uuid: 9bd733e4-96d4-49d5-8958-6c947a2b4fa6 + State: Peer in Cluster (Connected) + ``` + +## 安装 Heketi + +由于 GlusterFS 本身不提供 API 调用的方法,因此您可以安装 [Heketi](https://github.com/heketi/heketi),通过用于 Kubernetes 调用的 RESTful API 来管理 GlusterFS 存储卷的生命周期。这样,您的 Kubernetes 集群就可以动态地配置 GlusterFS 存储卷。在此示例中将会安装 Heketi v7.0.0。有关 Heketi 可用版本的更多信息,请参见其[发布页面](https://github.com/heketi/heketi/releases/)。 + +1. 在 `server1` 上下载 Heketi。 + + ```bash + wget https://github.com/heketi/heketi/releases/download/v7.0.0/heketi-v7.0.0.linux.amd64.tar.gz + ``` + + {{< notice note >}} + + 您也可以在单独的机器上安装 Heketi。 + + {{}} + +2. 将文件解压缩。 + + ``` + tar -xf heketi-v7.0.0.linux.amd64.tar.gz + ``` + + ``` + cd heketi + ``` + + ``` + cp heketi /usr/bin + ``` + + ``` + cp heketi-cli /usr/bin + ``` + +3. 创建 Heketi 服务文件。 + + ``` + vi /lib/systemd/system/heketi.service + ``` + + ``` + [Unit] + Description=Heketi Server + [Service] + Type=simple + WorkingDirectory=/var/lib/heketi + ExecStart=/usr/bin/heketi --config=/etc/heketi/heketi.json + Restart=on-failure + StandardOutput=syslog + StandardError=syslog + [Install] + WantedBy=multi-user.target + ``` + +4. 创建 Heketi 文件夹。 + + ```bash + mkdir -p /var/lib/heketi + ``` + + ``` + mkdir -p /etc/heketi + ``` + +5. 创建 JSON 文件以配置 Heketi。 + + ``` + vi /etc/heketi/heketi.json + ``` + + 示例文件: + + ```json + { + "_port_comment": "Heketi Server Port Number", + "port": "8080", + + "_use_auth": "Enable JWT authorization. Please enable for deployment", + "use_auth": false, + + "_jwt": "Private keys for access", + "jwt": { + "_admin": "Admin has access to all APIs", + "admin": { + "key": "123456" + }, + "_user": "User only has access to /volumes endpoint", + "user": { + "key": "123456" + } + }, + + "_glusterfs_comment": "GlusterFS Configuration", + "glusterfs": { + "_executor_comment": [ + "Execute plugin. Possible choices: mock, ssh", + "mock: This setting is used for testing and development.", + " It will not send commands to any node.", + "ssh: This setting will notify Heketi to ssh to the nodes.", + " It will need the values in sshexec to be configured.", + "kubernetes: Communicate with GlusterFS containers over", + " Kubernetes exec api." 
+ ], + "executor": "ssh", + + "_sshexec_comment": "SSH username and private key file information", + "sshexec": { + "keyfile": "/root/.ssh/id_rsa", + "user": "root" + }, + + "_kubeexec_comment": "Kubernetes configuration", + "kubeexec": { + "host" :"https://kubernetes.host:8443", + "cert" : "/path/to/crt.file", + "insecure": false, + "user": "kubernetes username", + "password": "password for kubernetes user", + "namespace": "Kubernetes namespace", + "fstab": "Optional: Specify fstab file on node. Default is /etc/fstab" + }, + + "_db_comment": "Database file name", + "db": "/var/lib/heketi/heketi.db", + "brick_max_size_gb" : 1024, + "brick_min_size_gb" : 1, + "max_bricks_per_volume" : 33, + + + "_loglevel_comment": [ + "Set log level. Choices are:", + " none, critical, error, warning, info, debug", + "Default is warning" + ], + "loglevel" : "debug" + } + } + ``` + + {{< notice note >}} + + 在安装 GlusterFS 作为 KubeSphere 集群的存储类型时,必须提供帐户 `admin` 及其 `Secret` 值。 + + {{}} + +6. 启动 Heketi。 + + ```bash + systemctl start heketi + ``` + +7. 检查 Heketi 的状态。 + + ```bash + systemctl status heketi + ``` + + 如果出现了 `active (running)`,则意味着安装成功。预计输出: + + ```bash + ● heketi.service - Heketi Server + Loaded: loaded (/lib/systemd/system/heketi.service; disabled; vendor preset: enabled) + Active: active (running) since Tue 2021-03-09 13:04:30 CST; 4s ago + Main PID: 9282 (heketi) + Tasks: 8 + Memory: 6.5M + CPU: 62ms + CGroup: /system.slice/heketi.service + └─9282 /usr/bin/heketi --config=/etc/heketi/heketi.json + + Mar 09 13:04:30 server1 systemd[1]: Started Heketi Server. + Mar 09 13:04:30 server1 heketi[9282]: Heketi v7.0.0 + Mar 09 13:04:30 server1 heketi[9282]: [heketi] INFO 2021/03/09 13:04:30 Loaded ssh executor + Mar 09 13:04:30 server1 heketi[9282]: [heketi] INFO 2021/03/09 13:04:30 Adv: Max bricks per volume set to 33 + Mar 09 13:04:30 server1 heketi[9282]: [heketi] INFO 2021/03/09 13:04:30 Adv: Max brick size 1024 GB + Mar 09 13:04:30 server1 heketi[9282]: [heketi] INFO 2021/03/09 13:04:30 Adv: Min brick size 1 GB + Mar 09 13:04:30 server1 heketi[9282]: [heketi] INFO 2021/03/09 13:04:30 GlusterFS Application Loaded + Mar 09 13:04:30 server1 heketi[9282]: [heketi] INFO 2021/03/09 13:04:30 Started Node Health Cache Monitor + Mar 09 13:04:30 server1 heketi[9282]: Listening on port 8080 + ``` + +8. 启用 Heketi。 + + ```bash + systemctl enable heketi + ``` + + 预计输出: + + ```bash + Created symlink from /etc/systemd/system/multi-user.target.wants/heketi.service to /lib/systemd/system/heketi.service. + ``` + +9. 为 Heketi 创建拓扑配置文件,该文件包含添加到 Heketi 的集群、节点和磁盘的信息。 + + ```bash + vi /etc/heketi/topology.json + ``` + + 示例文件: + + ```json + { + "clusters": [ + { + "nodes": [ + { + "node": { + "hostnames": { + "manage": [ + "192.168.0.2" + ], + "storage": [ + "192.168.0.2" + ] + }, + "zone": 1 + }, + "devices": [ + "/dev/vdd" + ] + }, + { + "node": { + "hostnames": { + "manage": [ + "192.168.0.3" + ], + "storage": [ + "192.168.0.3" + ] + }, + "zone": 1 + }, + "devices": [ + "/dev/vdd" + ] + }, + { + "node": { + "hostnames": { + "manage": [ + "192.168.0.4" + ], + "storage": [ + "192.168.0.4" + ] + }, + "zone": 1 + }, + "devices": [ + "/dev/vdd" + ] + } + ] + } + ] + } + ``` + + {{< notice note >}} + + - 请使用您自己的 IP 替换上述 IP 地址。 + - 请在 `devices` 一栏添加您自己的磁盘名称。 + + {{}} + +10. 加载 Heketi JSON 文件。 + + ```bash + export HEKETI_CLI_SERVER=http://localhost:8080 + ``` + + ```bash + heketi-cli topology load --json=/etc/heketi/topology.json + ``` + + 预计输出: + + ```bash + Creating cluster ... 
ID: 2d9e11adede04fe6d07cb81c5a1a7ea4 + Allowing file volumes on cluster. + Allowing block volumes on cluster. + Creating node 192.168.0.2 ... ID: 0a9f240ab6fd96ea014948c5605be675 + Adding device /dev/vdd ... OK + Creating node 192.168.0.3 ... ID: 2468086cadfee8ef9f48bc15db81c88a + Adding device /dev/vdd ... OK + Creating node 192.168.0.4 ... ID: 4c21b33d5c32029f5b7dc6406977ec34 + Adding device /dev/vdd ... OK + ``` + +11. 以上输出同时显示了集群 ID 和节点 ID。运行以下命令查看集群信息。 + + ```bash + heketi-cli cluster info 2d9e11adede04fe6d07cb81c5a1a7ea4 # Use your own cluster ID. + ``` + + 预计输出: + + ```bash + Cluster id: 2d9e11adede04fe6d07cb81c5a1a7ea4 + Nodes: + 0a9f240ab6fd96ea014948c5605be675 + 2468086cadfee8ef9f48bc15db81c88a + 4c21b33d5c32029f5b7dc6406977ec34 + Volumes: + + Block: true + + File: true + ``` + diff --git a/content/zh/docs/v3.4/reference/storage-system-installation/nfs-server.md b/content/zh/docs/v3.4/reference/storage-system-installation/nfs-server.md new file mode 100644 index 000000000..7b919484c --- /dev/null +++ b/content/zh/docs/v3.4/reference/storage-system-installation/nfs-server.md @@ -0,0 +1,102 @@ +--- +title: "搭建 NFS 服务器" +keywords: 'Kubernetes, KubeSphere, NFS 服务器' +description: '如何搭建 NFS 服务器' +linkTitle: "搭建 NFS 服务器" +weight: 17410 +--- + +KubeSphere 支持存储插件 [NFS-client Provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client)。若想使用该插件,必须预先配置 NFS 服务器。NFS 服务器配置完成后,NFS 客户端会在服务器机器上挂载目录,以便 NFS 客户端访问 NFS 服务器上的文件,即您需要创建并输出客户端机器可以访问的目录。 + +NFS 服务器机器就绪后,您可以使用 [KubeKey](../../../installing-on-linux/introduction/kubekey/) 通过 Helm Chart 来安装 NFS-client Provisioner 以及 Kubernetes 和 KubeSphere。您必须在 Chart 配置中提供 NFS 服务器的输出目录以便 KubeKey 在安装时使用。 + +{{< notice note >}} + +- 您也可以在安装 KubeSphere 集群后创建 NFS-client 的存储类型。 +- NFS 与部分应用不兼容(例如 Prometheus),可能会导致容器组创建失败。如果确实需要在生产环境中使用 NFS,请确保您了解相关风险或咨询 KubeSphere 技术支持 support@kubesphere.cloud。 + +{{}} + +本教程演示了如何安装 NFS 服务器,以 Ubuntu 16.04 为例。 + +## 安装及配置 NFS 服务器 + +### 步骤 1:安装 NFS 服务器 (NFS kernel server) + +若要设置服务器机器,就必须在机器上安装 NFS 服务器。 + +1. 运行以下命令,使用 Ubuntu 上的最新软件包进行安装。 + + ```bash + sudo apt-get update + ``` + +2. 安装 NFS 服务器。 + + ```bash + sudo apt install nfs-kernel-server + ``` + +### 步骤 2:创建输出目录 + +NFS 客户端将在服务器机器上挂载一个目录,该目录已由 NFS 服务器输出。 + +1. 运行以下命令来指定挂载文件夹名称(例如,`/mnt/demo`)。 + + ```bash + sudo mkdir -p /mnt/demo + ``` + +2. 出于演示目的,请移除该文件夹的限制性权限,这样所有客户端都可以访问该目录。 + + ```bash + sudo chown nobody:nogroup /mnt/demo + ``` + + ```bash + sudo chmod 777 /mnt/demo + ``` + +### 步骤 3:授予客户端机器访问 NFS 服务器的权限 + +1. 运行以下命令: + + ```bash + sudo nano /etc/exports + ``` + +2. 将客户端信息添加到文件中。 + + ```bash + /mnt/demo clientIP(rw,sync,no_subtree_check) + ``` + + 如果您有多台客户端机器,则可以将它们的客户端信息全部添加到文件中。或者,在文件中指定一个子网,以便该子网中的所有客户端都可以访问 NFS 服务器。例如: + + ```bash + /mnt/demo 192.168.0.0/24(rw,sync,no_subtree_check) + ``` + + {{< notice note >}} + + - `rw`:读写操作。客户端机器拥有对卷的读写权限。 + - `sync`:更改将被写入磁盘和内存中。 + - `no_subtree_check`:防止子树检查,即禁用客户端挂载允许的子目录所需的安全验证。 + + {{}} + +3. 编辑完成后,请保存文件。 + +### 步骤 4:应用配置 + +1. 运行以下命令输出共享目录。 + + ```bash + sudo exportfs -a + ``` + +2. 
重启 NFS 服务器。 + + ```bash + sudo systemctl restart nfs-kernel-server + ``` diff --git a/content/zh/docs/v3.4/release/_index.md b/content/zh/docs/v3.4/release/_index.md new file mode 100644 index 000000000..640f83cfb --- /dev/null +++ b/content/zh/docs/v3.4/release/_index.md @@ -0,0 +1,14 @@ +--- +title: "Release Notes" +description: "Release Notes of Different KubeSphere Versions" +layout: "second" + +linkTitle: "Release Notes" + +weight: 18000 + +icon: "/images/docs/v3.3/docs.svg" + +--- + +This chapter lists the release notes of all versions of KubeSphere, helping you gain a comprehensive understanding of upgrades and feature enhancements in every version release. \ No newline at end of file diff --git a/content/zh/docs/v3.4/release/release-v200.md b/content/zh/docs/v3.4/release/release-v200.md new file mode 100644 index 000000000..7bab3cb2b --- /dev/null +++ b/content/zh/docs/v3.4/release/release-v200.md @@ -0,0 +1,92 @@ +--- +title: "Release Notes for 2.0.0" +keywords: "kubernetes, docker, kubesphere, jenkins, istio, prometheus" +description: "KubeSphere Release Notes for 2.0.0" + +linkTitle: "Release Notes - 2.0.0" +weight: 18800 +--- + +KubeSphere 2.0.0 was released on **May 18th, 2019**. + +## What's New in 2.0.0 + +### Component Upgrades + +- Support Kubernetes [Kubernetes 1.13.5](https://github.com/kubernetes/kubernetes/releases/tag/v1.13.5) +- Integrate [QingCloud Cloud Controller](https://github.com/yunify/qingcloud-cloud-controller-manager). After installing load balancer, QingCloud load balancer can be created through KubeSphere console and the backend workload is bound automatically.  +- Integrate [QingStor CSI v0.3.0](https://github.com/yunify/qingstor-csi/tree/v0.3.0) storage plugin and support physical NeonSAN storage system. Support SAN storage service with high availability and high performance. +- Integrate [QingCloud CSI v0.2.1](https://github.com/yunify/qingcloud-csi/tree/v0.2.1) storage plugin and support many types of volume to create QingCloud block services. +- Harbor is upgraded to 1.7.5. +- GitLab is upgraded to 11.8.1. +- Prometheus is upgraded to 2.5.0. + +### Microservice Governance + +- Integrate Istio 1.1.1 and support visualization of service mesh management. +- Enable the access to the project's external websites and the application traffic governance. +- Provide built-in sample microservice [Bookinfo Application](https://istio.io/docs/examples/bookinfo/). +- Support traffic governance. +- Support traffic images. +- Provide load balancing of microservice based on Istio. +- Support canary release. +- Enable blue-green deployment. +- Enable circuit breaking. +- Enable microservice tracing. + +### DevOps (CI/CD Pipeline) + +- CI/CD pipeline provides email notification and supports the email notification during construction. +- Enhance CI/CD graphical editing pipelines, and more pipelines for common plugins and execution conditions. +- Provide source code vulnerability scanning based on SonarQube 7.4. +- Support [Source to Image](https://github.com/kubesphere/s2ioperator) feature. + +### Monitoring + +- Provide Kubernetes component independent monitoring page including etcd, kube-apiserver and kube-scheduler. +- Optimize several monitoring algorithm. +- Optimize monitoring resources. Reduce Prometheus storage and the disk usage up to 80%. + +### Logging + +- Provide unified log console in terms of tenant. +- Enable accurate and fuzzy retrieval. +- Support real-time and history logs. 
+- Support combined log query based on namespace, workload, Pod, container, key words and time limit.   +- Support detail page of single and direct logs. Pods and containers can be switched. +- [FluentBit Operator](https://github.com/kubesphere/fluentbit-operator) supports logging gathering settings: ElasticSearch, Kafka and Fluentd can be added, activated or turned off as log collectors. Before sending to log collectors, you can configure filtering conditions for needed logs. + +### Alerting and Notifications + +- Email notifications are available for cluster nodes and workload resources.  +- Notification rules: combined multiple monitoring resources are available. Different warning levels, detection cycle, push times and threshold can be configured. +- Time and notifiers can be set. +- Enable notification repeating rules for different levels. + +### Security Enhancement + +- Fix RunC Container Escape Vulnerability [Runc container breakout](https://log.qingcloud.com/archives/5127) +- Fix Alpine Docker's image Vulnerability [Alpine container shadow breakout](https://www.alpinelinux.org/posts/Docker-image-vulnerability-CVE-2019-5021.html) +- Support single and multi-login configuration items. +- Verification code is required after multiple invalid logins. +- Enhance passwords' policy and prevent weak passwords. +- Others security enhancements. + +### Interface Optimization + +- Optimize multiple user experience of console, such as the switch between DevOps project and other projects. +- Optimize many Chinese-English webpages. + +### Others + +- Support Etcd backup and recovery. +- Support regular cleanup of the docker's image. + +## Bugs Fixes + +- Fix delay updates of the resource and deleted pages. +- Fix the left dirty data after deleting the HPA workload. +- Fix incorrect Job status display. +- Correct resource quota, Pod usage and storage metrics algorithm. +- Adjust CPU usage percentages. +- many more bugfix diff --git a/content/zh/docs/v3.4/release/release-v201.md b/content/zh/docs/v3.4/release/release-v201.md new file mode 100644 index 000000000..57c88ac22 --- /dev/null +++ b/content/zh/docs/v3.4/release/release-v201.md @@ -0,0 +1,19 @@ +--- +title: "Release Notes for 2.0.1" +keywords: "kubernetes, docker, kubesphere, jenkins, istio, prometheus" +description: "KubeSphere Release Notes for 2.0.1" + +linkTitle: "Release Notes - 2.0.1" +weight: 18700 +--- + +KubeSphere 2.0.1 was released on **June 9th, 2019**. + +## Bug Fix + +- Fix the issue that CI/CD pipeline cannot recognize correct special characters in the code branch. +- Fix CI/CD pipeline's issue of being unable to check logs. +- Fix no-log data output problem caused by index document fragmentation abnormity during the log query. +- Fix prompt exceptions when searching for logs that do not exist. +- Fix the line-overlap problem on traffic governance topology and fixed invalid image strategy application. +- Many more bugfix diff --git a/content/zh/docs/v3.4/release/release-v202.md b/content/zh/docs/v3.4/release/release-v202.md new file mode 100644 index 000000000..ec51161cd --- /dev/null +++ b/content/zh/docs/v3.4/release/release-v202.md @@ -0,0 +1,40 @@ +--- +title: "Release Notes for 2.0.2" +keywords: "kubernetes, docker, kubesphere, jenkins, istio, prometheus" +description: "KubeSphere Release Notes for 2.0.2" + +linkTitle: "Release Notes - 2.0.2" +weight: 18600 +--- + +KubeSphere 2.0.2 was released on July 9, 2019, which fixes known bugs and enhances existing feature. 
If you have installed versions of 1.0.x, 2.0.0 or 2.0.1, please download KubeSphere installer v2.0.2 to upgrade. + +## What's New in 2.0.2 + +### Enhanced Features + +- [API docs](../../reference/api-docs/) are available on the official website. +- Block brute-force attacks. +- Standardize the maximum length of resource names. +- Upgrade the gateway of project (Ingress Controller) to the version of 0.24.1. Support Ingress grayscale release. + +## List of Fixed Bugs + +- Fix the issue that traffic topology displays resources outside of this project. +- Fix the extra service component issue from traffic topology under specific circumstances. +- Fix the execution issue when "Source to Image" reconstructs images under specific circumstances. +- Fix the page display problem when "Source to Image" job fails. +- Fix the log checking problem when Pod status is abnormal. +- Fix the issue that disk monitor cannot detect some types of volume mounting, such as LVM volume. +- Fix the problem of detecting deployed applications. +- Fix incorrect status of application component. +- Fix host node's number calculation errors. +- Fix input data loss caused by switching reference configuration buttons when adding environmental variables. +- Fix the rerun job issue that the Operator role cannot execute. +- Fix the initialization issue on IPv4 environment uuid. +- Fix the issue that the log detail page cannot be scrolled down to check past logs. +- Fix wrong APIServer addresses in KubeConfig files. +- Fix the issue that DevOps project's name cannot be changed. +- Fix the issue that container logs cannot specify query time. +- Fix the saving problem on relevant repository's secrets under certain circumstances. +- Fix the issue that application's service component creation page does not have image registry's secrets. diff --git a/content/zh/docs/v3.4/release/release-v210.md b/content/zh/docs/v3.4/release/release-v210.md new file mode 100644 index 000000000..b8d256025 --- /dev/null +++ b/content/zh/docs/v3.4/release/release-v210.md @@ -0,0 +1,155 @@ +--- +title: "Release Notes for 2.1.0" +keywords: "kubernetes, docker, kubesphere, jenkins, istio, prometheus" +description: "KubeSphere Release Notes for 2.1.0" + +linkTitle: "Release Notes - 2.1.0" +weight: 18500 +--- + +KubeSphere 2.1.0 was released on Nov 11th, 2019, which fixes known bugs, adds some new features and brings some enhancement. If you have installed versions of 2.0.x, please upgrade it and enjoy the better user experience of v2.1.0. + +## Installer Enhancement + +- Decouple some components and make components including DevOps, service mesh, app store, logging, alerting and notification optional and pluggable +- Add Grafana (v5.2.4) as the optional component +- Upgrade Kubernetes to 1.15.5. 
It is also compatible with 1.14.x and 1.13.x +- Upgrade [OpenPitrix](https://openpitrix.io/) to v0.4.5 +- Upgrade the log forwarder Fluent Bit to v1.3.2 +- Upgrade Jenkins to v2.176.2 +- Upgrade Istio to 1.3.3 +- Optimize the high availability for core components + +## App Store + +### Features + +Support upload / test / review / deploy / publish/ classify / upgrade / deploy and delete apps, and provide nine built-in applications + +### Upgrade & Enhancement + +- The application repository configuration is moved from global to each workspace +- Support adding application repository to share applications in a workspace + +## Storage + +### Features + +- Support Local Volume with dynamic provisioning +- Provide the real-time monitoring feature for QingCloud block storage + +### Upgrade & Enhancement + +QingCloud CSI is adapted to CSI 1.1.0, supports upgrade, topology, create or delete a snapshot. It also supports creating PVC based on a snapshot + +### BUG Fixes + +Fix the StorageClass list display problem + +## Observability + +### Features + +- Support for collecting the file logs on the disk. It is used for the Pod which preserves the logs as the file on the disk +- Support integrating with external ElasticSearch 7.x +- Ability to search logs containinh Chinese words +- Add initContainer log display +- Ability to export logs +- Support for canceling the notification from alerting + +### UPGRADE & ENHANCEMENT + +- Improve the performance of log search +- Refine the hints when the logging service is abnormal +- Optimize the information when the monitoring metrics request is abnormal +- Support pod anti-affinity rule for Prometheus + +### BUG FIXES + +- Fix the mistaken highlights in the logs search result +- Fix log search not matching phrases correctly +- Fix the issue that log could not be retrieved for a deleted workload when it is searched by workload name +- Fix the issue where the results were truncated when the log is highlighted +- Fix some metrics exceptions: node `inode`, maximum pod tolerance +- Fix the issue with an incorrect number of alerting targets +- Fix filter failure problem of multi-metric monitoring +- Fix the problem of no logging and monitoring information on taint nodes (Adjust the toleration attributes of node-exporter and fluent-bit to deploy on all nodes by default, ignoring taints) + +## DevOps + +### Features + +- Add support for branch exchange and git log export in S2I +- Add B2I, ability to build Binary/WAR/JAR package and release to Kubernetes +- Support dependency cache for the pipeline, S2I, and B2I +- Support delete Kubernetes resource action in `kubernetesDeploy` step +- Multi-branch pipeline supports trigger other pipelines when create or delete the branch + +### Upgrades & Enhancement + +- Support BitBucket in the pipeline +- Support Cron script validation in the pipeline +- Support Jenkinsfile syntax validation +- Support custom the link in SonarQube +- Support event trigger build in the pipeline +- Optimize the agent node selection in the pipeline +- Accelerate the start speed of the pipeline +- Use dynamical volume as the work directory of the Agent in the pipeline, also contributes to Jenkins [#589](https://github.com/jenkinsci/kubernetes-plugin/pull/598) +- Optimize the Jenkins kubernetesDeploy plugin, add more resources and versions (v1, app/v1, extensions/v1beta1、apps/v1beta2、apps/v1beta1、autoscaling/v1、autoscaling/v2beta1、autoscaling/v2beta2、networking.k8s.io/v1、batch/v1beta1、batch/v2alpha1), also contributes to Jenkins 
[#614](https://github.com/jenkinsci/kubernetes-plugin/pull/614) +- Add support for PV, PVC, Network Policy in deploy step of the pipeline, also contributes to Jenkins [#87](https://github.com/jenkinsci/kubernetes-cd-plugin/pull/87)、[#88](https://github.com/jenkinsci/kubernetes-cd-plugin/pull/88) + +### Bug Fixes + +- Fix the issue that 400 bad request in GitHub Webhook +- incompatible change: DevOps Webhook's URL prefix is changed from `/webhook/xxx` to `/devops_webhook/xxx` + +## Authentication and authority + +### Features + +Support sync and authenticate with AD account + +### Upgrades & Enhancement + +- Reduce the LDAP component's RAM consumption +- Add protection against brute force attacks + +### Bug Fixes + +- Fix LDAP connection pool leak +- Fix the issue where users could not be added in the workspace +- Fix sensitive data transmission leaks + +## User Experience + +### Features + +Ability to wizard management of projects (namespace) that are not assigned to the workspace + +### Upgrades & Enhancement + +- Support bash-completion in web kubectl +- Optimize the host information display +- Add connection test of the email server +- Add prompt on resource list page +- Optimize the project overview page and project basic information +- Simplify the service creation process +- Simplify the workload creation process +- Support real-time status update in the resource list +- optimize YAML editing +- Support image search and image information display +- Add the pod list to the workload page +- Update the web terminal theme +- Support container switching in container terminal +- Optimize Pod information display, and add Pod scheduling information +- More detailed workload status display + +### Bug Fixes + +- Fix the issue where the default request resource of the project is displayed incorrectly +- Optimize the web terminal design, make it much easier to find +- Fix the Pod status update delay +- Fix the issue where a host could not be searched based on roles +- Fix DevOps project quantity error in workspace detail page +- Fix the issue with the workspace list pages not turning properly +- Fix the problem of inconsistent result ordering after query on workspace list page diff --git a/content/zh/docs/v3.4/release/release-v211.md b/content/zh/docs/v3.4/release/release-v211.md new file mode 100644 index 000000000..8bb2769fe --- /dev/null +++ b/content/zh/docs/v3.4/release/release-v211.md @@ -0,0 +1,122 @@ +--- +title: "Release Notes for 2.1.1" +keywords: "kubernetes, docker, kubesphere, jenkins, istio, prometheus" +description: "KubeSphere Release Notes for 2.1.1" + +linkTitle: "Release Notes - 2.1.1" +weight: 18400 +--- + +KubeSphere 2.1.1 was released on Feb 23rd, 2020, which has fixed known bugs and brought some enhancements. For the users who have installed versions of 2.0.x or 2.1.0, make sure to read the user manual carefully about how to upgrade before doing that, and feel free to raise any questions on [GitHub](https://github.com/kubesphere/kubesphere/issues). 
+ +## What's New in 2.1.1 + +## Installer + +### UPGRADE & ENHANCEMENT + +- Support Kubernetes v1.14.x、v1.15.x、v1.16.x、v1.17.x,also solve the issue of Kubernetes API Compatibility#[1829](https://github.com/kubesphere/kubesphere/issues/1829) +- Simplify the steps of installation on existing Kubernetes, and remove the step of specifying cluster's CA certification, also specifying Etcd certification is no longer mandatory step if users don't need Etcd monitoring metrics +- Backup the configuration of CoreDNS before upgrading + +### BUG FIXES + +- Fix the issue of importing apps to App Store + +## App Store + +### UPGRADE & ENHANCEMENT + +- Upgrade OpenPitrix to v0.4.8 + +### BUG FIXES + +- Fix the latest version display issue for the published app #[1130](https://github.com/kubesphere/kubesphere/issues/1130) +- Fix the column name display issue in app approval list page #[1498](https://github.com/kubesphere/kubesphere/issues/1498) +- Fix the searching issue by app name/workspace #[1497](https://github.com/kubesphere/kubesphere/issues/1497) +- Fix the issue of failing to create app with the same name of previously deleted app #[1821](https://github.com/kubesphere/kubesphere/pull/1821) #[1564](https://github.com/kubesphere/kubesphere/issues/1564) +- Fix the issue of failing to deploy apps in some cases #[1619](https://github.com/kubesphere/kubesphere/issues/1619) #[1730](https://github.com/kubesphere/kubesphere/issues/1730) + +## Storage + +### UPGRADE & ENHANCEMENT + +- Support CSI plugins of Alibaba Cloud and Tencent Cloud + +### BUG FIXES + +- Fix the paging issue of storage class list page #[1583](https://github.com/kubesphere/kubesphere/issues/1583) #[1591](https://github.com/kubesphere/kubesphere/issues/1591) +- Fix the issue that the value of imageFeatures parameter displays '2' when creating ceph storage class #[1593](https://github.com/kubesphere/kubesphere/issues/1593) +- Fix the issue that search filter fails to work in persistent volumes list page #[1582](https://github.com/kubesphere/kubesphere/issues/1582) +- Fix the display issue for abnormal persistent volume #[1581](https://github.com/kubesphere/kubesphere/issues/1581) +- Fix the display issue for the persistent volumes which associated storage class is deleted #[1580](https://github.com/kubesphere/kubesphere/issues/1580) #[1579](https://github.com/kubesphere/kubesphere/issues/1579) + +## Observability + +### UPGRADE & ENHANCEMENT + +- Upgrade Fluent Bit to v1.3.5 #[1505](https://github.com/kubesphere/kubesphere/issues/1505) +- Upgrade Kube-state-metrics to v1.7.2 +- Upgrade Elastic Curator to v5.7.6 #[517](https://github.com/kubesphere/ks-installer/issues/517) +- Fluent Bit Operator support to detect the location of soft linked docker log folder dynamically on host machines +- Fluent Bit Operator support to manage the instance of Fluent Bit by declarative configuration through updating the ConfigMap of Operator +- Fix the issue of sort orders in alert list page #[1397](https://github.com/kubesphere/kubesphere/issues/1397) +- Adjust the metric of container memory usage with 'container_memory_working_set_bytes' + +### BUG FIXES + +- Fix the lag issue of container logs #[1650](https://github.com/kubesphere/kubesphere/issues/1650) +- Fix the display issue that some replicas of workload have no logs on container detail log page #[1505](https://github.com/kubesphere/kubesphere/issues/1505) +- Fix the compatibility issue of Curator to support ElasticSearch 7.x #[517](https://github.com/kubesphere/ks-installer/issues/517) +- Fix the 
display issue of container log page during container initialization #[1518](https://github.com/kubesphere/kubesphere/issues/1518) +- Fix the blank node issue when these nodes are resized #[1464](https://github.com/kubesphere/kubesphere/issues/1464) +- Fix the display issue of components status in monitor center, to keep them up-to date #[1858](https://github.com/kubesphere/kubesphere/issues/1858) +- Fix the wrong monitoring targets number in alert detail page #[61](https://github.com/kubesphere/console/issues/61) + +## DevOps + +### BUG FIXES + +- Fix the issue of UNSTABLE state not visible in the pipeline #[1428](https://github.com/kubesphere/kubesphere/issues/1428) +- Fix the format issue of KubeConfig in DevOps pipeline #[1529](https://github.com/kubesphere/kubesphere/issues/1529) +- Fix the image repo compatibility issue in B2I, to support image repo of Alibaba Cloud #[1500](https://github.com/kubesphere/kubesphere/issues/1500) +- Fix the paging issue in DevOps pipelines' branches list page #[1517](https://github.com/kubesphere/kubesphere/issues/1517) +- Fix the issue of failing to display pipeline configuration after modifying it #[1522](https://github.com/kubesphere/kubesphere/issues/1522) +- Fix the issue of failing to download generated artifact in S2I job #[1547](https://github.com/kubesphere/kubesphere/issues/1547) +- Fix the issue of [data loss occasionally after restarting Jenkins]( https://ask.kubesphere.io/forum/d/283-jenkins) +- Fix the issue that only 'PR-HEAD' is fetched when binding pipeline with GitHub #[1780](https://github.com/kubesphere/kubesphere/issues/1780) +- Fix 414 issue when updating DevOps credential #[1824](https://github.com/kubesphere/kubesphere/issues/1824) +- Fix wrong s2ib/s2ir naming issue from B2I/S2I #[1840](https://github.com/kubesphere/kubesphere/issues/1840) +- Fix the issue of failing to drag and drop tasks on pipeline editing page #[62](https://github.com/kubesphere/console/issues/62) + +## Authentication and Authorization + +### UPGRADE & ENHANCEMENT + +- Generate client certification through CSR #[1449](https://github.com/kubesphere/kubesphere/issues/1449) + +### BUG FIXES + +- Fix content loss issue in KubeConfig token file #[1529](https://github.com/kubesphere/kubesphere/issues/1529) +- Fix the issue that users with different permission fail to log in on the same browser #[1600](https://github.com/kubesphere/kubesphere/issues/1600) + +## User Experience + +### UPGRADE & ENHANCEMENT + +- Support to edit SecurityContext in workload editing page #[1530](https://github.com/kubesphere/kubesphere/issues/1530) +- Support to configure init container in workload editing page #[1488](https://github.com/kubesphere/kubesphere/issues/1488) +- Add support of startupProbe, also add periodSeconds, successThreshold, failureThreshold parameters in probe editing page #[1487](https://github.com/kubesphere/kubesphere/issues/1487) +- Optimize the status update display of Pods #[1187](https://github.com/kubesphere/kubesphere/issues/1187) +- Optimize the error message report on console #[43](https://github.com/kubesphere/console/issues/43) + +### BUG FIXES + +- Fix the status display issue for the Pods that are not under running status #[1187](https://github.com/kubesphere/kubesphere/issues/1187) +- Fix the issue that the added annotation can't be deleted when creating service of QingCloud LoadBalancer #[1395](https://github.com/kubesphere/kubesphere/issues/1395) +- Fix the display issue when selecting workload on service editing page 
#[1596](https://github.com/kubesphere/kubesphere/issues/1596) +- Fix the issue of failing to edit configuration file when editing 'Job' #[1521](https://github.com/kubesphere/kubesphere/issues/1521) +- Fix the issue of failing to update the service of 'StatefulSet' #[1513](https://github.com/kubesphere/kubesphere/issues/1513) +- Fix the issue of image searching for QingCloud and Alibaba Cloud image repos #[1627](https://github.com/kubesphere/kubesphere/issues/1627) +- Fix resource ordering issue with the same creation timestamp #[1750](https://github.com/kubesphere/kubesphere/pull/1750) +- Fix the issue of failing to edit configuration file when editing service #[41](https://github.com/kubesphere/console/issues/41) diff --git a/content/zh/docs/v3.4/release/release-v300.md b/content/zh/docs/v3.4/release/release-v300.md new file mode 100644 index 000000000..176b0c187 --- /dev/null +++ b/content/zh/docs/v3.4/release/release-v300.md @@ -0,0 +1,206 @@ +--- +title: "Release Notes for 3.0.0" +keywords: "Kubernetes, KubeSphere, release-notes" +description: "KubeSphere Release Notes for 3.0.0" + +linkTitle: "Release Notes - 3.0.0" +weight: 18300 +--- + +## How to get v3.0.0 + +- [Install KubeSphere v3.0.0 on Linux](../../installing-on-linux/) +- [Install KubeSphere v3.0.0 on existing Kubernetes](../../installing-on-kubernetes/) + +## Release Notes + +## **Installer** + +### FEATURES + +- A brand-new installer: [KubeKey](https://github.com/kubesphere/kubekey), v1.0.0, which is a turnkey solution to installing Kubernetes with KubeSphere on different platforms. It is more easy to use and reduces the dependency on OS environment + +### UPGRADES & ENHANCEMENTS + +- Be compatible with Kubernetes 1.15.x, 1.16.x, 1.17.x and 1.18.x for [ks-installer](https://github.com/kubesphere/ks-installer), v3.0.0 +- Add support for EulerOS, UOS and KylinOS +- Add support for Kunpeng and Phytium CPU +- Use ClusterConfiguration CRD to store ks-installer's configuration instead of ConfigMap + +## **Cluster Management** + +### FEATURES + +- Support management of multiple Kubernetes clusters +- Support Federated Deployment and Federated StatefulSet across multiple clusters + +## **Observability** + +### FEATURES + +- Support custom monitoring for 3rd-party application metrics in KubeSphere console +- Add Kubernetes and KubeSphere auditing support, including audit event archiving, searching and alerting +- Add Kubernetes event management support, including Kubernetes event archiving, searching and alerting based by [kube-events](https://github.com/kubesphere/kube-events) +- Add tenant control to auditing and support Kubernetes event searching. 
A tenant user can only search his or her own auditing logs and Kubernetes events +- Support archiving auditing logs and Kubernetes events to Elasticsearch, Kafka or Fluentd +- Add multi-tenant notification support by [Notification Manager](https://github.com/kubesphere/notification-manager) +- Support Alertmanager v0.21.0 + +### UPGRADES & ENHANCEMENTS + +- Upgrade Prometheus Operator to v0.38.3 (KubeSphere customized version ) +- Upgrade Prometheus to v2.20.1 +- Upgrade Node Exporter to v0.18.1 +- Upgrade kube-state-metrics to v1.9.6 +- Upgrade metrics server to v0.3.7 +- metrics-server is enabled by default (Disabled if KubeSphere is installed on existing Kubernetes) +- Upgrade Fluent Bit Operator to v0.2.0 +- Upgrade Fluent Bit to v1.4.6 +- Significantly improve log searching performance +- Allow platform admins to view pod logs from deleted namespaces +- Adjust the display style of log searching results in Toolbox +- Optimize log collection configuration for log files on pod's volume + +### BUG FIXES + +- Fix time skew in metric graphs for newly created namespaces (#[2868](https://github.com/kubesphere/kubesphere/issues/2868)) +- Fix workload-level alerting not working as expected (#[2834](https://github.com/kubesphere/kubesphere/issues/2834)) +- Fix no metric data for NotReady nodes + +## **DevOps** + +### FEATURES + +- Refactor DevOps framework, and use CRDs to manage DevOps resources + +### UPGRADES & ENHANCEMENTS + +- Remove Sonarqube from installer default packages, and support for external Sonarqube + +### BUG FIXES + +- Fix the issue that DevOps permission data is missing in a very limited number of cases + +- Fix the issue that the Button in the Stage page doesn't work (#[449](https://github.com/kubesphere/console/issues/449)) +- Fix the issue that the parameterized pipeline failed to send the parameter's value (#[2699](https://github.com/kubesphere/kubesphere/issues/2699)) + +## **App Store** + +### FEATURES + +- Support Helm V3 +- Support deploying application templates onto multiple clusters +- Support application template upgrade +- Users can view events that occur during repository synchronization + +### UPGRADES & ENHANCEMENTS + +- Users can use the same application repository name + +- Support the application template which contains CRDs + +- Merge all OpenPitrix services into one service + +- Support HTTP basic authentication when adding an application repository + +- Add and upgrade below apps in App Store: + + | App Name | App Version | Chart Version | + | ---------------------- | ----------- | :------------ | + | AWS EBS CSI Driver | 0.5.0 | 0.3.0 | + | AWS EFS CSI Driver | 0.3.0 | 0.1.0 | + | AWS FSX CSI Driver | 0.1.0 | 0.1.0 | + | Elasticsearch Exporter | 1.1.0 | 3.3.0 | + | etcd | 3.3.12 | 0.1.1 | + | Harbor | 2.0.0 | 1.4.0 | + | Memcached | 1.5.20 | 3.2.3 | + | Minio master | | 5.0.26 | + | MongoDB | 4.2.1 | 0.3.0 | + | MySQL | 5.7.30 | 1.6.6 | + | MySQL Exporter | 0.11.0 | 0.5.3 | + | Nginx | 1.18.0 | 1.3.2 | + | PorterLB | 0.3-alpha | 0.1.3 | + | PostgreSQL | 12.0 | 0.3.2 | + | RabbitMQ | 3.8.1 | 0.3.0 | + | Redis | 5.0.5 | 0.3.2 | + | Redis Exporter | 1.3.4 | 3.4.1 | + | Tomcat | 8.5.41 | 0.4.1+1 | + +### BUG FIXES + +- Fix the issue of insufficient length of attachment IDs + +## **Network** + +### FEATURES + +- Support project network isolation by adding controllers to manage custom project network policies +- Support workspace network isolation +- Support adding, viewing, modifying and deleting native Kubernetes network policies + +## **Service Mesh** + 
+### FEATURES + +- Support cleaning Jaeger ES Indexer + +### UPGRADES & ENHANCEMENTS + +- Upgrade Istio to v1.4.8 + +## **Storage** + +### FEATURES + +- Support volume snapshot management +- Support storage capacity management +- Support volume monitoring + +## **Security** + +### FEATURES + +- Support LDAP and OAuth login +- Support custom workspace roles +- Support custom DevOps project roles +- Support access control across multiple clusters +- Support pod security context (#[1453](https://github.com/kubesphere/kubesphere/issues/1453)) + +### UPGRADES & ENHANCEMENTS + +- Simplify the role definition +- Optimize built-in roles + +### BUG FIXES + +- Fix the issue of login failure due to node clock skew + +## **Globalization** + +### FEATURES + +- Add support for new languages in the web console, including Spanish and Traditional Chinese + +## **User Experience** + +### FEATURES + +- Add support for history record viewing in Toolbox. Users can re-visit the Clusters/Workspaces/Projects/DevOps Projects that they recently visited, which can also be launched through shortcut keys + +### UPGRADES & ENHANCEMENTS + +- Refactor global navigation +- Refactor breadcrumbs in detail pages +- Refactor data watching in the resources list +- Simplify project creation +- Refactor composing application creation, and support creating a composing application through YAML +- Support workload revision through YAML +- Optimize the display of log query results +- Refactor app store deployment form +- Support helm chart schema (#[schema-files](https://helm.sh/docs/topics/charts/#schema-files)) + +### BUG FIXES + +- Fix the error when editing ingress annotations (#[1931](https://github.com/kubesphere/kubesphere/issues/1931)) +- Fix container probes when editing in workload edit template modal +- Fix XSS security problems of the server-side templates \ No newline at end of file diff --git a/content/zh/docs/v3.4/release/release-v310.md b/content/zh/docs/v3.4/release/release-v310.md new file mode 100644 index 000000000..87beea01b --- /dev/null +++ b/content/zh/docs/v3.4/release/release-v310.md @@ -0,0 +1,175 @@ +--- +title: "Release Notes for 3.1.0" +keywords: "Kubernetes, KubeSphere, release-notes" +description: "KubeSphere Release Notes for 3.1.0" +linkTitle: "Release Notes - 3.1.0" +weight: 18200 +--- + +## 如何获取 v3.1.0 + +- [Install KubeSphere v3.1.0 on Linux](https://github.com/kubesphere/kubekey) +- [Install KubeSphere v3.1.0 on existing Kubernetes](https://github.com/kubesphere/ks-installer) + +## 新功能及增强 + +### 多集群管理 + +- Member 集群管理服务轻量化,移除 Redis、OpenLDAP 等组件 [#3056](https://github.com/kubesphere/kubesphere/issues/3056) +- 简化添加集群的操作,并验证集群配置(如 jwtSecret)有效性 [#3232](https://github.com/kubesphere/kubesphere/issues/3232) +- 可按需配置集群控制器同步时间段 [#3213](https://github.com/kubesphere/kubesphere/issues/3213) +- 重构集群控制器,优化逻辑 [#3234](https://github.com/kubesphere/kubesphere/issues/3234) +- 支持以高可用方式运行 Tower 管理和代理服务 [#31](https://github.com/kubesphere/tower/issues/31) +- 升级工具箱中的 Kubectl 且与 Kubernetes 集群版本保持一致 [#3103](https://github.com/kubesphere/kubesphere/issues/3103) + +### 边缘节点管理 + +#### 集成 KubeEdge [#3070](https://github.com/kubesphere/kubesphere/issues/3070) + +- 支持 KubeEdge 云端组件的安装部署 +- 支持 KubeEdge 边缘节点的添加 +- 支持边缘节点的日志和监控数据采集 +- 支持边缘节点网络配置自动化的加入和退出 +- 边缘节点在加入集群时,支持自动添加污点 +- 支持通过添加 nodeAffinity 禁止云端工作负载(如 DaemonSet)调度到边缘节点 [#1295](https://github.com/kubesphere/ks-installer/pull/1295) [#1297](https://github.com/kubesphere/ks-installer/pull/1297) [1300](https://github.com/kubesphere/ks-installer/pull/1300) +- 
支持调度工作负载至边缘节点 + +### 认证和权限管理 + +- 新用户首次登录,提示修改初始密码 +- 通过第三方平台登录 KubeSphere,需确认帐户信息 +- 支持 [CAS](https://apereo.github.io/cas/5.0.x/protocol/CAS-Protocol-Specification.html) 身份提供商 [#3047](https://github.com/kubesphere/kubesphere/issues/3047) +- 支持 [OIDC](https://openid.net/specs/openid-connect-core-1_0.html) 身份提供商 [#2941](https://github.com/kubesphere/kubesphere/issues/2941) +- 支持 IDaaS (Alibaba Cloud Identity as a Service) 身份提供商 [#2997](https://github.com/kubesphere/kubesphere/pull/2997) +- 支持 Service Account 管理 [#3211](https://github.com/kubesphere/kubesphere/issues/3211) +- 改善 LDAP 认证插件,支持 LDAPS 和搜索过滤 [#2970](https://github.com/kubesphere/kubesphere/issues/2970) [#3766](https://github.com/kubesphere/kubesphere/issues/3766) +- 改善认证插件,简化身份提供商的配置方式 [#2970](https://github.com/kubesphere/kubesphere/issues/2970) + + +### 多租户管理 +- 支持用户组管理,可将用户组添加至企业空间或项目参与协同 [#2940](https://github.com/kubesphere/kubesphere/issues/2940) +- 支持企业空间配额,可限制企业空间的资源用量 [#2939](https://github.com/kubesphere/kubesphere/issues/2939) + +### 网络 + +- 新增支持 Kube-OVN 插件 +- 支持 Calico IP 池管理 [#3057](https://github.com/kubesphere/kubesphere/issues/3057) +- 支持网络可视化 [#3061](https://github.com/kubesphere/kubesphere/issues/3061) [#583](https://github.com/kubesphere/kubesphere/issues/583) + +### 可观测性 + +- 优化集成已有 Prometheus 服务对接方式 [#3068](https://github.com/kubesphere/kubesphere/issues/3068) [#1164](https://github.com/kubesphere/ks-installer/pull/1164) [Guide](/docs/v3.3/faq/observability/byop/) +- 新增 Thanos Ruler (Thanos v0.18.0) 用于新版告警 +- 升级 Prometheus 至 v2.26.0 +- 升级 Prometheus Operator 至 v0.42.1 +- 升级 kube-state-metrics 至 v1.9.7 +- 升级 metrics-server 至 v0.4.2 +- 升级 Notification Manager 至 v1.0.0 [Releases](https://github.com/kubesphere/notification-manager/releases) +- 升级 FluentBit Operator 至 v0.5.0 [Releases](https://github.com/kubesphere/fluentbit-operator/releases) +- 升级 FluentBit 至 v1.6.9 +- 升级 KubeEvents 至 v0.2.0 +- 升级 Kube-Auditing 至 v0.1.2 + +#### 监控 + +- 支持图形化方式配置 ServiceMonitor [#1031](https://github.com/kubesphere/console/pull/1301) +- 支持 PromQL Auto-completion 和 Syntax Highlighting [#1307](https://github.com/kubesphere/console/pull/1307) +- 支持集群层级的自定义监控 [#3193](https://github.com/kubesphere/kubesphere/pull/3193) +- kube-scheduler 与 kube-controller-manager 数据抓取由 HTTP 端口 10251/10252 改为 HTTPS 端口 10259/10257 [#1367](https://github.com/kubesphere/ks-installer/pull/1367) + +#### 告警 + +- 支持 Prometheus 风格的告警规则配置管理 [#3181](https://github.com/kubesphere/kubesphere/pull/3181) +- 支持平台及项目层级的告警规则 [#3181](https://github.com/kubesphere/kubesphere/pull/3181) +- 支持显示告警规则的实时告警状态 [#3181](https://github.com/kubesphere/kubesphere/pull/3181) + +#### 通知管理 + +- 新增钉钉、企业微信、Slack 和 Webhook 通知方式,提供图形化管理 [#3066](https://github.com/kubesphere/kubesphere/issues/3066) + +#### 日志 + +- 支持将日志输出到 [Loki](https://github.com/kubesphere/fluentbit-operator/blob/master/docs/plugins/output/loki.md) [#39](https://github.com/kubesphere/fluentbit-operator/pull/39) +- 支持收集 kubelet/docker/containerd 的日志 [#38](https://github.com/kubesphere/fluentbit-operator/pull/38) +- 支持收集 [auditd](https://github.com/kubesphere/fluentbit-operator#auditd) 的日志 [#45](https://github.com/kubesphere/fluentbit-operator/pull/45) + +### DevOps + +- 支持 GitLab 多分支流水线 [#3100](https://github.com/kubesphere/kubesphere/issues/3100) +- 可同时启动并运行多条流水线 [#1811](https://github.com/kubesphere/kubesphere/issues/1811) +- 支持流水线复制 [#3053](https://github.com/kubesphere/kubesphere/issues/3053) +- 新增权限可控的流水线审核机制 [#2483](https://github.com/kubesphere/kubesphere/issues/2483) 
[#3006](https://github.com/kubesphere/kubesphere/issues/3006) +- 访问 DevOps 项目首页可查看流水线运行状态 [#3007](https://github.com/kubesphere/kubesphere/issues/3007) +- 支持通过流水线 Tag 触发流水线运行 [#3051](https://github.com/kubesphere/kubesphere/issues/3051) +- 支持 S2I Webhook [#6](https://github.com/kubesphere/s2ioperator/issues/6) +- 优化在输入错误的流水线定时参数时的提示信息 [#2919](https://github.com/kubesphere/kubesphere/issues/2919) +- 优化创建流水线的交互体验 [#1283](https://github.com/kubesphere/console/issues/1283) +- 优化 S2I 错误提示信息 [#140](https://github.com/kubesphere/s2ioperator/issues/140) +- 升级 Jenkins 至 2.249.1 [#2618](https://github.com/kubesphere/kubesphere/issues/2618) +- 调整 Jenkins 部署方式为 Jenkins Distribution [#2182](https://github.com/kubesphere/kubesphere/issues/2182) + +### 应用商店及应用 + +- 新增 MySQL 高可用集群应用:[XenonDB](https://github.com/radondb/xenondb) +- 支持修改已部署的应用模板 +- 支持查看应用模板部署失败的原因 [#3036](https://github.com/kubesphere/kubesphere/issues/3036) [#3001](https://github.com/kubesphere/kubesphere/issues/3001) [#2951](https://github.com/kubesphere/kubesphere/issues/2951) +- 支持批量删除应用模板 + +### 微服务治理 + +- 支持图形化流量方向检测,图像化方式显示应用 (Composing App) 流量的流入/流出 [#3153](https://github.com/kubesphere/kubesphere/issues/3153) +- 支持 Kiali 附加组件,用户可以通过 Kiali 直接管理 Istio [#3106](https://github.com/kubesphere/kubesphere/issues/3106) +- 支持 NGINX Ingress Gateway 的监控,新增 NGINX Ingress Controller 的监控指标 [#1205](https://github.com/kubesphere/ks-installer/pull/1205) +- 支持在创建应用时添加应用路由 [#1426](https://github.com/kubesphere/console/issues/1426) +- 升级 Istio 至 1.6.10 [#3326](https://github.com/kubesphere/kubesphere/issues/3236) + +### 计量计费 + +- 支持集群、企业空间和应用级别的应用消耗量统计 [#3062](https://github.com/kubesphere/kubesphere/issues/3062) +- 通过 ConfigMap 方式可为计量资源配置计费单价 + +### UI 页面优化 + +- 优化首页的 Loading 效果 +- 优化 kubectl 为独立页面 +- 优化可视化流水线的配置显示 +- 优化流水线的运行状态的错误显示 +- 优化代码仓库的筛选方式 +- 优化节点调度策略的设置方式 +- 优化部署模式的设置方式 + +## 重要的技术调整 + +- 升级 Kubernetes 版本依赖,从 v1.17 调整至 v1.18 [#3274](https://github.com/kubesphere/kubesphere/issues/3274) +- 升级 Prometheus client_golang 版本依赖至 v1.5.1,升级 Prometheus 版本依赖至 v1.8.2 [3097](https://github.com/kubesphere/kubesphere/pull/3097) +- 基于 CRD 重构应用管理框架 OpenPitrix 并修复原有架构导致的问题 [#3036](https://github.com/kubesphere/kubesphere/issues/3036) [#3001](https://github.com/kubesphere/kubesphere/issues/3001) [#2995](https://github.com/kubesphere/kubesphere/issues/2995) [#2981](https://github.com/kubesphere/kubesphere/issues/2981) [#2954](https://github.com/kubesphere/kubesphere/issues/2954) [#2951](https://github.com/kubesphere/kubesphere/issues/2951) [#2783](https://github.com/kubesphere/kubesphere/issues/2783) [#2713](https://github.com/kubesphere/kubesphere/issues/2713) [#2700](https://github.com/kubesphere/kubesphere/issues/2700) [#1903](https://github.com/kubesphere/kubesphere/issues/1903) +- 告警架构调整,不再使用 MySQL、Redis 和 etcd 等组件以及旧版告警规则格式。改为使用 Thanos Ruler 配合 Prometheus 内置告警规则进行告警管理,新版告警兼容 Prometheus 告警规则。KubeSphere v3.0.0 中旧版告警规则会在升级到 v3.1.0 后自动迁移为新版告警规则 +- 通知架构调整,不再使用 MySQL、Redis 和 etcd 等组件。改为使用 [Notification Manager](https://github.com/kubesphere/notification-manager/) 以 CRD 的方式配置通知渠道。通知渠道设置由告警规则级别调整为集群级别,且多集群仅需设置一次通知渠道 + +## 废弃或移除的功能 + +- 依赖 MySQL、Redis 和 etcd 等组件的旧版告警与通知被新版告警与通知替代 +- 容器终端 WebSocket API 发生变更 [#3041](https://github.com/kubesphere/kubesphere/issues/3041) + +## 问题修复 +- 修复帐户无法登录的问题 [#3132](https://github.com/kubesphere/kubesphere/issues/3132) [3357](https://github.com/kubesphere/kubesphere/issues/3357) +- 修复容器日志不支持ANSI Color的问题 [#1322](https://github.com/kubesphere/kubesphere/issues/3044) +- 修复以 `kube` 起始命名的项目(即 Namespace)下的微服务应用无法获取 Istio 相关的监控数据的问题 
[#3126](https://github.com/kubesphere/kubesphere/issues/3162) +- 修复 Viewer 可进入容器终端的安全隐患 [#3041](https://github.com/kubesphere/kubesphere/issues/3041) +- 修复级联资源无法被删除的问题 [#2912](https://github.com/kubesphere/kubesphere/issues/2912) +- 修复 Kubernetes 1.19 及以上版本无法正常使用的问题 [#2928](https://github.com/kubesphere/kubesphere/issues/2928) [#2928](https://github.com/kubesphere/kubesphere/issues/2928) +- 修复微服务应用**监控**按钮无效的问题 [#1394](https://github.com/kubesphere/console/issues/1394) +- 修复灰度发布的服务名不能与微服务应用的标签名相同的问题 [#3128](https://github.com/kubesphere/kubesphere/issues/3128) +- 修复微服务应用状态无法更新的问题 [#3241](https://github.com/kubesphere/kubesphere/issues/3241) +- 修复 Host 和 Member 集群在有同名企业空间的情况下,Member 集群下的企业空间被删除的问题 [#3169](https://github.com/kubesphere/kubesphere/issues/3169) +- 修复通过 Proxy 方式下联邦多集群连接断开的问题 [#3202](https://github.com/kubesphere/kubesphere/pull/3203) +- 修正多集群状态显示问题 [#3135](https://github.com/kubesphere/kubesphere/issues/3135) +- 修复 DevOps 流水线中无法部署工作负载的问题 [#3112](https://github.com/kubesphere/kubesphere/issues/3112) +- 修复 DevOps 项目管理员无法下载 Artifact 的问题 [#3088](https://github.com/kubesphere/kubesphere/issues/3083) +- 修复 DevOps 无法创建流水线的问题 [#3105](https://github.com/kubesphere/kubesphere/issues/3105) +- 修复多集群下流水线触发的问题 [#2626](https://ask.kubesphere.io/forum/d/2626-webhook-jenkins) +- 修复某些情况下编辑流水线时导致的数据丢失问题 [#1270](https://github.com/kubesphere/console/issues/1270) +- 修复点击 **Docker Container Registry Credentials** 时的报错问题 [#1269](https://github.com/kubesphere/console/issues/1269) +- 修复英文控制台显示中文代码质量检查结果的问题 [#1278](https://github.com/kubesphere/console/issues/1278) +- 修复 Jenkinsfile 中包含布尔值时的显示报错问题 [#3043](https://github.com/kubesphere/kubesphere/issues/3043) +- 修复当 PVC 不含有 `StorageClassName` 时存储管理页面无法显示的问题 [#1109](https://github.com/kubesphere/ks-installer/issues/1109) diff --git a/content/zh/docs/v3.4/release/release-v311.md b/content/zh/docs/v3.4/release/release-v311.md new file mode 100644 index 000000000..56ec9256d --- /dev/null +++ b/content/zh/docs/v3.4/release/release-v311.md @@ -0,0 +1,171 @@ +--- +title: "Release Notes for 3.1.1" +keywords: "Kubernetes, KubeSphere, release-notes" +description: "KubeSphere Release Notes for 3.1.1" +linkTitle: "Release Notes - 3.1.1" +weight: 18100 +--- + +## 用户体验 + +### 优化增强 + +- 删除工作负载时支持批量删除关联资源 [kubesphere/console#1933](https://github.com/kubesphere/console/pull/1933) +- 优化页面弹框 [kubesphere/console#2016](https://github.com/kubesphere/console/pull/2016) +- 允许在 system-workspace 下的项目中使用容器终端 [kubesphere/console#1921](https://github.com/kubesphere/console/pull/1921) + +### 问题修复 + +- 移除服务管理页面中 headless service 编辑外网访问功能 [kubesphere/console#2055](https://github.com/kubesphere/console/issues/2055) +- 修复了创建工作负载时环境变量中占位符无法正确展示的问题 [kubesphere/console#2008](https://github.com/kubesphere/console/pull/2008) +- 修复了在特定页面登出时无法正确重定向至登录页面的问题 [kubesphere/console#2009](https://github.com/kubesphere/console/pull/2009) +- 修复了容器组模板编辑页面中协议下拉框显示不全的问题 [kubesphere/console#1944](https://github.com/kubesphere/console/pull/1944) +- 修复了工作负载创建时探针格式校验的问题 [kubesphere/console#1941](https://github.com/kubesphere/console/pull/1941) +- 修复了企业空间成员详情页面中 DevOps 项目列表展示错误的问题 [#1936](https://github.com/kubesphere/console/pull/1936) +- 修复文案错误、缺失的问题 [kubesphere/console#1879](https://github.com/kubesphere/console/pull/1879) [kubesphere/console#1880](https://github.com/kubesphere/console/pull/1880) [kubesphere/console#1895](https://github.com/kubesphere/console/pull/1895) + +## 可观测性 + +### 优化增强 + +- 优化了通知设置中端口号的格式限制[#1885](https://github.com/kubesphere/console/pull/1885) +- 支持安装时指定使用已有的 Prometheus. 
[#1528](https://github.com/kubesphere/ks-installer/pull/1528) + +### 问题修复 + +- 修复邮件服务器同步错误 [#1969](https://github.com/kubesphere/console/pull/1969) +- 修复重启 installer 后 notification manager 会被重置的问题 [#1564](https://github.com/kubesphere/ks-installer/pull/1564) +- 修复了删除监测对象后不能删除其告警策略的问题 [#2045](https://github.com/kubesphere/console/pull/2045) +- 修复了增加监控资源无默认模板的问题 [#2029](https://github.com/kubesphere/console/pull/2029) +- 修复了容器只显示老日志的问题[#1972](https://github.com/kubesphere/console/issues/1972) +- 修复了告警信息时间显示错误的问题 [#1978](https://github.com/kubesphere/console/pull/1978) +- 完善了一些创建告警规则时的输入规则 [#1958](https://github.com/kubesphere/console/pull/1958) +- 修复了自定义监控因可视区的高度导致级联选择无法完全显示指标的问题 [#1989](https://github.com/kubesphere/console/pull/1989) +- 调整了 node exporter 和 kube-state-metrics 的 limit [#1537](https://github.com/kubesphere/ks-installer/pull/1537) +- 微调了对 etcdHighNumberOfFailedGRPCRequests 规则的选择器,来避免错误的 etcd 告警 [#1540](https://github.com/kubesphere/ks-installer/pull/1540) +- 修复升级时 events ruler 组件未升到新版本的问题 [#1594](https://github.com/kubesphere/ks-installer/pull/1594) +- 修复规则选择器:kube_node_status_allocatable_memory_bytes, kube_resourcequota [#1560](https://github.com/kubesphere/ks-installer/pull/1560) + +## 微服务治理 + +### 优化增强 + +- 优化 trace 页面增加时间选择器 [#2022](https://github.com/kubesphere/console/pull/2022) + +### 问题修复 + +- 修复 trace 选项卡无法正常展示的问题 [kubesphere/console#1890](https://github.com/kubesphere/console/pull/1890) + +## DevOps + +### 优化增强 + +- 支持 GitLab 多分支流水线按分支名筛选过滤 [kubesphere/console#2077](https://github.com/kubesphere/console/pull/2077) +- 更改了 b2i 页面的“重新执行”按钮为“执行”[kubesphere/console#1981](https://github.com/kubesphere/console/pull/1981) + +### 问题修复 + +- 修复凭证状态无法同步的问题 [kubesphere/console#1956](https://github.com/kubesphere/console/pull/1956) +- 修复了 CI 自动推送镜像时 tag 错误的问题 [kubesphere/console#2037](https://github.com/kubesphere/console/pull/2037) +- 修复了在流水线详情页不能返回上一个页面的问题 [kubesphere/console#1996](https://github.com/kubesphere/console/pull/1996) +- 修复了镜像构建器弹窗名称不一致的问题 [kubesphere/console#1922](https://github.com/kubesphere/console/pull/1922) +- 修复了在 DevOps 项目中创建 kubeconfig 类型的证书更新被重置的问题 [kubesphere/console#1990](https://github.com/kubesphere/console/pull/1990) +- 修复了多分支流水线中信任用户错误的问题 [kubesphere/console#1987](https://github.com/kubesphere/console/pull/1987) +- 修复了 DevOps 项目中流水线 stage label 在配置其他项不保存后被重置的问题 [kubesphere/console#1979](https://github.com/kubesphere/console/pull/1979) +- 修复了 shell 和 lable 在流水线中显示不准确的问题 [kubesphere/console#1970](https://github.com/kubesphere/console/pull/1970) +- 修复了流水线基础信息对话框显示信息混乱的问题 [kubesphere/console#1955](https://github.com/kubesphere/console/pull/1955) +- 修复了多分支流水线运行 API 错误的问题 [kubesphere/console#1954](https://github.com/kubesphere/console/pull/1954) +- 修复了流水线中 webhook 推送设置无效的问题 [kubesphere/console#1953](https://github.com/kubesphere/console/pull/1953) +- 修复了流水线编辑器中关于拖拽功能的文案 [kubesphere/console#1949](https://github.com/kubesphere/console/pull/1949) +- 修复了从源码构建服务中构建环境中无默认选项的问题 [kubesphere/console#1993](https://github.com/kubesphere/console/pull/1993) + +## 认证与鉴权 + +### 问题修复 + +- 修复用户最近登录时间错误的问题 [kubesphere/console#1881](https://github.com/kubesphere/console/pull/1881) +- 修复企业空间 admin 用户无法查看资源配额的问题 [kubesphere/ks-installer#1551](https://github.com/kubesphere/ks-installer/pull/1551) [kubesphere/console#2062](https://github.com/kubesphere/console/pull/2062) +- 修复项目成员无法连接容器终端的问题 [kubesphere/console#2002](https://github.com/kubesphere/console/pull/2002) +- 修复为项目分配企业空间时无法指定管理员的问题 [kubesphere/console#1961](https://github.com/kubesphere/console/pull/1961) +- 
修复创建企业空间角色时权限项名称重复的问题 [kubesphere/console#1945](https://github.com/kubesphere/console/pull/1945) + +## 多租户管理 + +### 问题修复 + +- 修复用户组可以关联已删除角色的问题 [#1899](https://github.com/kubesphere/console/pull/1899) [#3897](https://github.com/kubesphere/kubesphere/pull/3897) +- 修复了删除长用户名用户引起的系统崩溃问题 [kubesphere/ks-installer#1450](https://github.com/kubesphere/ks-installer/pull/1450) [kubesphere/kubesphere#3796](https://github.com/kubesphere/kubesphere/pull/3796) +- 修复用户组绑定项目角色提示出错的问题 [kubesphere/console#1967](https://github.com/kubesphere/console/pull/1967) +- 修复多集群环境企业空间配额展示错误的问题 [kubesphere/console#2013](https://github.com/kubesphere/console/pull/2013) + +## 多集群管理 + +### 优化增强 + +- 优化了 member 集群配置错误时的提示信息 [kubesphere/console#2084](https://github.com/kubesphere/console/pull/2084) [kubesphere/console#1965](https://github.com/kubesphere/console/pull/1965) + +### 问题修复 + +- 修复了不能获取 member 集群中节点标签的问题 [kubesphere/console#1927](https://github.com/kubesphere/console/pull/1927) +- 修复项目列表页面未正确区分多集群项目的问题 [kubesphere/console#2059](https://github.com/kubesphere/console/pull/2059) +- 修复多集群项目下网关开启状态展示错误的问题 [kubesphere/console#1939](https://github.com/kubesphere/console/pull/1939) + +## 计量计费 + +### 优化增强 + +- 计量计费部分的 UI 调整 [kubesphere/console#1896](https://github.com/kubesphere/console/pull/1896) +- 修改了计量计费按钮的颜色 [kubesphere/console#1934](https://github.com/kubesphere/console/pull/1934) + +### 问题修复 + +- 修复了计量计费无法涵盖 OpenPitrix 资源的问题 [kubesphere/console#3871](https://github.com/kubesphere/kubesphere/pull/3871) +- 修复了 system-workspace 计量计费中的报错问题 [kubesphere/console#2083](https://github.com/kubesphere/console/pull/2083) +- 修复了多集群计量计费列表中未显示所有项目的问题 [kubesphere/console#2066](https://github.com/kubesphere/console/pull/2066) +- 修复了由于所依赖的集群未加载导致的计费页面报错 [kubesphere/console#2054](https://github.com/kubesphere/console/pull/2054) + + +## 应用商店 + +### 优化增强 + +- 优化应用模板创建页面提示文案与页面布局 [kubesphere/console#2012](https://github.com/kubesphere/console/pull/2012) [kubesphere/console#2063](https://github.com/kubesphere/console/pull/2063) +- 优化应用导入功能 [kubesphere/openpitrix-jobs#18](https://github.com/kubesphere/openpitrix-jobs/pull/18) +- 应用商店中新增 RadonDB PostgreSQL 应用 [kubesphere/openpitrix-jobs#17](https://github.com/kubesphere/openpitrix-jobs/pull/17) + + + +## 安全 + +### 优化增强 + +- 切换 jwt-go 的分支,用于修复 CVE-2020-26160 [#3991](https://github.com/kubesphere/kubesphere/pull/3991) +- 升级 protobuf 版本至 v1.3.2 用于修复 CVE-2021-3121 [#3944](https://github.com/kubesphere/kubesphere/pull/3944) +- 升级 crypto 至最新版用于修复 CVE-2020-29652 [#3997](https://github.com/kubesphere/kubesphere/pull/3997) +- 移除了 yarn.lock 文件以避免一些 CVE 漏洞误报 [#2024](https://github.com/kubesphere/console/pull/2024) + +### 问题修复 + +- 修复了容器终端越权访问的问题 [kubesphere/kubesphere#3956](https://github.com/kubesphere/kubesphere/pull/3956) + +## 存储 + +### 优化增强 + +- 提升了 s3 uploader 并发性能 [#4011](https://github.com/kubesphere/kubesphere/pull/4011) +- 增加预置的 CSI Provisioner CR 配置 [#1536](https://github.com/kubesphere/ks-installer/pull/1536) + +### 问题修复 + +- 移除了无效的自动探测存储类的功能 [#3947](https://github.com/kubesphere/kubesphere/pull/3947) +- 修复关于项目配额存储资源单位错误引导的问题 [#3973](https://github.com/kubesphere/kubesphere/issues/3973) + +## KubeEdge 集成 + +### 优化增强 + +- 支持 KubeEdge v1.6.2 [#1527](https://github.com/kubesphere/ks-installer/pull/1527) [#1542](https://github.com/kubesphere/ks-installer/pull/1542) + +### 问题修复 + +- 修复了 KubeEdge CloudCore 组件 advertiseAddress 配置错误的问题 [#1561](https://github.com/kubesphere/ks-installer/pull/1561) \ No newline at end of file diff --git a/content/zh/docs/v3.4/release/release-v320.md 
b/content/zh/docs/v3.4/release/release-v320.md new file mode 100644 index 000000000..1ad18884f --- /dev/null +++ b/content/zh/docs/v3.4/release/release-v320.md @@ -0,0 +1,177 @@ +--- +title: "3.2.0 版本说明" +keywords: "Kubernetes, KubeSphere, 版本说明" +description: "KubeSphere 3.2.0 版本说明" +linkTitle: "3.2.0 版本说明" +weight: 18100 +--- + +## 多租户和多集群 + +### 新特性 + +- 新增支持在多集群场景设置主集群名称(默认值为 `host`)。([#4211](https://github.com/kubesphere/kubesphere/pull/4211),[@yuswift](https://github.com/yuswift)) +- 新增支持在单集群场景设置集群名称。([#4220](https://github.com/kubesphere/kubesphere/pull/4220),[@yuswift](https://github.com/yuswift)) +- 新增支持使用 `globals.config` 初始化默认集群名称。([#2283](https://github.com/kubesphere/console/pull/2283),[@harrisonliu5](https://github.com/harrisonliu5)) +- 新增支持创建部署时跨多个集群调度容器组副本。([#2191](https://github.com/kubesphere/console/pull/2191),[@weili520](https://github.com/weili520)) +- 新增支持在项目详情页面修改集群权重。([#2192](https://github.com/kubesphere/console/pull/2192),[@weili520](https://github.com/weili520)) + +### 问题修复 + +- 修复**集群管理**的**创建部署**对话框中可以通过输入项目名称选择多集群项目的问题。([#2125](https://github.com/kubesphere/console/pull/2125),[@fuchunlan](https://github.com/fuchunlan)) +- 修复编辑企业空间或集群基本信息时发生的错误。([#2188](https://github.com/kubesphere/console/pull/2188), [@xuliwenwenwen](https://github.com/xuliwenwenwen)) +- 移除主集群**基本信息**页面上有关已删除集群的信息。([#2211](https://github.com/kubesphere/console/pull/2211),[@fuchunlan](https://github.com/fuchunlan)) +- 新增支持在多集群项目中对服务进行排序和编辑。([#2167](https://github.com/kubesphere/console/pull/2167),[@harrisonliu5](https://github.com/harrisonliu5)) +- 重构多集群项目的网关功能。([#2275](https://github.com/kubesphere/console/pull/2275),[@harrisonliu5](https://github.com/harrisonliu5)) +- 修复删除企业空间后多集群项目无法删除的问题。([#4365](https://github.com/kubesphere/kubesphere/pull/4365),[@wansir](https://github.com/wansir)) + +## 可观察性 + +### 新特性 + +- 新增支持与 Elasticsearch 进行 HTTPS 通信。([#4176](https://github.com/kubesphere/kubesphere/pull/4176),[@wanjunlei](https://github.com/wanjunlei)) +- 新增支持调度 GPU 工作负载设置 GPU 类型。([#4225](https://github.com/kubesphere/kubesphere/pull/4225),[@zhu733756](https://github.com/zhu733756)) +- 新增支持验证通知设置。([#4216](https://github.com/kubesphere/kubesphere/pull/4216),[@wenchajun](https://github.com/wenchajun)) +- 新增支持通过指定监控面板 URL 或上传 Grafana 监控面板 JSON 配置文件导入 Grafana 监控面板。KubeSphere 自动将 Grafana 监控面板转换为 KubeSphere 集群监控面板。([#4194](https://github.com/kubesphere/kubesphere/pull/4194),[@zhu733756](https://github.com/zhu733756)) +- 新增支持在**自定义监控**页面创建 Grafana 监控面板。([#2214](https://github.com/kubesphere/console/pull/2214),[@harrisonliu5](https://github.com/harrisonliu5)) +- 优化**通知配置**功能。([#2261](https://github.com/kubesphere/console/pull/2261), [@xuliwenwenwen](https://github.com/xuliwenwenwen)) +- 新增支持在**编辑默认容器配额**对话框中设置 GPU 限制。([#2253](https://github.com/kubesphere/console/pull/2253),[@weili520](https://github.com/weili520)) +- 新增默认 GPU 监控面板。([#2580](https://github.com/kubesphere/console/pull/2580),[@harrisonliu5](https://github.com/harrisonliu5)) +- 在 etcd 监控页面对 etcd leader 增加 **Leader** 标签。([#2445](https://github.com/kubesphere/console/pull/2445), [@xuliwenwenwen](https://github.com/xuliwenwenwen)) + +### 问题修复 + +- 修复**告警消息**页面和告警策略详情页面容器组信息错误的问题。([#2215](https://github.com/kubesphere/console/pull/2215),[@harrisonliu5](https://github.com/harrisonliu5)) + +## 验证和授权 + +### 新特性 + +- 新增内置 OAuth 2.0 服务器(支持 OpenID Connect)。([#3525](https://github.com/kubesphere/kubesphere/pull/3525),[@wansir](https://github.com/wansir)) +- 
移除使用外部身份认证提供者时所需的信息确认过程。([#4238](https://github.com/kubesphere/kubesphere/pull/4238),[@wansir](https://github.com/wansir)) + +### 问题修复 + +- 修复登录历史记录中源 IP 地址错误的问题。([#4331](https://github.com/kubesphere/kubesphere/pull/4331),[@wansir](https://github.com/wansir)) + +## 存储 + +### 新特性 + +- 变更用于确定是否允许存储卷克隆、存储卷快照和存储卷扩展的参数。([#2199](https://github.com/kubesphere/console/pull/2199),[@weili520](https://github.com/weili520)) +- 新增支持创建存储卷时设置存储卷绑定模式。([#2220](https://github.com/kubesphere/console/pull/2220),[@weili520](https://github.com/weili520)) +- 新增存储卷实例管理功能。([#2226](https://github.com/kubesphere/console/pull/2226),[@weili520](https://github.com/weili520)) +- 新增支持多个存储卷快照类型。用户可以在创建存储卷快照时选择快照类型。([#2218](https://github.com/kubesphere/console/pull/2218),[@weili520](https://github.com/weili520)) + +### 问题修复 + +- 更改**存储卷设置**页签上存储卷访问模式的可选项。([#2348](https://github.com/kubesphere/console/pull/2348),[@live77](https://github.com/live77)) + +## 网络 + +### 新特性 + +- 在应用路由列表页面新增应用路由排序、路由规则编辑和注解编辑功能。([#2165](https://github.com/kubesphere/console/pull/2165),[@harrisonliu5](https://github.com/harrisonliu5)) +- 重构集群网关和项目网关功能。([#2262](https://github.com/kubesphere/console/pull/2262),[@harrisonliu5](https://github.com/harrisonliu5)) +- 在路由规则创建过程中新增服务名称自动补全功能。([#2196](https://github.com/kubesphere/console/pull/2196),[@wengzhisong-hz](https://github.com/wengzhisong-hz)) +- 对 ks-console 进行了以下 DNS 优化: + - 直接使用 ks-apiserver 服务的名称作为 API URL,不再使用 `ks-apiserver.kubesphere-system.svc`。 + - 新增 DNS 缓存插件 (dnscache) 用于缓存 DNS 结果。([#2435](https://github.com/kubesphere/console/pull/2435),[@live77](https://github.com/live77)) + +### 问题修复 + +- 在**启用网关**对话框中新增**取消**按钮。([#2245](https://github.com/kubesphere/console/pull/2245),[@weili520](https://github.com/weili520)) + +## 应用和应用商店 + +### 新特性 + +- 新增支持在应用仓库创建和编辑过程中设置同步时间间隔。([#2311](https://github.com/kubesphere/console/pull/2311), [@xuliwenwenwen](https://github.com/xuliwenwenwen)) +- 在应用商店增加免责声明。([#2173](https://github.com/kubesphere/console/pull/2173), [@xuliwenwenwen](https://github.com/xuliwenwenwen)) +- 新增支持将社区开发的 Helm chart 动态加载到应用商店。([#4250](https://github.com/kubesphere/kubesphere/pull/4250),[@xyz-li](https://github.com/xyz-li)) + +### 问题修复 + +- 修复调用 `GetKubeSphereStats` 时 `kubesphere_app_template_count` 的值始终为 `0` 的问题。([#4130](https://github.com/kubesphere/kubesphere/pull/4130),[@Hanamichi](https://github.com/ks-ci-bohttps://github.com/x893675)) + +## DevOps + +### 新特性 + +- 设置系统在当前流水线不是多分支流水线时隐藏**运行记录**页签的**分支**列。([#2379](https://github.com/kubesphere/console/pull/2379),[@live77](https://github.com/live77)) +- 新增自动从 ConfigMaps 加载 Jenkins 配置的功能。([#75](https://github.com/kubesphere/ks-devops/pull/75),[@JohnNiang](https://github.com/JohnNiang)) +- 新增支持通过操纵 CRD 而不是调用 Jenkins API 来触发流水线。([#41](https://github.com/kubesphere/ks-devops/issues/41), [@rick](https://github.com/LinuxSuRen)) +- 新增支持基于 containerd 的流水线。([#171](https://github.com/kubesphere/ks-devops/pull/171), [@rick](https://github.com/LinuxSuRen)) +- 将 Jenkins 任务元数据添加流水线注解中。([#254](https://github.com/kubesphere/ks-devops/issues/254),[@JohnNiang](https://github.com/JohnNiang)) + +### 问题修复 + +- 修复参数值过长时凭证创建和更新失败的问题。([#123](https://github.com/kubesphere/ks-devops/pull/123),[@shihh](https://github.com/shihaoH)) +- 修复打开并行流水线**运行记录**页签时 ks-apiserver 崩溃的问题。([#93](https://github.com/kubesphere/ks-devops/pull/93),[@JohnNiang](https://github.com/JohnNiang)) + +### 依赖项升级 + +- 升级 Configuration as Code 版本到 1.53。([#42](https://github.com/kubesphere/ks-jenkins/pull/42), [@rick](https://github.com/LinuxSuRen)) + +## 安装 +### 新特性 + +- 新增支持 
Kubernetes 1.21.5 和 1.22.1,Kubernetes最低版本要求为1.19。([#634](https://github.com/kubesphere/kubekey/pull/634),[@pixiake](https://github.com/pixiake)) +- 新增支持自动设置容器运行时。([#738](https://github.com/kubesphere/kubekey/pull/738),[@pixiake](https://github.com/pixiake)) +- 新增支持自动更新 Kubernetes 证书。([#705](https://github.com/kubesphere/kubekey/pull/705),[@pixiake](https://github.com/pixiake)) +- 新增支持使用二进制文件安装 Docker 和 conatinerd。([#657](https://github.com/kubesphere/kubekey/pull/657),[@pixiake](https://github.com/pixiake)) +- 新增支持 Flannel VxLAN 和直接路由。([#606](https://github.com/kubesphere/kubekey/pull/606),[@kinglong08](https://github.com/kinglong08)) +- 新增支持使用二进制文件部署 etcd。([#634](https://github.com/kubesphere/kubekey/pull/634),[@pixiake](https://github.com/pixiake)) +- 新增内部负载均衡器用于部署高可用系统。([#567](https://github.com/kubesphere/kubekey/pull/567),[@24sama](https://github.com/24sama)) + +### 问题修复 + +- 修复 `runtime.RawExtension` 序列化错误。([#731](https://github.com/kubesphere/kubekey/pull/731),[@pixiake](https://github.com/pixiake)) +- 修复集群升级期间出现的空指针错误。([#684](https://github.com/kubesphere/kubekey/pull/684),[@24sama](https://github.com/24sama)) +- 新增支持更新 Kubernetes 1.20.0 及以上版本的证书。([#690](https://github.com/kubesphere/kubekey/pull/690),[@24sama](https://github.com/24sama)) +- 修复 DNS 地址配置错误。([#637](https://github.com/kubesphere/kubekey/pull/637),[@pixiake](https://github.com/pixiake)) +- 修复缺少默认网关地址时出现的集群创建错误。([#661](https://github.com/kubesphere/kubekey/pull/661),[@liulangwa](https://github.com/liulangwa)) + +## 用户体验 + +- 修复语言错误并优化措辞。([@Patrick-LuoYu](https://github.com/Patrick-LuoYu)、[@Felixnoo](https://github.com/Felixnoo)、[@serenashe](https://github.com/serenashe)) +- 修复错误的功能说明。([@Patrick-LuoYu](https://github.com/Patrick-LuoYu)、[@Felixnoo](https://github.com/Felixnoo)、[@serenashe](https://github.com/serenashe)) +- 删除硬编码和拼接 UI 字符串,以更好地支持 UI 本地化和国际化。([@Patrick-LuoYu](https://github.com/Patrick-LuoYu)、[@Felixnoo](https://github.com/Felixnoo)、[@serenashe](https://github.com/serenashe)) +- 添加条件语句以显示正确的英文单复数形式。([@Patrick-LuoYu](https://github.com/Patrick-LuoYu)、[@Felixnoo](https://github.com/Felixnoo)、[@serenashe](https://github.com/serenashe)) +- 优化**创建部署**对话框中的**容器组调度规则**区域。([#2170](https://github.com/kubesphere/console/pull/2170),[@qinyueshang](https://github.com/qinyueshang)) +- 修复**编辑项目配额**中配额值设置为无穷大时值变为 `0` 的问题。([#2118](https://github.com/kubesphere/console/pull/2118),[@fuchunlan](https://github.com/fuchunlan)) +- 修复**创建配置字典**对话框中数据条目为空时锤子图标位置不正确的问题。([#2206](https://github.com/kubesphere/console/pull/2206),[@fuchunlan](https://github.com/fuchunlan)) +- 修复项目**概览**页面时间范围下拉列表默认值显示错误的问题。([#2340](https://github.com/kubesphere/console/pull/2340),[@fuchunlan](https://github.com/fuchunlan)) +- 修复 `referer` URL 包含 & 字符时登录重定向失败的问题。([#2194](https://github.com/kubesphere/console/pull/2194),[@harrisonliu5](https://github.com/harrisonliu5)) +- 在自定义监控面板创建页面将 **1 hours** 修改为 **1 hour**。([#2276](https://github.com/kubesphere/console/pull/2276),[@live77](https://github.com/live77)) +- 修复服务列表页面服务类型显示错误的问题。([#2178](https://github.com/kubesphere/console/pull/2178), [@xuliwenwenwen](https://github.com/xuliwenwenwen)) +- 修复灰度发布任务详细信息中流量数据显示错误的问题。([#2422](https://github.com/kubesphere/console/pull/2422),[@harrisonliu5](https://github.com/harrisonliu5)) +- 解决**编辑项目配额**对话框中无法设置带两位小数或大于 8 的值的问题。([#2127](https://github.com/kubesphere/console/pull/2127),[@weili520](https://github.com/weili520)) +- 允许通过单击窗口其他区域关闭**关于**对话框。([#2114](https://github.com/kubesphere/console/pull/2114),[@fuchunlan](https://github.com/fuchunlan)) +- 
优化项目标题,使光标悬停在项目标题上时变为手形。([#2128](https://github.com/kubesphere/console/pull/2128),[@fuchunlan](https://github.com/fuchunlan)) +- 新增支持在**创建部署**对话框的**环境变量**区域创建配置字典和保密字典。([#2227](https://github.com/kubesphere/console/pull/2227),[@harrisonliu5](https://github.com/harrisonliu5)) +- 新增支持在**创建部署**对话框中设置容器组注解。([#2129](https://github.com/kubesphere/console/pull/2129),[@harrisonliu5](https://github.com/harrisonliu5)) +- 允许域名以星号(*)开头。([#2432](https://github.com/kubesphere/console/pull/2432),[@wengzhisong-hz](https://github.com/wengzhisong-hz)) +- 新增支持在**创建部署**对话框搜索 Harbor 镜像。([#2132](https://github.com/kubesphere/console/pull/2132),[@wengzhisong-hz](https://github.com/wengzhisong-hz)) +- 新增支持为初始化容器挂载存储卷。([#2166](https://github.com/kubesphere/console/pull/2166),[@Sigboom](https://github.com/Sigboom)) +- 移除存储卷扩展中过程中工作负载自动重新启动的功能。([#4121](https://github.com/kubesphere/kubesphere/pull/4121),[@wenhuwang](https://github.com/wenhuwang)) + +## API + +- 弃用 router API v1alpha2 版本。([#4193](https://github.com/kubesphere/kubesphere/pull/4193),[@RolandMa1986](https://github.com/RolandMa1986)) +- 将流水线 API 版本从 v2 升级到 v3。([#2323](https://github.com/kubesphere/console/pull/2323),[@harrisonliu5](https://github.com/harrisonliu5)) +- 更改保密字典校验 API。([#2368](https://github.com/kubesphere/console/pull/2368),[@harrisonliu5](https://github.com/harrisonliu5)) +- OAuth2 Token endpoint 需要客户端凭证。([#3525](https://github.com/kubesphere/kubesphere/pull/3525),[@wansir](https://github.com/wansir)) + +## 组件更改 + +- kubefed: v0.7.0 -> v0.8.1 +- prometheus-operator: v0.42.1 -> v0.43.2 +- notification-manager: v1.0.0 -> v1.4.0 +- fluent-bit: v1.6.9 -> v1.8.3 +- kube-events: v0.1.0 -> v0.3.0 +- kube-auditing: v0.1.2 -> v0.2.0 +- istio: 1.6.10 -> 1.11.1 +- jaeger: 1.17 -> 1.27 +- kiali: v1.26.1 -> v1.38 +- KubeEdge: v1.6.2 -> 1.7.2 diff --git a/content/zh/docs/v3.4/release/release-v321.md b/content/zh/docs/v3.4/release/release-v321.md new file mode 100644 index 000000000..e0245e9e1 --- /dev/null +++ b/content/zh/docs/v3.4/release/release-v321.md @@ -0,0 +1,41 @@ +--- +title: "3.2.1 版本说明" +keywords: "Kubernetes, KubeSphere, 版本说明" +description: "KubeSphere 3.2.1 版本说明" +linkTitle: "3.2.1 版本说明" +weight: 18099 +--- + +## 功能优化与问题修复 + +### 功能优化 + +- 新增支持按状态过滤容器组。([#4434](https://github.com/kubesphere/kubesphere/pull/4434),[@iawia002](https://github.com/iawia002),[#2620](https://github.com/kubesphere/console/pull/2620),[@weili520](https://github.com/weili520)) +- 在镜像构建器创建对话框中增加不支持 containerd 的提示。([#2734](https://github.com/kubesphere/console/pull/2734),[@weili520](https://github.com/weili520)) +- 在**编辑项目配额**对话框中增加可用配额信息。([#2619](https://github.com/kubesphere/console/pull/2619),[@weili520](https://github.com/weili520)) + +### 问题修复 + +- 更改密码校验规则以阻止不包含大写字母的密码。([#4481](https://github.com/kubesphere/kubesphere/pull/4481),[@live77](https://github.com/live77)) +- 修复 KubeSphere 上不存在相关用户信息时,无法使用来自 LDAP 的用户登录的问题。([#4436](https://github.com/kubesphere/kubesphere/pull/4436),[@RolandMa1986](https://github.com/RolandMa1986)) +- 修复无法获取集群网关指标信息的问题。([#4457](https://github.com/kubesphere/kubesphere/pull/4457),[@RolandMa1986](https://github.com/RolandMa1986)) +- 修复存储卷列表访问模式显示不正确的问题。([#2686](https://github.com/kubesphere/console/pull/2686),[@weili520](https://github.com/weili520)) +- 移除**网关设置**页面的**更新**按钮。([#2608](https://github.com/kubesphere/console/pull/2608),[@weili520](https://github.com/weili520)) +- 修复时间范围选择下拉列表显示错误的问题。([#2715](https://github.com/kubesphere/console/pull/2715),[@weili520](https://github.com/weili520)) +- 
修复保密字典数据文本过长时文本显示不正确的问题。([#2600](https://github.com/kubesphere/console/pull/2600),[@weili520](https://github.com/weili520)) +- 修复挂载存储卷模板时有状态副本集创建失败的问题。([#2730](https://github.com/kubesphere/console/pull/2730),[@weili520](https://github.com/weili520)) +- 修复用户没有查看群集信息的权限时系统无法获取集群网关信息的问题。([#2695](https://github.com/kubesphere/console/pull/2695),[@harrisonliu5](https://github.com/harrisonliu5)) +- 修复流水线状态和运行记录无法自动更新的问题。([#2594](https://github.com/kubesphere/console/pull/2594),[@harrisonliu5](https://github.com/harrisonliu5)) +- 对 kubernetesDeply 流水线步骤增加该步骤将被弃用的提示。([#2660](https://github.com/kubesphere/console/pull/2660),[@harrisonliu5](https://github.com/harrisonliu5)) +- 修复镜像仓库保密字典使用 HTTP 仓库地址时无法通过验证的问题。([#2795](https://github.com/kubesphere/console/pull/2795),[@harrisonliu5](https://github.com/harrisonliu5)) +- 修复 Harbor 镜像 URL 错误的问题。([#2784](https://github.com/kubesphere/console/pull/2784),[@harrisonliu5](https://github.com/harrisonliu5)) +- 修复日志搜索结果显示错误的问题。([#2598](https://github.com/kubesphere/console/pull/2598),[@weili520](https://github.com/weili520)) +- 修复存储卷实例 YAML 配置中的错误。([#2629](https://github.com/kubesphere/console/pull/2629),[@weili520](https://github.com/weili520)) +- 修复**编辑项目配额**对话框中可用企业空间配额显示不正确的问题。([#2613](https://github.com/kubesphere/console/pull/2613),[@weili520](https://github.com/weili520)) +- 修复**监控**对话框中时间范围选择下拉列表功能不正常的问题。([#2722](https://github.com/kubesphere/console/pull/2722),[@weili520](https://github.com/weili520)) +- 修复部署创建页面可用配额显示不正确的问题。([#2668](https://github.com/kubesphere/console/pull/2668),[@weili520](https://github.com/weili520)) +- 将文档地址更改为 [kubesphere.io](http://kubesphere.io) 和 [kubesphere.com.cn](http://kubesphere.io)。([#2628](https://github.com/kubesphere/console/pull/2628),[@weili520](https://github.com/weili520)) +- 修复无法修改部署存储卷设置的问题。([#2656](https://github.com/kubesphere/console/pull/2656),[@weili520](https://github.com/weili520)) +- 修复浏览器语言必须为英文、简体中文或繁体中文时才能访问容器终端的问题。([#2702](https://github.com/kubesphere/console/pull/2702),[@weili520](https://github.com/weili520)) +- 修复部署编辑对话框中存储卷状态显示不正确的问题。([#2622](https://github.com/kubesphere/console/pull/2622),[@weili520](https://github.com/weili520)) +- 移除凭证详情页面显示的标签。([#2621](https://github.com/kubesphere/console/pull/2621),[@123liubao](https://github.com/123liubao)) \ No newline at end of file diff --git a/content/zh/docs/v3.4/release/release-v330.md b/content/zh/docs/v3.4/release/release-v330.md new file mode 100644 index 000000000..b1797133a --- /dev/null +++ b/content/zh/docs/v3.4/release/release-v330.md @@ -0,0 +1,89 @@ +--- +title: "3.3.0 版本说明" +keywords: "Kubernetes, KubeSphere, 版本说明" +description: "KubeSphere 3.3.0 版本说明" +linkTitle: "3.3.0 版本说明" +weight: 18098 +--- + +## DevOps +### 新特性 +- 提供了基于 GitOps 的持续部署方案,底层支持 Argo CD,可以实时查看持续部署的状态。 +- 支持配置持续部署白名单,限制持续部署的目标代码仓库和部署位置。 +- 支持导入并管理代码仓库。 +- 新增多款基于 CRD 的内置流水线模板,支持参数自定义。 +- 支持查看流水线事件。 +## 存储 +### 新特性 +- 支持租户级存储类权限管理。 +- 新增卷快照内容和卷快照类管理。 +- 支持 deployment 与 statefulSet 资源调整存储卷声明修改后自动重启。 +- 支持存储卷声明设定使用阈值自动扩容。 + +## 多租户和多集群 +### 新特性 +- 支持 kubeconfig 证书到期提示。 +- 支持 kubesphere-config configmap 以显示当前集群的名称。 +- 支持集群层级的成员管理。 + +## 可观察性 +### 新特性 +- 增加容器进程/线程监控指标。 +- 支持监控节点用量。 +- 支持导入 Grafana 模板实现自定义监控看板。 +- 支持为容器日志、资源事件和审计日志指定不同的数据保存时间。 + +### 优化增强 +- Alertmanager 从 v0.21.0 升级至 v0.23.0。 +- Grafana 从 7.4.3 升级至 8.3.3。 +- kube-state-metrics 从 v1.9.7 升级至 v2.3.0。 +- node-exporter 从 v0.18.1 升级至 v1.3.1。 +- Prometheus 从 v2.26.0 升级至 v2.34.0。 +- Prometheus Operator 从 v0.43.2 升级至 v0.55.1。 +- kube-rbac-proxy 从 v0.8.0 升级至 v0.11.0。 +- configmap-reload 从 v0.3.0 升级至 v0.5.0。 +- Thanos 
从 v0.18.0 升级至 v0.25.2。 +- kube-events 从 v0.3.0 升级至 v0.4.0。 +- Fluent Bit Operator 从 v0.11.0 升级至 v0.13.0。 +- Fluent Bit 从 v1.8.3 升级至 v1.8.11。 + +## KubeEdge 集成 +### 新特性 +- 支持节点终端,可以直接在 UI 上登陆集群节点,包括边缘节点。 +### 优化增强 +- KubeEdge 版本从v1.7.2 升级到 v1.9.2。 +- 移除 EdgeWatcher。 + +## 网络 +### 优化增强 +- 负载均衡类型选择新增 OpenELB。 +### 问题修复 +- 修复了删除项目后项目网关遗留的问题。 +## App Store +### 问题修复 +- 修复 Helm Controller NPE 错误引起的 ks-controller-manager 崩溃。 + +## 验证和授权 +### 新特性 +- 支持手动启用/禁用用户。 + +## 用户体验 +- 新增 Kubernetes 审计日志开启提示。 +- 支持容器生命周期管理。 +- 支持应用整个配置字典或保密字典文件。 +- 支持在**流量监控**页签选择时间段。 +- 新增在**审计日志搜索** 对话框提醒用户开启审计日志的功能。 +- 支持通过 `ClusterConfiguration` 配置更多 Istio 参数。 +- 新增多语言支持,如土耳其语。 +- 支持用户密码合规性检查。 +- 新增在 webhook 设置页面将**访问令牌**设置为必填项的功能。 +- 修复**服务拓扑**页面的服务详情区域数据未自动更新的问题。 +- 修复“修改有状态服务时未显示服务名称”问题。 +- 修复用户点击按钮过快造成的应用安装失败问题。 +- 修复容器组探针删除后仍然显示的问题。 +- 修复存储卷挂载到 init 容器时 statefulset 创建失败的问题。 +- 优化了服务拓扑图详情展示窗口。 +- 优化了 ClusterConfiguration 更新机制,无需重启 ks-apiserver、ks-controller-manager。 +- 优化了部分页面文案描述。 + +有关 KubeSphere 3.3.0 的 Issue 和贡献者详细信息,请参阅 [GitHub](https://github.com/kubesphere/kubesphere/blob/master/CHANGELOG/CHANGELOG-3.3.md)。 diff --git a/content/zh/docs/v3.4/release/release-v331.md b/content/zh/docs/v3.4/release/release-v331.md new file mode 100644 index 000000000..524dbb009 --- /dev/null +++ b/content/zh/docs/v3.4/release/release-v331.md @@ -0,0 +1,38 @@ +--- +title: "3.3.1 版本说明" +keywords: "Kubernetes, KubeSphere, 版本说明" +description: "KubeSphere 3.3.1 版本说明" +linkTitle: "3.3.1 版本说明" +weight: 18096 +--- + +## DevOps +### 优化增强 + +- 支持通过 UI 编辑流水线中 kubeconfig 文件绑定方式。 + +### 问题修复 +- 修复用户查看 CI/CD 模板失败的问题。 +- 将 `Deprecated` 标签从 CI/CD 模版中移除,并将部署环节由 `kubernetesDeploy` 修改为 kubeconfig 绑定方式。 + +## 网络 +### 问题修复 +- 修复 IPv4/IPv6 双栈模式下用户创建路由规则失败的问题。 + +## 存储 +### 问题修复 +- 当用户使用 `hostpath` 作为存储时,必须填写主机路径。 + + +## 验证和授权 +### 问题修复 +- 删除角色 `users-manager` 和 `workspace-manager`。 +- 新增角色 `platform-self-provisioner`。 +- 屏蔽用户自定义角色的部分权限。 + + +## 用户体验 +- 支持修改每页列表的展示数量。 +- 新增对 statefulset 和 deamonset 批量停止的支持 + +有关 KubeSphere 3.3.1 的 Issue 和贡献者详细信息,请参阅 [GitHub](https://github.com/kubesphere/kubesphere/blob/master/CHANGELOG/CHANGELOG-3.3.1.md)。 diff --git a/content/zh/docs/v3.4/release/release-v332.md b/content/zh/docs/v3.4/release/release-v332.md new file mode 100644 index 000000000..75cef7292 --- /dev/null +++ b/content/zh/docs/v3.4/release/release-v332.md @@ -0,0 +1,97 @@ +--- +title: "3.3.2 版本说明" +keywords: "Kubernetes, KubeSphere, 版本说明" +description: "KubeSphere 3.3.2 版本说明" +linkTitle: "3.3.2 版本说明" +weight: 18095 +--- + +## DevOps + +### 优化增强 + +- 添加最新的 GitHub Actions。 +- 将 PipelineRun 的结果保存到 configmap 中。 +- 修改持续部署应用程序状态的中文描述。 +- 为持续部署参数添加更丰富的信息。 +- 为处于中止状态的 PipelineRun 添加链接。 +- 为 PipelineRun 增加 ID 列,用于执行 kubectl 命令时展示。 +- PipelineRun 生命周期中去掉 Queued 状态。 + +### 问题修复 + +- 修复用户修改并保存流水线配置后 Webhook 配置丢失的问题。 +- 修复下载 DevOps 流水线制品失败的问题。 +- 修复使用 JAR/WAR 文件创建服务时,镜像地址不匹配的问题。 +- 修复 PipelineRun 从“取消”状态变成“未运行”状态的问题。 +- 修复 Pipeline 的自动清理策略,使其与 Jenkins 的清理保持一致。 + + +## App Store + +### 问题修复 + +- 修复上传的应用程序模板上不显示图标的问题。 +- 修复应用商店应用信息处没有显示应用首页的问题。 +- 修复应用商店导入内置应用时导入失败的问题。 +- 修复 IPv6 环境下 UUID 生成错误的问题。 + +## 可观测性 + +### 问题修复 + +- 修复 logsidecar-injector 配置文件中的解析问题。 + +## 微服务 + +### 问题修复 + +- 修复未启用 service mesh 时创建的 Bookinfo 项目没有默认关闭应用治理的问题。 +- 修复蓝绿部署发布模式下线按钮缺失的问题。 + +## 网络 + +### 优化增强 + +- 限制项目的网络隔离范围为当前企业空间。 + +## 存储 + +### 优化增强 + +- 在多集群环境中显示 system-workspace 所属集群。 +- 将“应用路由”的英文词条由 “route” 修改为 “ingress”。 + +### 问题修复 + +- 修复编辑联邦项目中的持久卷声明存储类错误的问题。 + +## 验证和授权 + +### 优化增强 + +- 增加了动态的 cache 配置项。 +- 移除“告警消息管理”权限。 + +### 问题修复 + +- 修复拥有集群管理权限的平台角色无法管理集群的问题。 + +## 开发 & 测试 + +### 
问题修复 + +- 修复引入热加载功能后部分数据后处于“不同步”状态的问题。 +- 修复 ks-apiserver 多次重载后崩溃的问题。 +- 修复缺少必要的 CRD 造成的资源缓存失败问题。 +- 修复 ks-apiserver 在 Kubernetes 1.24+ 版本中异常崩溃的问题。 +- 修复审计功能中协程泄露的问题。 + +## 用户体验 + +- 限制集群名称长度。 +- 修复 pod 副本不能自动刷新的问题。 +- 修复删除服务时,相关的 pod 没有删除的问题。 +- 修复只有一个节点时,节点数量和角色显示错误的问题。 + +有关 KubeSphere 3.3.2 的 Issue 和贡献者详细信息,请参阅 [GitHub](https://github.com/kubesphere/kubesphere/blob/master/CHANGELOG/CHANGELOG-3.3.2.md)。 diff --git a/content/zh/docs/v3.4/toolbox/_index.md b/content/zh/docs/v3.4/toolbox/_index.md new file mode 100644 index 000000000..211bb4eb1 --- /dev/null +++ b/content/zh/docs/v3.4/toolbox/_index.md @@ -0,0 +1,13 @@ +--- +title: "工具箱" +description: "Help you to better understand KubeSphere toolbox" +layout: "second" + +linkTitle: "工具箱" + +weight: 15000 + +icon: "/images/docs/v3.3/docs.svg" +--- + +KubeSphere 通过工具箱提供几种重要功能。本章演示了如何使用 KubeSphere 工具箱查询事件、日志和审计日志,查看资源消费情况,以及如何通过 Web Kubectl 运行命令。 diff --git a/content/zh/docs/v3.4/toolbox/auditing/_index.md b/content/zh/docs/v3.4/toolbox/auditing/_index.md new file mode 100644 index 000000000..de99625e1 --- /dev/null +++ b/content/zh/docs/v3.4/toolbox/auditing/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "审计" +weight: 15300 + +_build: + render: false +--- diff --git a/content/zh/docs/v3.4/toolbox/auditing/auditing-query.md b/content/zh/docs/v3.4/toolbox/auditing/auditing-query.md new file mode 100644 index 000000000..ae333b105 --- /dev/null +++ b/content/zh/docs/v3.4/toolbox/auditing/auditing-query.md @@ -0,0 +1,85 @@ +--- +title: "审计日志查询" +keywords: "Kubernetes, KubeSphere, 审计, 日志, 查询" +description: "了解如何快速执行审计日志查询,追踪集群的最新审计信息。" +linkTitle: "审计日志查询" +weight: 15330 +--- + +KubeSphere 支持租户隔离的审计日志查询。本教程演示了如何使用查询功能,包括界面、搜索参数和详情页面。 + +## 准备工作 + +您需要启用 [KubeSphere 审计日志](../../../pluggable-components/auditing-logs/)。 + +## 进入查询界面 + +1. 所有用户都可以使用该查询功能。使用任意帐户登录控制台,在右下角的**工具箱**图标上悬停,然后在弹出菜单中选择**审计日志查询**。 + + {{< notice note >}} + +任意帐户都有权限查询审计日志,但每个帐户能查看的日志有区别。 + +- 如果一个用户有权限查看项目中的资源,该帐户便可以查看此项目中发生的审计日志,例如在项目中创建工作负载。 +- 如果一个用户有权限在企业空间中列出项目,该帐户便可以查看此企业空间(而非项目)中发生的审计日志,例如在企业空间中创建项目。 +- 如果一个用户有权限在集群中列出项目,该帐户便可以查看此集群(而非企业空间和项目)中发生的审计日志,例如在集群中创建企业空间。 + +{{}} + +2. 在弹出窗口中,您可以查看最近 12 小时内审计日志总数的趋势。 + +3. **审计日志查询**控制台支持以下查询参数: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
参数 | 描述
--- | ---
集群 | 发生操作的集群。如果开启了多集群功能,则会启用该参数。
项目 | 发生操作的项目。支持精确匹配和模糊匹配。
企业空间 | 发生操作的企业空间。支持精确匹配和模糊匹配。
资源类型 | 与请求相关联的资源类型。支持模糊匹配。
资源名称 | 与请求相关联的资源名称。支持模糊匹配。
操作行为 | 与请求相关联的 Kubernetes 操作行为。对于非资源请求,该参数为小写 HTTP 方式。支持精确匹配。
状态码 | HTTP 响应码。支持精确匹配。
操作帐户 | 调用该请求的用户。支持精确匹配和模糊匹配。
来源 IP | 该请求源自的 IP 地址和中间代理。支持模糊匹配。
时间范围 | 该请求到达 Apiserver 的时间。
+ + {{< notice note >}} + +- 模糊匹配不区分大小写,并且根据 ElasticSearch 分段规则,通过单词或词组的前半部分来检索完整术语。 +- KubeSphere 默认存储最近七天的日志。您可以在 `elasticsearch-logging-curator` ConfigMap 中修改保留期限。 + +{{}} + +## 输入查询参数 + +1. 选择一个过滤器,输入您想搜索的关键字。例如,查询包含 `services` 创建信息的审计日志。 + +2. 点击列表中的任一结果,您便可以查看审计日志的详细信息。 diff --git a/content/zh/docs/v3.4/toolbox/auditing/auditing-receive-customize.md b/content/zh/docs/v3.4/toolbox/auditing/auditing-receive-customize.md new file mode 100644 index 000000000..fa292855a --- /dev/null +++ b/content/zh/docs/v3.4/toolbox/auditing/auditing-receive-customize.md @@ -0,0 +1,180 @@ +--- +title: "接收和自定义审计日志" +keywords: "Kubernetes, KubeSphere, 审计, 日志, 自定义, 接收" +description: "了解如何接收和自定义审计日志。" +linkTitle: "接收和自定义审计日志" +weight: 15310 +--- + +KubeSphere 审计日志提供了与安全相关的、按时间顺序排列的记录集,记录每个用户、管理员或系统其他组件对系统产生影响的一系列活动。对 KubeSphere 的每个请求都会生成一个事件,随后该事件会写入 Webhook 并根据特定规则进行处理。根据不同规则,该事件会被忽略、存储或生成告警。 + +## 启用 KubeSphere 审计日志 + +要启用审计日志,请参见 [KubeSphere 审计日志](../../../pluggable-components/auditing-logs/)。 + +## 接收来自 KubeSphere 的审计日志 + +KubeSphere 审计日志系统默认只接收来自 KubeSphere 的审计日志,同时也可以接收来自 Kubernetes 的审计日志。 + +用户可以使用以下命令在命名空间 `kubesphere-system` 中修改 `kubesphere-config` ConfigMap 中 `auditing.enable` 的值,停止接收来自 KubeSphere 的审计日志: + +```bash +kubectl edit cm -n kubesphere-system kubesphere-config +``` + +将 `auditing.enabled` 的值修改为 `false`,停止接收来自 KubeSphere 的审计日志。 + +```yaml + spec: + auditing: + enabled: false +``` + +您需要重启 KubeSphere Apiserver 使修改生效。 + +## 接收来自 Kubernetes 的审计日志 + +要使 KubeSphere 审计日志系统接收来自 Kubernetes 的审计日志,您需要向 `/etc/kubernetes/manifests/kube-apiserver.yaml` 添加 Kubernetes 审计策略文件和 Kubernetes 审计 Webhook 配置文件。 + +### 审计策略 + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: kube-apiserver + namespace: kube-system +spec: + containers: + - command: + - kube-apiserver + - --audit-policy-file=/etc/kubernetes/audit/audit-policy.yaml + - --audit-webhook-config-file=/etc/kubernetes/audit/audit-webhook.yaml + volumeMounts: + - mountPath: /etc/kubernetes/audit + name: k8s-audit + readOnly: true + volumes: + - hostPath: + path: /etc/kubernetes/audit + type: DirectoryOrCreate + name: k8s-audit +``` + +{{< notice note >}} + +该操作会重启 Kubernetes Apiserver。 + +{{}} + +`audit-policy.yaml` 文件定义了关于应记录哪些事件和应包含哪些数据的规则。您可以使用最小审计策略文件记录元数据级别的所有请求。 + +```yaml +# Log all requests at the Metadata level. 
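+# 补充示例(仅供参考,非 KubeSphere 默认配置):审计规则按顺序匹配,首条命中的规则决定日志级别。
+# 如需降低日志量,可在下方的兜底规则之前加入更具体的规则,例如忽略只读请求:
+# - level: None
+#   verbs: ["get", "list", "watch"]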
+apiVersion: audit.k8s.io/v1 +kind: Policy +rules: +- level: Metadata +``` + +有关审计策略的更多信息,请参见[审计策略](https://kubernetes.io/zh/docs/tasks/debug/debug-cluster/audit/)。 + +### 审计 Webhook + +`audit-webhook.yaml` 文件定义了 Kubernetes 审计日志将要发送至的 Webhook。以下是 Kube-Auditing Webhook 的示例配置。 + +```yaml +apiVersion: v1 +kind: Config +clusters: +- name: kube-auditing + cluster: + server: https://{ip}:6443/audit/webhook/event + insecure-skip-tls-verify: true +contexts: +- context: + cluster: kube-auditing + user: "" + name: default-context +current-context: default-context +preferences: {} +users: [] +``` + +`ip` 即命名空间 `kubesphere-logging-system` 中 `kube-auditing-webhook-svc` 服务的 `CLUSTER-IP`,您可以使用以下命令来获取。 + +```bash +kubectl get svc -n kubesphere-logging-system +``` + +{{< notice note >}} + +修改这两个文件后,您需要重启 Kubernetes Apiserver 使修改生效。 + +{{}} + +使用以下命令编辑 `kube-auditing-webhook` CRD Webhook,将 `k8sAuditingEnabled` 的值改为 `true`。 + +```bash +kubectl edit webhooks.auditing.kubesphere.io kube-auditing-webhook +``` + +```yaml +spec: + auditing: + k8sAuditingEnabled: true +``` +{{< notice tip >}} + +您也可以使用拥有 `platform-admin` 角色的用户登录控制台,在**集群管理**页面转到**定制资源定义**,搜索 `Webhook`,直接编辑 `kube-auditing-webhook`。 + +{{}} + +要停止接收来自 Kubernetes 的审计日志,请移除审计 Webhook 后端的配置,然后将 `k8sAuditingEnabled` 的值修改为 `false`。 + +## 自定义审计日志 + +KubeSphere 审计日志系统提供 `kube-auditing-webhook` CRD Webhook 来自定义审计日志。下方是一个示例 YAML 文件: + +```yaml +apiVersion: auditing.kubesphere.io/v1alpha1 +kind: Webhook +metadata: + name: kube-auditing-webhook +spec: + auditLevel: RequestResponse + auditSinkPolicy: + alertingRuleSelector: + matchLabels: + type: alerting + archivingRuleSelector: + matchLabels: + type: persistence + image: kubesphere/kube-auditing-webhook:v0.1.0 + archivingPriority: DEBUG + alertingPriority: WARNING + replicas: 2 + receivers: + - name: alert + type: alertmanager + config: + service: + namespace: kubesphere-monitoring-system + name: alertmanager-main + port: 9093 +``` + + 参数 | 描述信息 | 默认值 + --- | --- | --- + `replicas` | Kube-Auditing Webhook 的副本数量。 | 2 + `archivingPriority` | 存档规则的优先级。已知的审计类型有 `DEBUG`、`INFO` 和 `WARNING`。 | `DEBUG` + `alertingPriority` | 告警规则的优先级。已知的审计类型有 `DEBUG`、`INFO` 和 `WARNING`。 | `WARNING` + `auditLevel` | 审计日志的级别。已知的级别有:
- `None`:不记录事件。
- `Metadata`:记录请求的元数据,例如请求的用户、时间戳、资源和操作行为 (Verb) 等,但不记录请求或响应的消息体。
- `Request`:记录事件的元数据和请求的消息体但不记录响应的消息体。这不适用于非资源类型的请求。
- `RequestResponse`:记录事件的元数据、请求以及响应的消息体。这不适用于非资源类型的请求。 | `Metadata` + `k8sAuditingEnabled` | 是否接收 Kubernetes 审计日志。 | `false` + `receivers` | 接收告警的接收器。 | + +{{< notice note >}} + +您可以通过修改 `audit-policy.yaml` 文件变更 Kubernetes 审计日志的级别,然后重启 Kubernetes Apiserver。 + +{{}} \ No newline at end of file diff --git a/content/zh/docs/v3.4/toolbox/auditing/auditing-rule.md b/content/zh/docs/v3.4/toolbox/auditing/auditing-rule.md new file mode 100644 index 000000000..7e9c3110d --- /dev/null +++ b/content/zh/docs/v3.4/toolbox/auditing/auditing-rule.md @@ -0,0 +1,207 @@ +--- +title: "审计规则" +keywords: "Kubernetes, docker, kubesphere, 审计" +description: "了解审计规则以及如何自定义有关处理审计日志的规则。" +linkTitle: "审计规则" +weight: 15320 +--- + +审计规则定义了处理审计日志的策略。KubeSphere 审计日志为用户提供两种 CRD 规则(`archiving-rule` 和 `alerting-rule`)以供自定义。 + +启用 [KubeSphere 审计日志](../../../pluggable-components/auditing-logs/)后,使用拥有 `platform-admin` 角色的用户登录控制台。在**集群管理**页面转到**定制资源定义**,在搜索栏中输入 `rules.auditing.kubesphere.io`。点击搜索结果 **Rule**,您便可以看到这两种 CRD 规则。 + +下方是部分规则的示例。 + +## archiving-rule + +```yaml +apiVersion: auditing.kubesphere.io/v1alpha1 +kind: Rule +metadata: + labels: + type: archiving + workspace: system-workspace + name: archiving-rule +spec: + rules: + - desc: all action not need to be audit + list: + - get + - list + - watch + name: ignore-action + type: list + - condition: Verb not in ${ignore-action} + desc: All audit event except get, list, watch event + enable: true + name: archiving + priority: DEBUG + type: rule +``` + +## alerting-rule + +```yaml +apiVersion: auditing.kubesphere.io/v1alpha1 +kind: Rule +metadata: + labels: + type: alerting + workspace: system-workspace + name: alerting-rule +spec: + rules: + - desc: all operator need to be audit + list: + - create + - delete + - update + - patch + name: action + type: list + - condition: Verb in ${action} + desc: audit the change of resource + enable: true + name: ResourceChange + priority: INFO + type: rule +``` + + 属性 | 描述信息 + --- | --- + `name` | 该规则的名称。 + `type` | 该规则的类型;已知的值有 `rule`、`macro`、`list` 和 `alias`。 + `desc` | 该规则的描述。 + `condition` | 对审计日志应用的过滤表达式,检查是否符合规则。 + `macro` | 宏的条件。 + `list` | List 的值。 + `alias` | Alias 的值。 + `enable` | 如果设置为 `false`,该规则将不会生效。 + `output` | 指定告警消息。 + `priority` | 规则的优先级。 + +如果审计日志符合 `archiving-rule` 中的规则并且该规则的优先级不低于 `archivingPriority`,则会保存该日志供后续使用。如果审计日志符合 `alerting-rule` 中的规则并且该规则的优先级低于 `alertingPriority`,则会保存该日志供后续使用;否则将生成告警并发送至用户。 + + +## 规则条件(即 Condition) + +`Condition` 是一个过滤表达式,可以使用比较运算符(=、!=、<、<=、>、>=、contains、in、like 以及正则表达式),也可以使用布尔运算符(and、or 和 not)和括号进行组合。以下是支持的过滤器。 + + 过滤器 | 描述信息 + --- | --- + `Workspace` | 发生审计事件的企业空间。 + `DevOps` | 发生审计事件的 DevOps 项目。 + `Level` | 审计日志的级别。 + `RequestURI` | RequestURI 是由客户端发送至服务器的请求 URI。 + `Verb` | 与该请求相关联的动词。 + `User.Username` | 在所有活跃用户中唯一标识该用户的名称。 + `User.Groups` | 该用户所属的组的名称。 + `SourceIPs` | 该请求来源的源 IP 和中间代理。 + `ObjectRef.Resource` | 与该请求相关联的对象的资源。 + `ObjectRef.Namespace` | 与该请求相关联的对象的命名空间。 + `ObjectRef.Name` | 与该请求相关联的对象的名称。 + `ObjectRef.Subresource` | 与该请求相关联的对象的子资源。 + `ResponseStatus.code` | 对该请求的建议 HTTP 返回码。 + `ResponseStatus.Status` | 操作状态。 + `RequestReceivedTimestamp` | 该请求到达 Apiserver 的时间。 + `StageTimestamp` | 该请求到达当前审计阶段的时间。 + + 例如,匹配命名空间 `test` 中的所有日志: + +``` +ObjectRef.Namespace = "test" +``` + + 匹配命名空间中以 `test` 开头的所有日志: + +``` +ObjectRef.Namespace like "test*" +``` + +匹配最近一小时内发生的所有日志: + +``` +RequestReceivedTimestamp >= "2020-06-12T09:23:28.359896Z" and RequestReceivedTimestamp <= "2020-06-12T10:23:28.359896Z" +``` + +## 宏(即 Macro) + +`macro` 
是一种规则条件片段,可以在规则甚至其他宏中复用。宏提供了一种命名常用模式的方法,并消除了规则中的冗余。以下是一个宏的示例。 + +```yaml +apiVersion: auditing.kubesphere.io/v1alpha1 +kind: Rule +metadata: + name: alerting-rule + labels: + workspace: system-workspace + type: alerting +spec: + rules: + - name: pod + type: macro + desc: pod + macro: ObjectRef.Resource="pods" +``` + +{{< notice note >}} + +`macro` 可用在规则中或者其他宏中,例如 ${pod} 或 ${alerting-rule.pod}。这两种方法的区别在于 ${pod} 只能用在 `alerting-rule` CRD 规则中,而 ${alerting-rule.pod} 可以用在所有 CRD 规则中。该原则也适用于 List 和 Alias。 + +{{}} + +## 列表(即 List) + +`list` 是一个可以包含在规则、宏或其他 List 中的项目的集合。与规则和宏不同,List 不能被解析为过滤表达式。下面是一个 List 的示例。 + +```yaml +apiVersion: auditing.kubesphere.io/v1alpha1 +kind: Rule +metadata: + name: alerting-rule + labels: + workspace: system-workspace + type: alerting +spec: + rules: + - name: action + type: list + desc: all operator needs to be audit + list: + - create + - delete + - update + - patch +``` + +## 别名(即 Alias) + +`alias` 是一个过滤字段的简称。它可以包含在规则、宏、List 和输出字符串中。下面是一个 Alias 的示例。 + +```yaml +apiVersion: auditing.kubesphere.io/v1alpha1 +kind: Rule +metadata: + name: alerting-rule + labels: + workspace: system-workspace + type: alerting +spec: + rules: + - name: namespace + type: alias + desc: the alias of the resource namespace + alias: ObjectRef.Namespace +``` + +## 输出(即 Output) +当审计日志触发告警时,`Output` 字符串用于格式化告警消息。`Output` 字符串可以包括 List 和 Alias。下面是一个示例。 + +```yaml +Output: ${user} ${verb} a HostNetwork Pod ${name} in ${namespace}. +``` +{{< notice note >}} + +`user`、`verb`、`namespace` 和 `name` 字段都是 Alias。 + +{{}} diff --git a/content/zh/docs/v3.4/toolbox/events-query.md b/content/zh/docs/v3.4/toolbox/events-query.md new file mode 100644 index 000000000..78006db48 --- /dev/null +++ b/content/zh/docs/v3.4/toolbox/events-query.md @@ -0,0 +1,45 @@ +--- +title: "事件查询" +keywords: 'KubeSphere, Kubernetes, 事件, 查询' +description: '了解如何快速执行事件查询,追踪集群的最新事件。' +linkTitle: "事件查询" +weight: 15200 +--- + +Kubernetes 事件系统用于深入了解集群内部发生的事件,KubeSphere 在此之上添加了跨度更长的历史查询和聚合功能,并且支持租户隔离的事件查询。 + +本指南演示了如何进行多层级、细粒度的事件查询,以追踪服务组件的状态。 + +## 视频演示 + + + +## 准备工作 + +需要启用 [KubeSphere 事件系统](../../pluggable-components/events/)。 + +## 查询事件 + +1. 所有用户都可以使用事件查询功能。使用任意帐户登录控制台,在右下角的 icon 上悬停,然后在弹出菜单中选择**资源事件查询**。 + +2. 在弹出窗口中,您可以看到该帐户有权限查看的事件数量。 + + {{< notice note >}} + +- 如果您启用了[多集群功能](../../multicluster-management/),KubeSphere 支持对每个集群分别进行事件查询。您可以点击搜索栏左侧的 icon,然后选择一个目标集群。 + +- KubeSphere 默认存储最近七天的事件。 + + {{}} + +3. 您可以点击搜索栏并输入搜索条件,可以按照消息、企业空间、项目、资源类型、资源名称、原因、类别或时间范围搜索事件(例如,输入`时间范围:最近 10 分钟`来搜索最近 10 分钟的事件)。 + +4. 点击列表中的任一查询结果,可以查看该结果的原始信息,便于开发者分析和排除故障。 + + {{< notice note >}} + +事件查询界面支持每 5 秒、10 秒或 15 秒动态刷新一次。 + + {{}} diff --git a/content/zh/docs/v3.4/toolbox/log-query.md b/content/zh/docs/v3.4/toolbox/log-query.md new file mode 100644 index 000000000..801d49966 --- /dev/null +++ b/content/zh/docs/v3.4/toolbox/log-query.md @@ -0,0 +1,69 @@ +--- +title: "日志查询" +keywords: 'KubeSphere, Kubernetes, 日志, 查询' +description: '了解如何快速执行日志查询,追踪集群的最新日志。' +linkTitle: "日志查询" +weight: 15100 +--- + +应用程序和系统的日志可以帮助您更好地了解集群内部和工作负载内部发生的事情。日志对于排除故障问题和监控集群活动特别有用。KubeSphere 提供强大且易用的日志系统,从租户的角度为用户提供日志收集、查询和管理的功能。基于租户的日志系统使不同的租户只能查看自己的日志,安全性更好,相较于 Kibana 更为实用。此外,KubeSphere 日志系统会过滤掉一些冗余信息,以便用户能够只专注于对自己有用的日志。 + +本教程演示了如何使用日志查询功能,包括界面、搜索参数和详情页面。 + +## 视频演示 + + + +## 准备工作 + +您需要启用 [KubeSphere 日志系统](../../pluggable-components/logging/)。 + +## 进入日志查询界面 + +1. 所有用户都可以使用日志查询功能。使用任意帐户登录控制台,在右下角的 icon 上悬停,然后在弹出菜单中选择**日志查询**。 + +2. 
在弹出窗口中,您可以看到日志数量的时间直方图、集群选择下拉列表以及日志查询栏。 + + +​ {{< notice note >}} + +- 如果您启用了[多集群功能](../../multicluster-management/),KubeSphere 支持对每个集群分别进行日志查询。您可以点击搜索栏左侧的 icon 切换目标集群。 + +- KubeSphere 默认存储最近七天内的日志。 + + {{}} + +3. 您可以点击搜索栏并输入搜索条件,可以按照消息、企业空间、项目、资源类型、资源名称、原因、类别或时间范围搜索事件(例如,输入`时间范围:最近 10 分钟`来搜索最近 10 分钟的事件)。或者,点击时间直方图中的柱状图,KubeSphere 会使用该柱状图的时间范围进行日志查询。 + + {{< notice note >}} + +- 关键字字段支持关键字组合查询。例如,您可以同时使用 `Error`、`Fail`、`Fatal`、`Exception` 和 `Warning` 来查询所有异常日志。 +- 关键字字段支持精确匹配和模糊匹配。模糊匹配不区分大小写,并且根据 ElasticSearch 分段规则,通过单词或词组的前半部分来检索完整术语。例如,您可以通过搜索关键字 `node_cpu`(而不是 `cpu`)来检索包含 `node_cpu_total` 的日志。 + +- 每个集群都有自己的日志保留期限,可单独设置,您可以在 `ClusterConfiguration` 中进行修改。有关详细信息,请参考 [KubeSphere 日志系统](../../pluggable-components/logging/)。 + + {{}} + +## 使用搜索参数 + +1. 您可以输入多个条件来缩小搜索结果。 + +2. 点击列表中的任一结果,进入它的详情页面,查看该容器组 (Pod) 的日志,包括右侧的完整内容,便于开发者分析和排除故障。 + + {{< notice note >}} + +- 日志查询界面支持每 5 秒、10 秒或 15 秒动态刷新一次。 +- 您可以点击右上角的 icon 将日志导出至本地文件进行进一步分析。 + +{{}} + +4. 在左侧面板中,您可以点击 icon 切换 Pod 并查看其在同一个项目中的容器,从而查看是否有任何异常 Pod 影响到其他 Pod。 + + +## 进入详情页面 + +在左侧面板,您可以点击 icon 查看 Pod 详情页面或容器详情页面。 + +您可以点击右上角的**终端**打开终端为容器排除故障。 diff --git a/content/zh/docs/v3.4/toolbox/metering-and-billing/_index.md b/content/zh/docs/v3.4/toolbox/metering-and-billing/_index.md new file mode 100644 index 000000000..490c49a2a --- /dev/null +++ b/content/zh/docs/v3.4/toolbox/metering-and-billing/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "计量计费" +weight: 15400 + +_build: + render: false +--- diff --git a/content/zh/docs/v3.4/toolbox/metering-and-billing/enable-billing.md b/content/zh/docs/v3.4/toolbox/metering-and-billing/enable-billing.md new file mode 100644 index 000000000..bede16d80 --- /dev/null +++ b/content/zh/docs/v3.4/toolbox/metering-and-billing/enable-billing.md @@ -0,0 +1,83 @@ +--- +title: "启用计费" +keywords: "Kubernetes, KubeSphere, ConfigMap, 计费" +description: "在 KubeSphere 中启用计费功能,查看一段时期内资源的计费数据。" +linkTitle: "启用计费" +weight: 15420 +--- + +本教程介绍如何启用 KubeSphere 的计费功能,以查看集群中不同资源的消费情况。计费功能默认不启用,因此您需要在 ConfigMap 中手动添加价格信息。 + +请按照以下步骤启用 KubeSphere 的计费功能。 + +1. 执行以下命令编辑 ConfigMap `kubesphere-config`: + + ```bash + kubectl edit cm kubesphere-config -n kubesphere-system + ``` + +2. 在该 ConfigMap 的 `metering` 下添加保留期限和价格信息。以下为示例配置: + + ```yaml + $ kubectl get cm kubesphere-config -n kubesphere-system -oyaml + ... + alerting: + prometheusEndpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090 + thanosRulerEndpoint: http://thanos-ruler-operated.kubesphere-monitoring-system.svc:10902 + thanosRuleResourceLabels: thanosruler=thanos-ruler,role=thanos-alerting-rules + ... + metering: + retentionDay: 7d + billing: + priceInfo: + currencyUnit: "USD" + cpuPerCorePerHour: 1.5 + memPerGigabytesPerHour: 5 + ingressNetworkTrafficPerMegabytesPerHour: 1 + egressNetworkTrafficPerMegabytesPerHour: 1 + pvcPerGigabytesPerHour: 2.1 + kind: ConfigMap + ... + ``` + + 相关参数描述如下: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
参数 | 描述
--- | ---
`retentionDay` | `retentionDay` 决定用户的资源消费统计页面显示的日期范围。该参数的值必须与 Prometheus `retention` 的值相同。
`currencyUnit` | 资源消费统计页面显示的货币单位。目前可用的单位有 CNY(人民币)和 USD(美元)。若指定其他货币,控制台将默认以美元为单位显示消费情况。
`cpuPerCorePerHour` | 每核/小时的 CPU 单价。
`memPerGigabytesPerHour` | 每 GB/小时的内存单价。
`ingressNetworkTrafficPerMegabytesPerHour` | 每 MB/小时的入站流量单价。
`egressNetworkTrafficPerMegabytesPerHour` | 每 MB/小时的出站流量单价。
`pvcPerGigabytesPerHour` | 每 GB/小时的 PVC 单价。请注意,无论实际使用的存储是多少,KubeSphere 都会根据 PVC 请求的存储容量来计算存储卷的总消费情况。
+3. 执行以下命令重启 `ks-apiserver`。 + + ```bash + kubectl rollout restart deploy ks-apiserver -n kubesphere-system + ``` + +4. 在**资源消费统计**页面,您可以看到资源的消费信息。 diff --git a/content/zh/docs/v3.4/toolbox/metering-and-billing/view-resource-consumption.md b/content/zh/docs/v3.4/toolbox/metering-and-billing/view-resource-consumption.md new file mode 100644 index 000000000..256da95b8 --- /dev/null +++ b/content/zh/docs/v3.4/toolbox/metering-and-billing/view-resource-consumption.md @@ -0,0 +1,73 @@ +--- +title: "查看资源消费" +keywords: "Kubernetes, KubeSphere, 计量, 计费, 消费" +description: "在不同层级追踪集群工作负载的资源用量。" +linkTitle: "查看资源消费" +weight: 15410 +--- + +KubeSphere 计量功能帮助您在不同层级追踪集群或企业空间中的资源消费。具有不同角色的租户只能看到其有权访问的数据。此外,您还可以为不同的资源设置价格以查看计费信息。 + +## 准备工作 + +- 所有租户都可以访问**资源消费统计**模块,但每个租户可见的信息可能有所不同,可见信息具体取决于租户在所处层级上具有的角色。请注意,计量功能并非 KubeSphere 的可插拔组件,即只要您有一个 KubeSphere 集群,就可以使用该功能。对于新创建的集群,您需要等待大约一小时才能看到计量信息。 +- 如需查看计费信息,您需要预先[启用计费](../enable-billing/)。 + +## 查看集群资源消费情况 + +**集群资源消费情况**包含集群(也包括节点)的资源使用情况,如 CPU、内存、存储等。 + +1. 使用 `admin` 用户登录 KubeSphere Web 控制台,点击右下角的 icon,然后选择**资源消费统计**。 + +2. 在**集群资源消费情况**一栏,点击**查看消费**。 + +3. 如果您已经启用[多集群管理](../../../multicluster-management/),则可以在控制面板左侧看到包含 Host 集群和全部 Member 集群的集群列表。如果您未启用该功能,那么列表中只会显示一个 `default` 集群。 + + 在右侧,有三个模块以不同的方式显示资源消费情况。 + + + + + + + + + + + + + + + + + + + + +
+   | 模块 | 描述 |
+   | ---- | ---- |
+   | 资源消费统计 | 显示自集群创建以来不同资源的消费概览。如果您已在 ConfigMap `kubesphere-config` 中配置资源的价格,则可以看到计费信息。 |
+   | 消费历史 | 显示截至昨天的资源消费总况,您也可以自定义时间范围和时间间隔,以查看特定周期内的数据。 |
+   | 当前消费 | 显示过去一小时所选目标对象的资源消费情况。 |
+ +4. 在左侧,点击集群名称即可查看集群节点或 Pod 的资源消费详情。 + + {{< notice note >}} + + 如需导出 CSV 格式的资源消费统计数据,请勾选左侧的复选框,然后点击 ✓。 + + {{}} + +## 查看企业空间(项目)资源消费情况 + +**企业空间(项目)资源消费情况**包含企业空间(包括项目)的资源使用情况,如 CPU、内存、存储等。 + +1. 使用 `admin` 用户登录 KubeSphere Web 控制台,点击右下角的 icon 图标,然后选择**资源消费统计**。 + +2. 在**企业空间资源消费情况**一栏,点击**查看**。 + +3. 在控制面板左侧,可以看到包含当前集群中全部企业空间的列表。右侧显示所选企业空间的消费详情,其布局与集群消费情况布局类似。 + + {{< notice note >}} + + 在多集群架构中,如果企业空间中没有分配可用集群,则无法查看企业空间的资源消费情况。有关更多信息,请参阅[集群可见性和授权](../../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/)。 + + {{}} + +4. 在左侧,点击企业空间名称即可查看其项目或工作负载(例如,部署和有状态副本集)的资源消费详情。 diff --git a/content/zh/docs/v3.4/toolbox/web-kubectl.md b/content/zh/docs/v3.4/toolbox/web-kubectl.md new file mode 100644 index 000000000..d2f5e4b14 --- /dev/null +++ b/content/zh/docs/v3.4/toolbox/web-kubectl.md @@ -0,0 +1,42 @@ +--- +title: "Web Kubectl" +keywords: 'KubeSphere, Kubernetes, kubectl, cli' +description: 'KubeSphere 中集成了 Web Kubectl 工具,为 Kubernetes 用户提供一致的用户体验。' +linkTitle: "Web Kubectl" +weight: 15500 +--- + +Kubectl 是 Kubernetes 命令行工具,您可以用它在 Kubernetes 集群上运行命令。Kubectl 可用于部署应用、查看和管理集群资源、查看日志等。 + +KubeSphere 控制台提供 Web Kubectl,方便用户使用。在默认情况下,当前版本中只有被授予 `platform-admin` 角色的用户(例如默认帐户 `admin`)才有权限使用 Web Kubectl 进行集群资源操作和管理。 + +本教程演示了如何使用 Web Kubectl 进行集群资源操作和管理。 + +## 使用 Web Kubectl + +1. 使用被授予 `platform-admin` 角色的用户登录 KubeSphere,在右下角的**工具箱**图标上悬停,然后在弹出菜单中选择 **kubectl**。 + +2. 您可以在弹出窗口中看到 Kubectl 界面,如下图所示。如果您启用了多集群功能,则需要先在右上角的下拉列表中选择目标集群。如果未启用多集群功能,则该下拉列表不可见。 + +3. 在命令行工具中输入 Kubectl 命令,查询并管理 Kubernetes 集群资源。例如,执行以下命令查询集群中所有 PVC 的状态。 + + ```bash + kubectl get pvc --all-namespaces + ``` + +4. 在终端窗口中使用以下语法运行 Kubectl 命令: + + ```bash + kubectl [command] [TYPE] [NAME] [flags] + ``` + + {{< notice note >}} + +- 其中,`command`、`TYPE`、`NAME` 和 `flags` 分别是: + - `command`:指定要对一个或多个资源执行的操作,例如 `create`、`get`、`describe` 和 `delete`。 + - `TYPE`:指定[资源类型](https://kubernetes.io/zh/docs/reference/kubectl/overview/)。资源类型不区分大小写,您可以指定单数、复数或缩写形式。 + - `NAME`:指定资源的名称。名称区分大小写。如果省略名称,则会显示所有资源的详细信息,例如 `kubectl get pods`。 + - `flags`:指定可选标志。例如,您可以使用 `-s` 或 `--server` 标志指定 Kubernetes API 服务器的地址和端口。 +- 如果您需要帮助,请在终端窗口运行 `kubectl help` 或者参考 [Kubernetes Kubectl CLI 文档](https://kubernetes.io/zh/docs/reference/kubectl/overview/)。 + + {{}} diff --git a/content/zh/docs/v3.4/upgrade/_index.md b/content/zh/docs/v3.4/upgrade/_index.md new file mode 100644 index 000000000..1c4ee4f12 --- /dev/null +++ b/content/zh/docs/v3.4/upgrade/_index.md @@ -0,0 +1,14 @@ +--- +title: "升级" +description: "升级 KubeSphere 和 Kubernetes" +layout: "second" + +linkTitle: "升级" + +weight: 7000 + +icon: "/images/docs/v3.3/docs.svg" + +--- + +本章演示集群管理员如何将 KubeSphere 升级到 3.3.1。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/upgrade/air-gapped-upgrade-with-ks-installer.md b/content/zh/docs/v3.4/upgrade/air-gapped-upgrade-with-ks-installer.md new file mode 100644 index 000000000..ed7085300 --- /dev/null +++ b/content/zh/docs/v3.4/upgrade/air-gapped-upgrade-with-ks-installer.md @@ -0,0 +1,182 @@ +--- +title: "使用 ks-installer 离线升级" +keywords: "离线环境, 升级, kubesphere, 3.3.1" +description: "使用 ks-installer 和离线包升级 KubeSphere。" +linkTitle: "使用 ks-installer 离线升级" +weight: 7500 +--- + +对于 Kubernetes 集群不是通过 [KubeKey](../../installing-on-linux/introduction/kubekey/) 部署而是由云厂商托管或自行部署的用户,推荐使用 ks-installer。本教程**只用于升级 KubeSphere**。集群运维员应负责提前升级 Kubernetes。 + + +## 准备工作 + +- 您需要有一个运行 KubeSphere v3.2.x 的集群。如果您的 KubeSphere 是 v3.1.0 或更早的版本,请先升级至 v3.2.x。 +- 请仔细阅读 [3.3.2 版本说明](../../../v3.3/release/release-v332/)。 +- 提前备份所有重要的组件。 +- Docker 仓库。您需要有一个 Harbor 或其他 
Docker 仓库。有关更多信息,请参见[准备一个私有镜像仓库](../../installing-on-linux/introduction/air-gapped-installation/#步骤-2准备一个私有镜像仓库)。 +- KubeSphere 3.3 支持的 Kubernetes 版本:v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 v1.21.x。 + +## 重要提示 + +KubeSphere 3.3.1 对内置角色和自定义角色的授权项做了一些调整。在您升级到 KubeSphere 3.3.1 时,请注意以下几点: + + - 内置角色调整:移除了平台级内置角色 `users-manager`(用户管理员)和 `workspace-manager`(企业空间管理员),如果已有用户绑定了 `users-manager` 或 `workspace-manager`,他们的角色将会在升级之后变更为 `platform-regular`。增加了平台级内置角色 `platform-self-provisioner`。关于平台角色的具体描述,请参见[创建用户](../../quick-start/create-workspace-and-project/#创建用户)。 + - 自定义角色授权项调整: + - 移除平台层级自定义角色授权项:用户管理,角色管理,企业空间管理。 + - 移除企业空间层级自定义角色授权项:成员管理,角色管理,用户组管理。 + - 移除命名空间层级自定义角色授权项:成员管理,角色管理。 + - 升级到 KubeSphere 3.3.1 后,自定义角色会被保留,但是其包含的已被移除的授权项会被删除。 + +## 步骤 1:准备安装镜像 + +当您在离线环境中安装 KubeSphere 时,需要事先准备一个包含所有必需镜像的镜像包。 + +1. 使用以下命令从能够访问互联网的机器上下载镜像清单文件 `images-list.txt`: + + ```bash + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/images-list.txt + ``` + + {{< notice note >}} + + 该文件根据不同的模块列出了 `##+modulename` 下的镜像。您可以按照相同的规则把自己的镜像添加到这个文件中。要查看完整文件,请参见[附录](../../installing-on-kubernetes/on-prem-kubernetes/install-ks-on-linux-airgapped/#kubesphere-v332-镜像清单)。 + + {{}} + +2. 下载 `offline-installation-tool.sh`。 + + ```bash + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/offline-installation-tool.sh + ``` + +3. 使 `.sh` 文件可执行。 + + ```bash + chmod +x offline-installation-tool.sh + ``` + +4. 您可以执行命令 `./offline-installation-tool.sh -h` 来查看如何使用脚本: + + ```bash + root@master:/home/ubuntu# ./offline-installation-tool.sh -h + Usage: + + ./offline-installation-tool.sh [-l IMAGES-LIST] [-d IMAGES-DIR] [-r PRIVATE-REGISTRY] [-v KUBERNETES-VERSION ] + + Description: + -b : save kubernetes' binaries. + -d IMAGES-DIR : the dir of files (tar.gz) which generated by `docker save`. default: ./kubesphere-images + -l IMAGES-LIST : text file with list of images. + -r PRIVATE-REGISTRY : target private registry:port. + -s : save model will be applied. Pull the images in the IMAGES-LIST and save images as a tar.gz file. + -v KUBERNETES-VERSION : download kubernetes' binaries. default: v1.17.9 + -h : usage message + ``` + +5. 在 `offline-installation-tool.sh` 中拉取镜像。 + + ```bash + ./offline-installation-tool.sh -s -l images-list.txt -d ./kubesphere-images + ``` + + {{< notice note >}} + + 您可以根据需要选择拉取的镜像。例如,如果已经有一个 Kubernetes 集群了,您可以在 `images-list.text` 中删除 `##k8s-images` 和在它下面的相关镜像。 + + {{}} + +## 步骤 2:推送镜像至您的私有仓库 + +将打包的镜像文件传输至您的本地机器,并运行以下命令把它推送至仓库。 + +```bash +./offline-installation-tool.sh -l images-list.txt -d ./kubesphere-images -r dockerhub.kubekey.local +``` + +{{< notice note >}} + +命令中的域名是 `dockerhub.kubekey.local`。请确保使用您**自己仓库的地址**。 + +{{}} + +## 步骤 3:下载 ks-installer + +与在现有 Kubernetes 集群上在线安装 KubeSphere 相似,您需要事先下载 `kubesphere-installer.yaml`。 + +1. 执行以下命令下载 ks-installer,并将其传输至您充当任务机的机器,用于安装。 + + ```bash + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml + ``` + +2. 
验证您已在 `cluster-configuration.yaml` 中的 `spec.local_registry` 字段指定了私有镜像仓库地址。请注意,如果您的已有集群通过离线安装方式搭建,您应该已配置了此地址。如果您的集群采用在线安装方式搭建而需要进行离线升级,执行以下命令编辑您已有 KubeSphere 3.3 集群的 `cluster-configuration.yaml` 文件,并添加私有镜像仓库地址: + + ```bash + kubectl edit cc -n kubesphere-system + ``` + + 例如,本教程中的仓库地址是 `dockerhub.kubekey.local`,将它用作 `.spec.local_registry` 的值,如下所示: + + ```yaml + spec: + persistence: + storageClass: "" + authentication: + jwtSecret: "" + local_registry: dockerhub.kubekey.local # Add this line manually; make sure you use your own registry address. + ``` + +3. 编辑完成后保存 `cluster-configuration.yaml`。使用以下命令将 `ks-installer` 替换为您**自己仓库的地址**。 + + ```bash + sed -i "s#^\s*image: kubesphere.*/ks-installer:.*# image: dockerhub.kubekey.local/kubesphere/ks-installer:v3.3.2#" kubesphere-installer.yaml + ``` + + {{< notice warning >}} + + 命令中的仓库地址是 `dockerhub.kubekey.local`。请确保使用您自己仓库的地址。 + + {{}} + +## 步骤 4:升级 KubeSphere + +确保上述所有步骤都已完成后,执行以下命令。 + +```bash +kubectl apply -f kubesphere-installer.yaml +``` + +## 步骤 5:验证安装 + +安装完成后,您会看到以下内容: + +```bash +##################################################### +### Welcome to KubeSphere! ### +##################################################### + +Console: http://192.168.0.2:30880 +Account: admin +Password: P@88w0rd + +NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. + +##################################################### +https://kubesphere.io 20xx-xx-xx xx:xx:xx +##################################################### +``` + +现在,您可以通过 `http://{IP}:30880` 使用默认帐户和密码 `admin/P@88w0rd` 访问 KubeSphere 的 Web 控制台。 + +{{< notice note >}} + +要访问控制台,请确保在您的安全组中打开端口 30880。 + +{{}} diff --git a/content/zh/docs/v3.4/upgrade/air-gapped-upgrade-with-kubekey.md b/content/zh/docs/v3.4/upgrade/air-gapped-upgrade-with-kubekey.md new file mode 100644 index 000000000..bb0b52e6a --- /dev/null +++ b/content/zh/docs/v3.4/upgrade/air-gapped-upgrade-with-kubekey.md @@ -0,0 +1,352 @@ +--- +title: "使用 KubeKey 离线升级" +keywords: "离线环境, kubernetes, 升级, kubesphere, 3.3" +description: "使用离线包升级 Kubernetes 和 KubeSphere。" +linkTitle: "使用 KubeKey 离线升级" +weight: 7400 +--- +对于 KubeSphere 和 Kubernetes 都是通过 [KubeKey](../../installing-on-linux/introduction/kubekey/) 部署的用户,推荐使用 KubeKey 离线升级。如果您的 Kubernetes 集群由云厂商托管或自行配置,请参考[使用 ks-installer 离线升级](../air-gapped-upgrade-with-ks-installer/)。 + +## 准备工作 + +- 您需要有一个运行 KubeSphere v3.2.x 的集群。如果您的 KubeSphere 是 v3.1.0 或更早的版本,请先升级至 v3.2.x。 +- 您的 Kubernetes 版本必须为 1.20.x、1.21.x、1.22.x,1.23.x 或 1.24.x。 +- 请仔细阅读 [3.3.2 版本说明](../../../v3.3/release/release-v332/)。 +- 提前备份所有重要的组件。 +- Docker 仓库。您需要有一个 Harbor 或其他 Docker 仓库。 +- 请确保每个节点都可以从该 Docker 仓库拉取镜像或向其推送镜像。 + +## 重要提示 + +KubeSphere 3.3.1 对内置角色和自定义角色的授权项做了一些调整。在您升级到 KubeSphere 3.3.1 时,请注意以下几点: + + - 内置角色调整:移除了平台级内置角色 `users-manager`(用户管理员)和 `workspace-manager`(企业空间管理员),如果已有用户绑定了 `users-manager` 或 `workspace-manager`,他们的角色将会在升级之后变更为 `platform-regular`。增加了平台级内置角色 `platform-self-provisioner`。关于平台角色的具体描述,请参见[创建用户](../../quick-start/create-workspace-and-project/#创建用户)。 + + - 自定义角色授权项调整: + - 移除平台层级自定义角色授权项:用户管理,角色管理,企业空间管理。 + - 移除企业空间层级自定义角色授权项:成员管理,角色管理,用户组管理。 + - 移除命名空间层级自定义角色授权项:成员管理,角色管理。 + - 升级到 KubeSphere 3.3.1 后,自定义角色会被保留,但是其包含的已被移除的授权项会被删除。 + +## 升级 KubeSphere 和 Kubernetes + +单节点集群 (All-in-One) 和多节点集群的升级步骤不同。 + +{{< notice info >}} + +当升级 Kubernetes 时,KubeKey 
将从一个小版本升级到下一个小版本,直到目标版本。例如,您会发现升级过程是从 1.16 先升级到 1.17 然后再升级到 1.18,而不是直接从 1.16 升级到 1.18。 + +{{}} + + +### 系统要求 + +| 系统 | 最低要求(每个节点) | +| ------------------------------------------------------------ | -------------------------------- | +| **Ubuntu** *16.04, 18.04,20.04* | CPU:2 核,内存:4 G,硬盘:40 G | +| **Debian** *Buster, Stretch* | CPU:2 核,内存:4 G,硬盘:40 G | +| **CentOS** *7.x* | CPU:2 核,内存:4 G,硬盘:40 G | +| **Red Hat Enterprise Linux** *7* | CPU:2 核,内存:4 G,硬盘:40 G | +| **SUSE Linux Enterprise Server** *15* **/openSUSE Leap** *15.2* | CPU:2 核,内存:4 G,硬盘:40 G | + +{{< notice note >}} + +[KubeKey](https://github.com/kubesphere/kubekey) 使用 `/var/lib/docker` 作为默认路径来存储所有 Docker 相关文件(包括镜像)。建议您添加附加存储卷,分别给 `/var/lib/docker` 和 `/mnt/registry` 挂载至少 **100G**。请参见 [fdisk](https://www.computerhope.com/unix/fdisk.htm) 的参考命令。 + +{{}} + + +### 步骤 1:下载 KubeKey + +1. 执行以下命令下载 KubeKey 并解压: + + {{< tabs >}} + + {{< tab "如果您能正常访问 GitHub/Googleapis" >}} + + 从 [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) 下载 KubeKey 或者直接运行以下命令。 + + ```bash + curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - + ``` + + {{}} + + {{< tab "如果您访问 GitHub/Googleapis 受限" >}} + + 首先运行以下命令,以确保您从正确的区域下载 KubeKey。 + + ```bash + export KKZONE=cn + ``` + + 运行以下命令来下载 KubeKey: + + ```bash + curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - + ``` + {{}} + + {{}} + +2. 解压文件后,执行以下命令,使 `kk` 可执行: + + ```bash + chmod +x kk + ``` + +### 步骤 2:准备安装镜像 + +当您在 Linux 上安装 KubeSphere 和 Kubernetes 时,需要准备一个包含所有必需镜像的镜像包,并事先下载 Kubernetes 二进制文件。 + +1. 使用以下命令从能够访问互联网的机器上下载镜像清单文件 `images-list.txt`: + + ```bash + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/images-list.txt + ``` + + {{< notice note >}} + + 该文件根据不同的模块列出了 `##+modulename` 下的镜像。您可以按照相同的规则把自己的镜像添加到这个文件中。 + + {{}} + +2. 下载 `offline-installation-tool.sh`。 + + ```bash + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/offline-installation-tool.sh + ``` + +3. 使 `.sh` 文件可执行。 + + ```bash + chmod +x offline-installation-tool.sh + ``` + +4. 您可以执行命令 `./offline-installation-tool.sh -h` 来查看如何使用脚本: + + ```bash + root@master:/home/ubuntu# ./offline-installation-tool.sh -h + Usage: + + ./offline-installation-tool.sh [-l IMAGES-LIST] [-d IMAGES-DIR] [-r PRIVATE-REGISTRY] [-v KUBERNETES-VERSION ] + + Description: + -b : save kubernetes' binaries. + -d IMAGES-DIR : the dir of files (tar.gz) which generated by `docker save`. default: /home/ubuntu/kubesphere-images + -l IMAGES-LIST : text file with list of images. + -r PRIVATE-REGISTRY : target private registry:port. + -s : save model will be applied. Pull the images in the IMAGES-LIST and save images as a tar.gz file. + -v KUBERNETES-VERSION : download kubernetes' binaries. default: v1.17.9 + -h : usage message + ``` + +5. 下载 Kubernetes 二进制文件。 + + ```bash + ./offline-installation-tool.sh -b -v v1.22.12 + ``` + + 如果您无法访问 Google 的对象存储服务,请运行以下命令添加环境变量以变更来源。 + + ```bash + export KKZONE=cn;./offline-installation-tool.sh -b -v v1.22.12 + ``` + + {{< notice note >}} + + - 您可以根据自己的需求变更下载的 Kubernetes 版本。安装 KubeSphere 3.3 的建议 Kubernetes 版本:v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 v1.21.x。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.23.10。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../installing-on-linux/introduction/kubekey/#支持矩阵)。 + + - 运行脚本后,会自动创建一个文件夹 `kubekey`。请注意,您稍后创建集群时,该文件和 `kk` 必须放在同一个目录下。 + + {{}} + +6. 
在 `offline-installation-tool.sh` 中拉取镜像。 + + ```bash + ./offline-installation-tool.sh -s -l images-list.txt -d ./kubesphere-images + ``` + + {{< notice note >}} + + 您可以根据需要选择拉取的镜像。例如,如果已有一个 Kubernetes 集群,您可以在 `images-list.text` 中删除 `##k8s-images` 和在它下面的相关镜像。 + + {{}} + +### 步骤 3:推送镜像至私有仓库 + +将打包的镜像文件传输至您的本地机器,并运行以下命令把它推送至仓库。 + +```bash +./offline-installation-tool.sh -l images-list.txt -d ./kubesphere-images -r dockerhub.kubekey.local +``` + + {{< notice note >}} + + 命令中的域名是 `dockerhub.kubekey.local`。请确保使用您**自己仓库的地址**。 + + {{}} + +### 离线升级 All-in-One 集群 + +#### 示例机器 +| 主机名称 | IP | 角色 | 端口 | URL | +| -------- | ----------- | -------------------- | ---- | ----------------------- | +| master | 192.168.1.1 | Docker 仓库 | 5000 | http://192.168.1.1:5000 | +| master | 192.168.1.1 | master、etcd、worker | | | + +#### 版本 + +| | Kubernetes | KubeSphere | +| ------ | ---------- | ---------- | +| 升级前 | v1.18.6 | v3.2.x | +| 升级后 | v1.22.12 | 3.3 | + +#### 升级集群 + +本示例中,KubeSphere 安装在单个节点上,您需要指定一个配置文件以添加主机信息。此外,离线安装时,请务必将 `.spec.registry.privateRegistry` 设置为**您自己的仓库地址**。有关更多信息,请参见下面的内容。 + +#### 创建示例配置文件 + +执行以下命令生成示例配置文件用于安装: + +```bash +./kk create config [--with-kubernetes version] [--with-kubesphere version] [(-f | --file) path] +``` + +例如: + +```bash +./kk create config --with-kubernetes v1.22.12 --with-kubesphere v3.3.2 -f config-sample.yaml +``` + +{{< notice note >}} + +请确保 Kubernetes 版本和您下载的版本一致。 + +{{}} + +#### 编辑配置文件 + +编辑该配置文件 `config-sample.yaml`。请查看[供您参考的示例](https://github.com/kubesphere/kubekey/blob/release-2.2/docs/config-example.md)。 + + {{< notice warning >}} + +离线安装时,您必须指定 `privateRegistry`,在本示例中是 `dockerhub.kubekey.local`。 + + {{}} + +设置 `config-sample.yaml` 文件中的 `hosts`: + +```yaml + hosts: + - {name: ks.master, address: 192.168.1.1, internalAddress: 192.168.1.1, user: root, password: Qcloud@123} + roleGroups: + etcd: + - ks.master + control-plane: + - ks.master + worker: + - ks.master +``` + +设置 `config-sample.yaml` 文件中的 `privateRegistry`: +```yaml + registry: + registryMirrors: [] + insecureRegistries: [] + privateRegistry: dockerhub.kubekey.local +``` + +#### 将单节点集群升级至 KubeSphere 3.3 和 Kubernetes v1.22.12 + +```bash +./kk upgrade -f config-sample.yaml +``` + +要将 Kubernetes 升级至特定版本,可以在 `--with-kubernetes` 标志后明确指定版本号。以下是可用版本:v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 v1.21.x。 + +### 离线升级多节点集群 + +#### 示例机器 +| 主机名称 | IP | 角色 | 端口 | URL | +| -------- | ----------- | ------------ | ---- | ----------------------- | +| master | 192.168.1.1 | Docker 仓库 | 5000 | http://192.168.1.1:5000 | +| master | 192.168.1.1 | master、etcd | | | +| slave1 | 192.168.1.2 | worker | | | +| slave1 | 192.168.1.3 | worker | | | + + +#### 版本 + +| | Kubernetes | KubeSphere | +| ------ | ---------- | ---------- | +| 升级前 | v1.18.6 | v3.2.x | +| 升级后 | v1.22.12 | 3.3.x | + +#### 升级集群 + +本示例中,KubeSphere 安装在多个节点上,因此您需要指定一个配置文件以添加主机信息。此外,离线安装时,请务必将 `.spec.registry.privateRegistry` 设置为**您自己的仓库地址**。有关更多信息,请参见下面的内容。 + +#### 创建示例配置文件 + +执行以下命令生成示例配置文件用于安装: + +```bash +./kk create config [--with-kubernetes version] [--with-kubesphere version] [(-f | --file) path] +``` + +例如: + +```bash +./kk create config --with-kubernetes v1.22.12 --with-kubesphere v3.3.2 -f config-sample.yaml +``` + +{{< notice note >}} + +请确保 Kubernetes 版本和您下载的版本一致。 + +{{}} + +#### 编辑配置文件 + +编辑该配置文件 `config-sample.yaml`。请查看[供您参考的示例](https://github.com/kubesphere/kubekey/blob/release-2.2/docs/config-example.md)。 + + {{< notice warning >}} + +离线安装时,您必须指定 `privateRegistry`,在本示例中是 `dockerhub.kubekey.local`。 + + {{}} + 
+设置 `config-sample.yaml` 文件中的 `hosts`: + +```yaml + hosts: + - {name: ks.master, address: 192.168.1.1, internalAddress: 192.168.1.1, user: root, password: Qcloud@123} + - {name: ks.slave1, address: 192.168.1.2, internalAddress: 192.168.1.2, user: root, privateKeyPath: "/root/.ssh/kp-qingcloud"} + - {name: ks.slave2, address: 192.168.1.3, internalAddress: 192.168.1.3, user: root, privateKeyPath: "/root/.ssh/kp-qingcloud"} + roleGroups: + etcd: + - ks.master + control-plane: + - ks.master + worker: + - ks.slave1 + - ks.slave2 +``` +设置 `config-sample.yaml` 文件中的 `privateRegistry`: +```yaml + registry: + registryMirrors: [] + insecureRegistries: [] + privateRegistry: dockerhub.kubekey.local +``` + +#### 将多节点集群升级至 KubeSphere 3.3 和 Kubernetes v1.22.12 + +```bash +./kk upgrade -f config-sample.yaml +``` + +要将 Kubernetes 升级至特定版本,可以在 `--with-kubernetes` 标志后明确指定版本号。以下是可用版本:v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 v1.21.x。 + diff --git a/content/zh/docs/v3.4/upgrade/overview.md b/content/zh/docs/v3.4/upgrade/overview.md new file mode 100644 index 000000000..2ad1cb7c8 --- /dev/null +++ b/content/zh/docs/v3.4/upgrade/overview.md @@ -0,0 +1,31 @@ +--- +title: "概述" +keywords: "Kubernetes, 升级, KubeSphere, 3.3, 升级" +description: "了解升级之前需要注意的事项,例如版本和升级工具。" +linkTitle: "概述" +weight: 7100 +--- + +## 确定您的升级方案 + +KubeSphere 3.3 与 Kubernetes 1.19.x、1.20.x、1.21.x、* 1.22.x、* 1.23.x、* 1.24.x 兼容: + +- 在您升级集群至 KubeSphere 3.3 之前,您的 KubeSphere 集群版本必须为 v3.2.x。 + +- 您可选择只将 KubeSphere 升级到 3.3 或者同时升级 Kubernetes(到更高版本)和 KubeSphere(到 3.3)。 + +- 带星号的 Kubernetes 版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 Kubernetes v1.21.x 及之前的版本。 + +## 升级前 + +{{< notice warning >}} + +- 您应该先在测试环境中实施升级模拟。在测试环境中成功升级并且所有应用程序都正常运行之后,再在生产环境中升级您的集群。 +- 在升级过程中,应用程序可能会短暂中断(尤其是单副本容器组),请安排合理的升级时间。 +- 建议在生产环境中升级之前备份 etcd 和有状态应用程序。您可以使用 [Velero](https://velero.io/) 来备份和迁移 Kubernetes 资源以及持久化存储卷。 + +{{}} + +## 升级工具 + +根据您已有集群的搭建方式,您可以使用 KubeKey 或 ks-installer 升级集群。如果您的集群由 KubeKey 搭建,[建议您使用 KubeKey 升级集群](../upgrade-with-kubekey/)。如果您通过其他方式搭建集群,[请使用 ks-installer 升级集群](../upgrade-with-ks-installer/)。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/upgrade/upgrade-with-ks-installer.md b/content/zh/docs/v3.4/upgrade/upgrade-with-ks-installer.md new file mode 100644 index 000000000..748e97f28 --- /dev/null +++ b/content/zh/docs/v3.4/upgrade/upgrade-with-ks-installer.md @@ -0,0 +1,40 @@ +--- +title: "使用 ks-installer 升级" +keywords: "kubernetes, 升级, kubesphere, 3.3" +description: "使用 ks-installer 升级 KubeSphere。" +linkTitle: "使用 ks-installer 升级" +weight: 7300 +--- + +对于 Kubernetes 集群不是通过 [KubeKey](../../installing-on-linux/introduction/kubekey/) 部署而是由云厂商托管或自行搭建的用户,推荐使用 ks-installer 升级。本教程**仅用于升级 KubeSphere**。集群运维员应负责提前升级 Kubernetes。 + +## 准备工作 + +- 您需要有一个运行 KubeSphere v3.2.x 的集群。如果您的 KubeSphere 是 v3.1.0 或更早的版本,请先升级至 v3.2.x。 +- 请仔细阅读 [3.3.2 版本说明](../../../v3.3/release/release-v332/)。 +- 提前备份所有重要的组件。 +- KubeSphere 3.3 支持的 Kubernetes 版本:v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 v1.21.x。 + +## 重要提示 + +KubeSphere 3.3.1 对内置角色和自定义角色的授权项做了一些调整。在您升级到 KubeSphere 3.3.1 时,请注意以下几点: + + - 内置角色调整:移除了平台级内置角色 `users-manager`(用户管理员)和 `workspace-manager`(企业空间管理员),如果已有用户绑定了 `users-manager` 或 `workspace-manager`,他们的角色将会在升级之后变更为 `platform-regular`。增加了平台级内置角色 `platform-self-provisioner`。关于平台角色的具体描述,请参见[创建用户](../../quick-start/create-workspace-and-project/#创建用户)。 + + - 自定义角色授权项调整: + - 移除平台层级自定义角色授权项:用户管理,角色管理,企业空间管理。 + - 移除企业空间层级自定义角色授权项:成员管理,角色管理,用户组管理。 + - 移除命名空间层级自定义角色授权项:成员管理,角色管理。 
+ - 升级到 KubeSphere 3.3.1 后,自定义角色会被保留,但是其包含的已被移除的授权项会被删除。 + +## 应用 ks-installer + +运行以下命令升级集群: + +```bash +kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml --force +``` + +## 启用可插拔组件 + +您可以在升级后启用 KubeSphere 3.3 的[可插拔组件](../../pluggable-components/overview/)以体验该容器平台的更多功能。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/upgrade/upgrade-with-kubekey.md b/content/zh/docs/v3.4/upgrade/upgrade-with-kubekey.md new file mode 100644 index 000000000..08bdb9bdd --- /dev/null +++ b/content/zh/docs/v3.4/upgrade/upgrade-with-kubekey.md @@ -0,0 +1,149 @@ +--- +title: "使用 KubeKey 升级" +keywords: "Kubernetes, 升级, KubeSphere, 3.3, KubeKey" +description: "使用 KubeKey 升级 Kubernetes 和 KubeSphere。" +linkTitle: "使用 KubeKey 升级" +weight: 7200 +--- + +对于 KubeSphere 和 Kubernetes 都由 [KubeKey](../../installing-on-linux/introduction/kubekey/) 部署的用户,推荐使用 KubeKey 升级。如果您的 Kubernetes 集群由云厂商托管或自行配置,请参考[使用 ks-installer 升级](../upgrade-with-ks-installer/)。 + +本教程演示如何使用 KubeKey 升级集群。 + + +## 准备工作 + +- 您需要有一个运行 KubeSphere v3.2.x 的集群。如果您的 KubeSphere 是 v3.1.0 或更早的版本,请先升级至 v3.2.x。 +- 请仔细阅读 [3.3.2 版本说明](../../../v3.3/release/release-v332/)。 +- 提前备份所有重要的组件。 +- 确定您的升级方案。本文档中提供 [All-in-One 集群](#all-in-one-集群)和[多节点集群](#多节点集群)的两种升级场景。 + +## 重要提示 + +KubeSphere 3.3.1 对内置角色和自定义角色的授权项做了一些调整。在您升级到 KubeSphere 3.3.1 时,请注意以下几点: + + - 内置角色调整:移除了平台级内置角色 `users-manager`(用户管理员)和 `workspace-manager`(企业空间管理员),如果已有用户绑定了 `users-manager` 或 `workspace-manager`,他们的角色将会在升级之后变更为 `platform-regular`。增加了平台级内置角色 `platform-self-provisioner`。关于平台角色的具体描述,请参见[创建用户](../../quick-start/create-workspace-and-project/#创建用户)。 + + - 自定义角色授权项调整: + - 移除平台层级自定义角色授权项:用户管理,角色管理,企业空间管理。 + - 移除企业空间层级自定义角色授权项:成员管理,角色管理,用户组管理。 + - 移除命名空间层级自定义角色授权项:成员管理,角色管理。 + - 升级到 KubeSphere 3.3.1 后,自定义角色会被保留,但是其包含的已被移除的授权项会被删除。 + +## 下载 KubeKey + +升级集群前执行以下命令下载 KubeKey。 + +{{< tabs >}} + +{{< tab "如果您能正常访问 GitHub/Googleapis" >}} + +从 [GitHub 发布页面](https://github.com/kubesphere/kubekey/releases)下载 KubeKey 或直接使用以下命令。 + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{}} + +{{< tab "如果您访问 GitHub/Googleapis 受限" >}} + +先执行以下命令以确保您从正确的区域下载 KubeKey。 + +```bash +export KKZONE=cn +``` + +执行以下命令下载 KubeKey。 + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh - +``` + +{{< notice note >}} + +下载 KubeKey 后,如果您将其传至新的机器,且访问 Googleapis 同样受限,请您在执行以下步骤之前务必再次执行 `export KKZONE=cn` 命令。 + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +执行以上命令会下载最新版 KubeKey,您可以修改命令中的版本号以下载指定版本。 + +{{}} + +为 `kk` 添加可执行权限: + +```bash +chmod +x kk +``` + +## 升级 KubeSphere 和 Kubernetes + +单节点集群 (All-in-One) 和多节点集群的升级步骤不同。 + +{{< notice info >}} + +当升级 Kubernetes 时,KubeKey 将从一个小版本升级到下一个小版本,直到目标版本。例如,您会发现升级过程先从 1.16 先升级到 1.17 然后再升级到 1.18,而不是直接从 1.16 升级到 1.18。 +{{}} + +### All-in-One 集群 + +运行以下命令使用 KubeKey 将您的单节点集群升级至 KubeSphere 3.3 和 Kubernetes v1.22.12: + +```bash +./kk upgrade --with-kubernetes v1.22.12 --with-kubesphere v3.3.2 +``` + +要将 Kubernetes 升级至特定版本,请在 `--with-kubernetes` 标志后明确指定版本号。以下是可用版本:v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 v1.21.x。 + +### 多节点集群 + +#### 步骤 1:使用 KubeKey 生成配置文件 + +运行以下命令会基于您的集群创建一个 `sample.yaml` 配置文件。 + +```bash +./kk create config --from-cluster +``` + +{{< notice note >}} + +假设您的 kubeconfig 位于 `~/.kube/config`。您可以通过 `--kubeconfig` 标志进行修改。 + +{{}} + +#### 步骤 2:修改配置文件模板 + +根据您的集群配置修改 `sample.yaml` 文件,请确保正确修改以下字段。 + +- `hosts`:您主机的基本信息(主机名和 IP 地址)以及使用 SSH 连接至主机的信息。 +- `roleGroups.etcd`:etcd 节点。 +- `controlPlaneEndpoint`:负载均衡器地址(可选)。 +- 
`registry`:镜像服务信息(可选)。 + +{{< notice note >}} + +有关更多信息,请参见[编辑配置文件](../../installing-on-linux/introduction/multioverview/#2-编辑配置文件),或参考[完整配置文件](https://github.com/kubesphere/kubekey/blob/release-2.2/docs/config-example.md)中的 `Cluster` 部分获取更多信息。 + +{{}} + +#### 步骤 3:升级集群 + +运行以下命令,将您的集群升级至 KubeSphere 3.3 和 Kubernetes v1.22.12: + +```bash +./kk upgrade --with-kubernetes v1.22.12 --with-kubesphere v3.3.2 -f sample.yaml +``` + +要将 Kubernetes 升级至特定版本,请在 `--with-kubernetes` 标志后明确指定版本号。以下是可用版本:v1.20.x、v1.21.x、* v1.22.x、* v1.23.x 和 * v1.24.x。带星号的版本可能出现边缘节点部分功能不可用的情况。因此,如需使用边缘节点,推荐安装 v1.21.x。 + +{{< notice note >}} + +若要使用 KubeSphere 3.3 的新功能,您需要在升级后启用对应的可插拔组件。 + +{{}} \ No newline at end of file diff --git a/content/zh/docs/v3.4/upgrade/what-changed.md b/content/zh/docs/v3.4/upgrade/what-changed.md new file mode 100644 index 000000000..2842515ac --- /dev/null +++ b/content/zh/docs/v3.4/upgrade/what-changed.md @@ -0,0 +1,12 @@ +--- +title: "升级后的变更" +keywords: "Kubernetes, 升级, KubeSphere, 3.3" +description: "了解升级后的变更。" + +linkTitle: "升级后的变更" +weight: 7600 +--- + +本文说明先前版本现有设置在升级后的变更。如果您想了解 KubeSphere 3.3 的所有新功能和优化,请直接参阅 [3.3.0 版本说明](../../../v3.3/release/release-v330/)和 [3.3.1 版本说明](../../../v3.3/release/release-v331/)和[3.3.2 版本说明](../../../v3.3/release/release-v332/)。 + + diff --git a/content/zh/docs/v3.4/workspace-administration/_index.md b/content/zh/docs/v3.4/workspace-administration/_index.md new file mode 100644 index 000000000..744cad23a --- /dev/null +++ b/content/zh/docs/v3.4/workspace-administration/_index.md @@ -0,0 +1,17 @@ +--- +title: "企业空间管理和用户指南" +description: "本章帮助您更好地管理 KubeSphere 企业空间。" +layout: "second" + +linkTitle: "企业空间管理和用户指南" + +weight: 9000 + +icon: "/images/docs/v3.3/docs.svg" + +--- + +KubeSphere 租户在企业空间中进行操作,管理项目和应用,而企业空间管理员负责管理应用仓库。拥有必要权限的租户可以进一步从应用仓库中部署和使用应用模板。他们也可以使用上传并发布至应用商店的单个应用模板。此外,管理员还控制一个企业空间的网络是否与其他企业空间相互隔离。 + +本章演示企业空间管理员和租户如何在企业空间层级进行操作。 + diff --git a/content/zh/docs/v3.4/workspace-administration/app-repository/_index.md b/content/zh/docs/v3.4/workspace-administration/app-repository/_index.md new file mode 100644 index 000000000..a9388d00b --- /dev/null +++ b/content/zh/docs/v3.4/workspace-administration/app-repository/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: "应用仓库" +weight: 9300 + +_build: + render: false +--- diff --git a/content/zh/docs/v3.4/workspace-administration/app-repository/import-helm-repository.md b/content/zh/docs/v3.4/workspace-administration/app-repository/import-helm-repository.md new file mode 100644 index 000000000..026684550 --- /dev/null +++ b/content/zh/docs/v3.4/workspace-administration/app-repository/import-helm-repository.md @@ -0,0 +1,54 @@ +--- +title: "导入 Helm 仓库" +keywords: "Kubernetes, Helm, KubeSphere, 应用程序" +description: "导入 Helm 仓库至 KubeSphere,为企业空间中的租户提供应用模板。" + +linkTitle: "导入 Helm 仓库" +weight: 9310 +--- + +KubeSphere 构建的应用仓库可以让用户使用基于 Helm Chart 的 Kubernetes 应用程序。KubeSphere 基于[OpenPitrix](https://github.com/openpitrix/openpitrix) 提供应用仓库服务,OpenPitrix 是由青云QingCloud 发起的开源跨云应用程序管理平台,支持基于 Helm Chart 类型的 Kubernetes 应用程序。在应用仓库中,每个应用程序都是基础软件包存储库。您需要事先创建应用仓库,才能从中部署和管理应用。 + +为了创建仓库,您可以使用 HTTP 或 HTTPS 服务器或者对象存储解决方案来存储文件包。具体地说,应用仓库依靠独立于 OpenPitrix 的外部存储,例如 [MinIO](https://min.io/) 对象存储、[QingStor 对象存储](https://github.com/qingstor)以及 [AWS 对象存储](https://aws.amazon.com/cn/what-is-cloud-object-storage/)。这些对象存储服务用于存储开发者创建的配置包和索引文件。注册仓库后,配置包就会自动被索引为可部署的应用程序。 + +本教程演示了如何向 KubeSphere 中添加应用仓库。 + +## 准备工作 + +- 您需要启用 [KubeSphere 应用商店 (OpenPitrix)](../../../pluggable-components/app-store/)。 +- 您需要准备一个应用仓库。请参考 [Helm 
官方文档](https://v2.helm.sh/docs/developing_charts/#the-chart-repository-guide)创建仓库,或者[上传自己的应用至 KubeSphere 公共仓库](../../../workspace-administration/app-repository/upload-app-to-public-repository/)。此外,也可以使用下方步骤中的示例仓库,这里仅用作演示。 +- 您需要创建一个企业空间和一个用户 (`ws-admin`)。该用户必须在企业空间中被授予 `workspace-admin` 角色。有关更多信息,请参考[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 + +## 添加应用仓库 + +1. 以 `ws-admin` 身份登录 KubeSphere Web 控制台。在企业空间页面,转到**应用管理**下的**应用仓库**,然后点击**添加**。 + +2. 在弹出的对话框中,输入应用仓库名称并添加仓库 URL。例如,输入 `https://charts.kubesphere.io/main`。 + + - **名称**:为仓库设置一个简洁明了的名称,方便用户识别。 + - **URL**:遵循 RFC 3986 规范并支持以下三种协议: + + - S3:S3 格式的 URL,例如 `s3..amazonaws.com`,用于访问使用 S3 接口的 Amazon S3 服务。如果您选择此类型,则需要提供 Access Key ID 和 Secret Access Key。 + + - HTTP:例如 `http://docs-repo.gd2.qingstor.com`。示例中包含一个样例应用 NGINX,创建仓库后会自动导入。您可以用应用模板来部署它。 + + - HTTPS:例如 `https://docs-repo.gd2.qingstor.com`。 + + {{< notice note >}} + +如果您想要对 HTTP/HTTPS 进行基本访问验证,可以使用类似此格式的 URL:`http://username:password@docs-repo.gd2.qingstor.com`。 + {{}} + + - **同步间隔**:同步远端应用仓库的周期。 + - **描述**:简单介绍应用仓库的主要特性。 + +3. 输入必需的字段后,点击**验证**以验证 URL。如果 URL 可用,您会在它旁边看到一个绿色的对号,点击**确定**完成操作。 + + {{< notice note >}} + +- 在本地私有云环境中,您可以基于 [ChartMuseum](https://chartmuseum.com/) 构建自己的仓库。然后,您可以开发和上传应用程序至该仓库,再根据您的需求将这些应用程序部署至 KubeSphere。 +- 如果您需要设置 HTTP 基本访问验证,请参考[此文件](https://github.com/helm/chartmuseum#basic-auth)。 + +{{}} + +4. 导入完成后,仓库会列在下方的仓库列表中,并且 KubeSphere 会自动加载该仓库中的所有应用,并添加为应用模板。当用户使用应用模板来部署应用时,可以在该仓库中查看这些应用。有关更多信息,请参见[用应用模板部署应用](../../../project-user-guide/application/deploy-app-from-template/)。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/workspace-administration/app-repository/upload-app-to-public-repository.md b/content/zh/docs/v3.4/workspace-administration/app-repository/upload-app-to-public-repository.md new file mode 100644 index 000000000..2effb6c62 --- /dev/null +++ b/content/zh/docs/v3.4/workspace-administration/app-repository/upload-app-to-public-repository.md @@ -0,0 +1,44 @@ +--- +title: "上传应用至 KubeSphere 的 GitHub 仓库" +keywords: "Kubernetes, Helm, KubeSphere, 应用程序" +description: "上传您自己的应用至 KubeSphere 的 GitHub 仓库。" +linkTitle: "上传应用至 KubeSphere 的 GitHub 仓库" +weight: 9320 +--- + +KubeSphere 提供一个可供测试和开发的应用仓库,用户可以上传应用至该仓库,应用审核通过后即可作为应用模板使用。 + +## 上传应用 + +首先请根据 [Helm 文档](https://helm.sh/docs/topics/charts/)构建您的应用,您可以参考该 KubeSphere 应用仓库中的现有应用。官方应用存储在 [src/main](https://github.com/kubesphere/helm-charts/tree/master/src/main) 路径下,测试应用存储在 [src/test](https://github.com/kubesphere/helm-charts/tree/master/src/test) 路径下。 + +### 步骤 1:开发应用 + +1. [Fork KubeSphere 的应用仓库](https://github.com/kubesphere/helm-charts/fork)。 + +2. 根据 [Helm 文档安装](https://helm.sh/docs/intro/install/) Helm。 + +3. 执行以下命令初始化 Helm 客户端: + + ```bash + helm init --client-only + ``` + +4. 创建您的应用。例如,在 `src/test` 目录下创建名为 `mychart` 的应用。 + + ```bash + cd src/test + helm create mychart + cd mychart + ``` + +5. 
您会看到 Helm 在该目录中生成了相关的模板文件。有关更多信息,请参见[创建应用](../../../application-store/app-developer-guide/helm-developer-guide/#创建应用)。 + +### 步骤 2:提交应用 + +开发完成后,请向 [KubeSphere 官方仓库](https://github.com/kubesphere/helm-charts)提交 PR 以审核您的应用。 + +### 步骤 3:部署应用 + +PR 审核通过后,您的应用即可使用。有关更多信息,请参考[导入 Helm 仓库](../import-helm-repository/)将 `https://charts.kubesphere.io/main` 添加至 KubeSphere。 + diff --git a/content/zh/docs/v3.4/workspace-administration/department-management.md b/content/zh/docs/v3.4/workspace-administration/department-management.md new file mode 100644 index 000000000..9d87bbef8 --- /dev/null +++ b/content/zh/docs/v3.4/workspace-administration/department-management.md @@ -0,0 +1,80 @@ +--- +title: "部门管理" +keywords: 'KubeSphere, Kubernetes, 部门, 角色, 权限, 用户组' +description: '在企业空间中创建部门,将用户分配到不同部门中并授予权限。' +linkTitle: "部门管理" +weight: 9800 +--- + +本文档介绍如何管理企业空间中的部门。 + +企业空间中的部门是用来管理权限的逻辑单元。您可以在部门中设置企业空间角色、多个项目角色以及多个 DevOps 项目角色,还可以将用户分配到部门中以批量管理用户权限。 + +## 准备工作 + +- 您需要[创建一个企业空间和一个用户](../../quick-start/create-workspace-and-project/),该用户需在该企业空间中具有 `workspace-admin` 角色。本文档以 `demo-ws` 企业空间和 `ws-admin` 用户为例。 +- 如需在一个部门中设置项目角色或者 DevOps 项目角色,则需要在该企业空间中[创建至少一个项目或一个 DevOps 项目](../../quick-start/create-workspace-and-project/)。 + +## 创建部门 + +1. 以 `ws-admin` 用户登录 KubeSphere Web 控制台并进入 `demo-ws` 企业空间。 + +2. 在左侧导航栏选择**企业空间设置**下的**部门**,点击右侧的**设置部门**。 + +3. 在**设置部门**对话框中,设置以下参数,然后点击**确定**创建部门。 + + {{< notice note >}} + + * 如果企业空间中已经创建过部门,您可以点击**创建部门**为该企业空间添加更多部门。 + * 您可以在每个部门中创建多个部门和多个子部门。如需创建子部门,在左侧部门树中选择一个部门,然后点击右侧的**创建部门**。 + + {{}} + + * **名称**:为部门设置一个名称。 + * **别名**:为部门设置一个别名。 + * **企业空间角色**:当前企业空间中所有部门成员的角色。 + * **项目角色**:一个项目中所有部门成员的角色。您可以点击**添加项目**来指定多个项目角色。每个项目只能指定一个角色。 + * **DevOps 项目角色**:一个 DevOps 项目中所有部门成员的角色。您可以点击**添加 DevOps 项目**来指定多个 DevOps 项目角色。每个 DevOps 项目只能指定一个角色。 + +4. 部门创建完成后,点击**确定**,然后点击**关闭**。在**部门**页面,可以在左侧的部门树中看到已创建的部门。 + +## 分配用户至部门 + +1. 在**部门**页面,选择左侧部门树中的一个部门,点击右侧的**未分配**。 + +2. 在用户列表中,点击用户右侧的 ,对出现的提示消息点击**确定**,以将用户分配到该部门。 + + {{< notice note >}} + + * 如果部门提供的权限与用户的现有权限重叠,则会为用户添加新的权限。用户的现有权限不受影响。 + * 分配到某个部门的用户可以根据与该部门关联的企业空间角色、项目角色和 DevOps 项目角色来执行操作,而无需被邀请到企业空间、项目和 DevOps 项目中。 + + {{}} + +## 从部门中移除用户 + +1. 在**部门**页面,选择左侧部门树中的一个部门,然后点击右侧的**已分配**。 +2. 在已分配用户列表中,点击用户右侧的 ,在出现的对话框中输入相应的用户名,然后点击**确定**来移除用户。 + +## 删除和编辑部门 + +1. 在**部门**页面,点击**设置部门**。 + +2. 在**设置部门**对话框的左侧,点击需要编辑或删除部门的上级部门。 + +3. 点击部门右侧的 进行编辑。 + + {{< notice note >}} + + 有关详细信息,请参见[创建部门](../../workspace-administration/department-management/#创建部门)。 + + {{}} + +4. 
点击部门右侧的 ,在出现的对话框中输入相应的部门名称,然后点击**确定**来删除该部门。 + + {{< notice note >}} + + * 如果删除的部门包含子部门,则子部门也将被删除。 + * 部门删除后,所有部门成员的授权也将被取消。 + + {{}} \ No newline at end of file diff --git a/content/zh/docs/v3.4/workspace-administration/project-quotas.md b/content/zh/docs/v3.4/workspace-administration/project-quotas.md new file mode 100644 index 000000000..91d6b9bb1 --- /dev/null +++ b/content/zh/docs/v3.4/workspace-administration/project-quotas.md @@ -0,0 +1,55 @@ +--- +title: "项目配额" +keywords: 'KubeSphere, Kubernetes, 项目, 配额, 资源, 请求, 限制' +description: '设置请求和限制,控制项目中的资源使用情况。' +linkTitle: "项目配额" +weight: 9600 +--- + +KubeSphere 使用预留(Request)和限制(Limit)来控制项目中的资源(例如 CPU 和内存)使用情况,在 Kubernetes 中也称为[资源配额](https://kubernetes.io/zh/docs/concepts/policy/resource-quotas/)。请求确保项目能够获得其所需的资源,因为这些资源已经得到明确保障和预留。相反地,限制确保项目不能使用超过特定值的资源。 + +除了 CPU 和内存,您还可以单独为其他对象设置资源配额,例如项目中的容器组、[部署](../../project-user-guide/application-workloads/deployments/)、[任务](../../project-user-guide/application-workloads/jobs/)、[服务](../../project-user-guide/application-workloads/services/)和[配置字典](../../project-user-guide/configuration/configmaps/)。 + +本教程演示如何配置项目配额。 + +## 准备工作 + +您需要有一个可用的企业空间、一个项目和一个用户 (`ws-admin`)。该用户必须在企业空间层级拥有 `admin` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../quick-start/create-workspace-and-project/)。 + +{{< notice note >}} + +如果使用 `project-admin` 用户(该用户在项目层级拥有 `admin` 角色),您也可以为新项目(即其配额尚未设置)设置项目配额。不过,项目配额设置完成之后,`project-admin` 无法更改配额。一般情况下,`ws-admin` 负责为项目设置限制和请求。`project-admin` 负责为项目中的容器[设置限制范围](../../project-administration/container-limit-ranges/)。 + +{{}} + +## 设置项目配额 + +1. 以 `ws-admin` 身份登录控制台,进入一个项目。如果该项目是新创建的项目,您可以在**概览**页面看到项目配额尚未设置。点击**编辑配额**来配置配额。 + +2. 在弹出对话框中,您可以看到 KubeSphere 默认不为项目设置任何请求或限制。要设置请求和限制来控制 CPU 和内存资源,请将滑块移动到期望的值或者直接输入数字。字段留空意味着您不设置任何请求或限制。 + + {{< notice note >}} + + 限制必须大于请求。 + + {{}} + +3. 要为其他资源设置配额,在**项目资源配额**下点击**添加**,选择一个资源或输入资源名称并设置配额。 + +4. 点击**确定**完成配额设置。 + +5. 在**项目设置**下的**基本信息**页面,您可以查看该项目的所有资源配额。 + +6. 要更改项目配额,请在**基本信息**页面点击**编辑项目**,然后选择**编辑项目配额**。 + + {{< notice note >}} + + 对于[多集群项目](../../project-administration/project-and-multicluster-project/#多集群项目),**管理项目**下拉菜单中不会显示**编辑配额**选项。若要为多集群项目设置配额,前往**项目设置**下的**项目配额**,并点击**编辑配额**。请注意,由于多集群项目跨集群运行,您可以为多集群项目针对不同集群分别设置资源配额。 + + {{}} + +7. 
在**项目配额**页面更改项目配额,然后点击**确定**。 + +## 另请参见 + +[容器限制范围](../../project-administration/container-limit-ranges/) diff --git a/content/zh/docs/v3.4/workspace-administration/role-and-member-management.md b/content/zh/docs/v3.4/workspace-administration/role-and-member-management.md new file mode 100644 index 000000000..1623f4e28 --- /dev/null +++ b/content/zh/docs/v3.4/workspace-administration/role-and-member-management.md @@ -0,0 +1,63 @@ +--- +title: "企业空间角色和成员管理" +keywords: "Kubernetes, 企业空间, KubeSphere, 多租户" +description: "自定义企业空间角色并将角色授予用户。" +linkTitle: "企业空间角色和成员管理" +weight: 9400 +--- + +本教程演示如何在企业空间中管理角色和成员。 + +## 准备工作 + +至少已创建一个企业空间,例如 `demo-workspace`。您还需要准备一个用户(如 `ws-admin`),该用户在企业空间级别具有 `workspace-admin` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../quick-start/create-workspace-and-project/)。 + +{{< notice note >}} + +实际角色名称的格式:`workspace name-role name`。例如,在名为 `demo-workspace` 的企业空间中,角色 `admin` 的实际角色名称为 `demo-workspace-admin`。 + +{{}} + +## 内置角色 + +**企业空间角色**页面列出了以下四个可用的内置角色。创建企业空间时,KubeSphere 会自动创建内置角色,并且内置角色无法进行编辑或删除。您只能查看内置角色的权限或将其分配给用户。 + +| **名称** | **描述** | +| ------------------ | ------------------------------------------------------------ | +| `workspace-viewer` | 企业空间观察员,可以查看企业空间中的所有资源。 | +| `workspace-self-provisioner` | 企业空间普通成员,可以查看企业设置、管理应用模板、创建项目和 DevOps 项目。 | +| `workspace-regular` | 企业空间普通成员,可以查看企业空间设置。 | +| `workspace-admin` | 企业空间管理员,可以管理企业空间中的所有资源。 | + +若要查看角色所含权限: + +1. 以 `ws-admin` 身份登录控制台。在**企业空间角色**中,点击一个角色(例如,`workspace-admin`)以查看角色详情。 + +2. 点击**授权用户**选项卡,查看所有被授予该角色的用户。 + +## 创建企业空间角色 + +1. 转到**企业空间设置**下的**企业空间角色**。 + +2. 在**企业空间角色**中,点击**创建**并设置**名称**(例如,`demo-project-admin`)。点击**编辑权限**继续。 + +3. 在弹出的窗口中,权限归类在不同的**功能模块**下。在本示例中,点击**项目管理**,并为该角色选择**项目创建**、**项目管理**和**项目查看**。点击**确定**完成操作。 + + {{< notice note >}} + +**依赖于**表示当前授权项依赖所列出的授权项,勾选该权限后系统会自动选上所有依赖权限。 + + {{}} + +4. 新创建的角色将在**企业空间角色**中列出,点击右侧的 以编辑该角色的信息、权限,或删除该角色。 + +## 邀请新成员 + +1. 转到**企业空间设置**下**企业空间成员**,点击**邀请**。 +2. 点击右侧的 以邀请一名成员加入企业空间,并为其分配一个角色。 + + + +3. 将成员加入企业空间后,点击**确定**。您可以在**企业空间成员**列表中查看新邀请的成员。 + +4. 若要编辑现有成员的角色或将其从企业空间中移除,点击右侧的 并选择对应的操作。 \ No newline at end of file diff --git a/content/zh/docs/v3.4/workspace-administration/upload-helm-based-application.md b/content/zh/docs/v3.4/workspace-administration/upload-helm-based-application.md new file mode 100644 index 000000000..8fc3091b2 --- /dev/null +++ b/content/zh/docs/v3.4/workspace-administration/upload-helm-based-application.md @@ -0,0 +1,38 @@ +--- +title: "上传基于 Helm 的应用程序" +keywords: "Kubernetes, Helm, KubeSphere, OpenPitrix, 应用程序" +description: "了解如何向您的企业空间上传基于 Helm 的应用程序用作应用模板。" +linkTitle: "上传基于 Helm 的应用程序" +weight: 9200 +--- + +KubeSphere 提供应用程序的全生命周期管理。例如,企业空间管理员可以上传或创建新的应用模板,并进行快速测试。此外,管理员会将经过充分测试的应用发布到[应用商店](../../application-store/),这样其他用户能一键部署这些应用。为了开发应用模板,企业空间管理员首先需要将打包的 [Helm chart](https://helm.sh/) 上传到 KubeSphere。 + +本教程演示了如何通过上传打包的 Helm chart 来开发应用模板。 + +## 准备工作 + +- 您需要启用 [KubeSphere 应用商店 (OpenPitrix)](../../pluggable-components/app-store/)。 +- 您需要创建一个企业空间和一个用户 (`project-admin`)。该用户必须被邀请至企业空间中,并被授予 `workspace-self-provisioner` 角色。有关更多信息,请参考[创建企业空间、项目、用户和角色](../../quick-start/create-workspace-and-project/)。 + +## 动手实验 + +1. 用 `project-admin` 帐户登录 KubeSphere。在企业空间页面,转到**应用管理**下的**应用模板**,点击**创建**。 + +2. 在弹出的对话框中,点击**上传**。您可以上传自己的 Helm chart,或者下载 [Nginx chart](/files/application-templates/nginx-0.1.0.tgz) 用它作为示例来完成接下来的步骤。 + +3. 文件包上传完毕后,点击**确定**继续。 + +4. 您可以在**应用信息**下查看应用的基本信息。点击**上传图标**来上传应用的图标。您也可以跳过上传图标,直接点击**确定**。 + + {{< notice note >}} + +应用图标支持的最大分辨率为 96 × 96 像素。 + +{{}} + +5. 
成功上传后,模板列表中会列出应用,状态为**开发中**,意味着该应用正在开发中。上传的应用对同一企业空间下的所有成员均可见。 + +6. 点击应用,随后打开的页面默认选中**版本**标签。点击待提交版本以展开菜单,您可以在菜单上看到**删除**、**测试**、**提交发布**的选项。 + +7. 有关如何将应用发布到应用商店的更多信息,请参考[应用程序生命周期管理](../../application-store/app-lifecycle-management/)。 diff --git a/content/zh/docs/v3.4/workspace-administration/what-is-workspace.md b/content/zh/docs/v3.4/workspace-administration/what-is-workspace.md new file mode 100644 index 000000000..7c22be939 --- /dev/null +++ b/content/zh/docs/v3.4/workspace-administration/what-is-workspace.md @@ -0,0 +1,81 @@ +--- +title: "企业空间概述" +keywords: "Kubernetes, KubeSphere, workspace" +description: "了解 KubeSphere 企业空间的概念以及如何创建和删除企业空间。" +linkTitle: "企业空间概述" +weight: 9100 +--- + +企业空间是用来管理[项目](../../project-administration/)、[DevOps 项目](../../devops-user-guide/)、[应用模板](../upload-helm-based-application/)和应用仓库的一种逻辑单元。您可以在企业空间中控制资源访问权限,也可以安全地在团队内部分享资源。 + +最佳的做法是为租户(集群管理员除外)创建新的企业空间。同一名租户可以在多个企业空间中工作,并且多个租户可以通过不同方式访问同一个企业空间。 + +本教程演示如何创建和删除企业空间。 + +## 准备工作 + +准备一个被授予 `workspaces-manager` 角色的用户,例如[创建企业空间、项目、用户和角色](../../quick-start/create-workspace-and-project/)中创建的 `ws-manager` 帐户。 + +## 创建企业空间 + +1. 以 `ws-manager` 身份登录 KubeSphere Web 控制台。点击左上角的**平台管理**并选择**访问控制**。在**企业空间**页面,点击**创建**。 + + +2. 对于单节点集群,您需要在**基本信息**页面,为创建的企业空间输入名称,并从下拉菜单中选择一名企业空间管理员。点击**创建**。 + + - **名称**:为企业空间设置一个专属名称。 + - **别名**:该企业空间的另一种名称。 + - **管理员**:管理该企业空间的用户。 + - **描述**:企业空间的简短介绍。 + + 对于多节点集群,设置企业空间的基本信息后,点击**下一步**。在**集群设置**页面,选择企业空间需要使用的集群,然后点击**创建**。 + +3. 企业空间创建后将显示在企业空间列表中。 + +4. 点击该企业空间,您可以在**概览**页面查看企业空间中的资源状态。 + +## 删除企业空间 + +在 KubeSphere 中,可以通过企业空间对项目进行分组管理,企业空间下项目的生命周期会受到企业空间的影响。具体来说,企业空间删除之后,企业空间下的项目及关联的资源也同时会被销毁。 + +删除企业空间之前,请先确定您是否要解绑部分关键项目。 + +### 删除前解绑项目 + +若要删除企业空间并保留其中的部分项目,删除前请先执行以下命令: + +``` +kubectl label ns kubesphere.io/workspace- && kubectl patch ns -p '{"metadata":{"ownerReferences":[]}}' --type=merge +``` + +{{< notice note >}} + +以上命令会移除与企业空间关联的标签并移除 ownerReferences。之后,您可以将解绑的项目重新[分配给新的企业空间](../../faq/access-control/add-kubernetes-namespace-to-kubesphere-workspace/)。 + +{{}} + +### 在控制台上删除企业空间 + +从企业空间解绑关键项目后,您可以按照以下步骤删除企业空间。 + +{{< notice note >}} + +如果您使用 kubectl 删除企业空间资源对象,请务必谨慎操作。 + +{{}} + +1. 在企业空间页面,转到**企业空间设置**菜单下的**基本信息**。在**基本信息**页面,您可以查看该企业空间的基本信息,例如项目数量和成员数量。 + + {{< notice note >}} + + 在该页面,您可以点击**编辑信息**更改企业空间的基本信息(企业空间名称无法更改),也可以开启或关闭[网络隔离](../../workspace-administration/workspace-network-isolation/)。 + + {{}} + +2. 
若要删除企业空间,点击**管理 > 删除企业空间**。在出现的对话框中输入企业空间的名称,然后点击**确定**。 + + {{< notice warning >}} + + 企业空间删除后将无法恢复,并且企业空间下的资源也同时会被销毁。 + + {{}} diff --git a/content/zh/docs/v3.4/workspace-administration/workspace-network-isolation.md b/content/zh/docs/v3.4/workspace-administration/workspace-network-isolation.md new file mode 100644 index 000000000..81d0e02cb --- /dev/null +++ b/content/zh/docs/v3.4/workspace-administration/workspace-network-isolation.md @@ -0,0 +1,37 @@ +--- +title: "企业空间网络隔离" +keywords: 'KubeSphere, Kubernetes, Calico, 网络策略' +description: '在您的企业空间中开启或关闭网络策略。' +linkTitle: "企业空间网络隔离" +weight: 9500 +--- + +## 准备工作 + +- 已经启用[网络策略](../../pluggable-components/network-policy/)。 + +- 需要使用拥有 `workspace-admin` 角色的用户。例如,使用在[创建企业空间、项目、用户和角色](../../quick-start/create-workspace-and-project/)教程中创建的 `ws-admin` 用户。 + + {{< notice note >}} + + 关于网络策略的实现,您可以参考 [KubeSphere NetworkPolicy](https://github.com/kubesphere/community/blob/master/sig-network/concepts-and-designs/kubesphere-network-policy.md)。 + + {{}} + +## 开启或关闭企业空间网络隔离 + +企业空间网络隔离默认关闭。您可以在**企业空间设置**下的**基本信息**页面开启网络隔离。 + +{{< notice note >}} + +当网络隔离开启时,默认允许出站流量,而不同企业空间的进站流量将被拒绝。如果您需要自定义网络策略,则需要开启[项目网络隔离](../../project-administration/project-network-isolation/)并在**项目设置**中添加网络策略。 + +{{}} + +您也可以在**基本信息**页面关闭网络隔离。 + +## 最佳做法 + +要确保企业空间中的所有容器组都安全,一个最佳做法是开启企业空间网络隔离。 + +当网络隔离开启时,其他企业空间无法访问该企业空间。如果企业空间的默认网络隔离无法满足您的需求,请开启项目网络隔离并自定义您的项目网络策略。 diff --git a/content/zh/docs/v3.4/workspace-administration/workspace-quotas.md b/content/zh/docs/v3.4/workspace-administration/workspace-quotas.md new file mode 100644 index 000000000..e9041cae2 --- /dev/null +++ b/content/zh/docs/v3.4/workspace-administration/workspace-quotas.md @@ -0,0 +1,41 @@ +--- +title: "企业空间配额" +keywords: 'KubeSphere, Kubernetes, 企业空间, 配额' +description: '设置企业空间配额以管理企业空间中所有项目和 DevOps 项目的总资源用量。' +linkTitle: "企业空间配额" +weight: 9700 +--- + +企业空间配额用于管理企业空间中所有项目和 DevOps 项目的总资源用量。企业空间配额与[项目配额](../project-quotas/)相似,也包含 CPU 和内存的预留(Request)和限制(Limit)。预留确保企业空间中的项目能够获得其所需的资源,因为这些资源已经得到明确保障和预留。相反,限制则确保企业空间中的所有项目的资源用量不能超过特定数值。 + +在[多集群架构](../../multicluster-management/)中,由于您需要[将一个或多个集群分配到企业空间中](../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/),您可以设置该企业空间在不同集群上的资源用量。 + +本教程演示如何管理企业空间中的资源配额。 + +## 准备工作 + +您需要准备一个可用的企业空间和一个用户 (`ws-manager`)。该用户必须在平台层级具有 `workspaces-manager` 角色。有关更多信息,请参阅[创建企业空间、项目、用户和角色](../../quick-start/create-workspace-and-project/)。 + +## 设置企业空间配额 + +1. 使用 `ws-manager` 用户登录 KubeSphere Web 控制台,进入企业空间。 + +2. 在**企业空间设置**下,选择**企业空间配额**。 + +3. **企业空间配额**页面列有分配到该企业空间的全部可用集群,以及各集群的 CPU 限额、CPU 需求、内存限额和内存需求。 + +4. 在列表右侧点击**编辑配额**即可查看企业空间配额信息。默认情况下,KubeSphere 不为企业空间设置任何资源预留或资源限制。如需设置资源预留或资源限制来管理 CPU 和内存资源,您可以移动 至期望数值或直接输入期望数值。将字段设为空值表示不对资源进行预留或限制。 + + {{< notice note >}} + + 资源预留不能超过资源限制。 + + {{}} + +5. 配额设置完成后,点击**确定**。 + +## 另请参见 + +[项目配额](../project-quotas/) + +[容器限制范围](../../project-administration/container-limit-ranges/) \ No newline at end of file