Mirror of https://github.com/kubesphere/kubekey.git (synced 2025-12-26 01:22:51 +00:00)

Commit 0696c4a96c: Merge remote-tracking branch 'origin/master' into release-3.1
@@ -22,7 +22,7 @@ jobs:
with:
go-version: 1.19
- name: golangci-lint
uses: golangci/golangci-lint-action@v3.4.0
uses: golangci/golangci-lint-action@v3.6.0
with:
version: v1.50.1
working-directory: ${{matrix.working-directory}}

@@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
if: github.repository == 'kubesphere/kubekey'
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3

- name: Set up Go 1.19
uses: actions/setup-go@v3
@@ -36,6 +36,7 @@ type Chart struct {
Version string `yaml:"version" json:"version,omitempty"`
ValuesFile string `yaml:"valuesFile" json:"valuesFile,omitempty"`
Values []string `yaml:"values" json:"values,omitempty"`
Wait bool `yaml:"wait" json:"wait,omitempty"`
}

type Yaml struct {
@@ -173,9 +173,6 @@ func (cfg *ClusterSpec) GroupHosts() map[string][]*KubeHost {
if len(roleGroups[Etcd]) == 0 && cfg.Etcd.Type == KubeKey {
logger.Log.Fatal(errors.New("The number of etcd cannot be 0"))
}
if len(roleGroups[Registry]) > 1 {
logger.Log.Fatal(errors.New("The number of registry node cannot be greater than 1."))
}

for _, host := range roleGroups[ControlPlane] {
host.SetRole(Master)
@@ -41,7 +41,7 @@ const (
DefaultSSHTimeout = 30
DefaultEtcdVersion = "v3.5.6"
DefaultEtcdPort = "2379"
DefaultDockerVersion = "20.10.8"
DefaultDockerVersion = "24.0.6"
DefaultContainerdVersion = "1.6.4"
DefaultRuncVersion = "v1.1.1"
DefaultCrictlVersion = "v1.24.0"
@@ -33,6 +33,7 @@ type CalicoCfg struct {
VethMTU int `yaml:"vethMTU" json:"vethMTU,omitempty"`
Ipv4NatOutgoing *bool `yaml:"ipv4NatOutgoing" json:"ipv4NatOutgoing,omitempty"`
DefaultIPPOOL *bool `yaml:"defaultIPPOOL" json:"defaultIPPOOL,omitempty"`
EnableTypha *bool `yaml:"enableTypha" json:"enableTypha,omitempty"`
}

type FlannelCfg struct {

@@ -183,6 +184,14 @@ func (c *CalicoCfg) EnableDefaultIPPOOL() bool {
return *c.DefaultIPPOOL
}

// Typha is used to determine whether to enable calico Typha
func (c *CalicoCfg) Typha() bool {
if c.EnableTypha == nil {
return false
}
return *c.EnableTypha
}

// EnableInit is used to determine whether to create default network
func (h *HybridnetCfg) EnableInit() bool {
if h.Init == nil {
@@ -93,6 +93,7 @@ func InstallChart(kubeConf *common.KubeConf, addon *kubekeyapiv1alpha2.Addon, ku
client.Keyring = defaultKeyring()
client.RepoURL = addon.Sources.Chart.Repo
client.Version = addon.Sources.Chart.Version
client.Wait = addon.Sources.Chart.Wait
//client.Force = true

if client.Version == "" && client.Devel {
@@ -105,9 +105,13 @@ func (g *GenerateCerts) Execute(runtime connector.Runtime) error {

var altName cert.AltNames

dnsList := []string{"localhost", g.KubeConf.Cluster.Registry.PrivateRegistry, runtime.GetHostsByRole(common.Registry)[0].GetName()}
ipList := []net.IP{net.IPv4(127, 0, 0, 1), net.IPv6loopback, netutils.ParseIPSloppy(runtime.GetHostsByRole(common.Registry)[0].GetInternalAddress())}
dnsList := []string{"localhost", RegistryCertificateBaseName}
ipList := []net.IP{net.IPv4(127, 0, 0, 1), net.IPv6loopback}

for _, h := range runtime.GetHostsByRole(common.Registry) {
dnsList = append(dnsList, h.GetName())
ipList = append(ipList, netutils.ParseIPSloppy(h.GetInternalAddress()))
}
altName.DNSNames = dnsList
altName.IPs = ipList
@@ -250,18 +250,10 @@ func InstallHarbor(i *InstallRegistryModule) []task.Interface {
}

generateHarborConfig := &task.RemoteTask{
Name: "GenerateHarborConfig",
Desc: "Generate harbor config",
Hosts: i.Runtime.GetHostsByRole(common.Registry),
Action: &action.Template{
Template: templates.HarborConfigTempl,
Dst: "/opt/harbor/harbor.yml",
Data: util.Data{
"Domain": i.KubeConf.Cluster.Registry.PrivateRegistry,
"Certificate": fmt.Sprintf("%s.pem", i.KubeConf.Cluster.Registry.PrivateRegistry),
"Key": fmt.Sprintf("%s-key.pem", i.KubeConf.Cluster.Registry.PrivateRegistry),
},
},
Name: "GenerateHarborConfig",
Desc: "Generate harbor config",
Hosts: i.Runtime.GetHostsByRole(common.Registry),
Action: new(GenerateHarborConfig),
Parallel: true,
Retry: 1,
}
@@ -18,6 +18,9 @@ package registry

import (
"fmt"
"github.com/kubesphere/kubekey/v3/cmd/kk/pkg/bootstrap/registry/templates"
"github.com/kubesphere/kubekey/v3/cmd/kk/pkg/core/action"
"github.com/kubesphere/kubekey/v3/cmd/kk/pkg/core/util"
"path/filepath"
"strings"

@@ -212,6 +215,29 @@ func (g *SyncHarborPackage) Execute(runtime connector.Runtime) error {
return nil
}

type GenerateHarborConfig struct {
common.KubeAction
}

func (g *GenerateHarborConfig) Execute(runtime connector.Runtime) error {
host := runtime.RemoteHost()
templateAction := action.Template{
Template: templates.HarborConfigTempl,
Dst: "/opt/harbor/harbor.yml",
Data: util.Data{
"Domain": host.GetName(),
"Certificate": fmt.Sprintf("%s.pem", RegistryCertificateBaseName),
"Key": fmt.Sprintf("%s-key.pem", RegistryCertificateBaseName),
"Password": templates.Password(g.KubeConf, RegistryCertificateBaseName),
},
}
templateAction.Init(nil, nil)
if err := templateAction.Execute(runtime); err != nil {
return err
}
return nil
}

type StartHarbor struct {
common.KubeAction
}
@@ -14,6 +14,9 @@ limitations under the License.
package templates

import (
"github.com/kubesphere/kubekey/v3/cmd/kk/pkg/common"
"github.com/kubesphere/kubekey/v3/cmd/kk/pkg/registry"
"strings"
"text/template"

"github.com/lithammer/dedent"

@@ -133,3 +136,14 @@ proxy:

`)))
)

func Password(kubeConf *common.KubeConf, domain string) string {
auths := registry.DockerRegistryAuthEntries(kubeConf.Cluster.Registry.Auths)
for repo, entry := range auths {
if strings.Contains(repo, domain) {
return entry.Password
}
}

return "Harbor12345"
}
@@ -45,7 +45,6 @@ import (
"github.com/kubesphere/kubekey/v3/cmd/kk/pkg/files"
"github.com/kubesphere/kubekey/v3/cmd/kk/pkg/images"
"github.com/kubesphere/kubekey/v3/cmd/kk/pkg/kubernetes/templates"
"github.com/kubesphere/kubekey/v3/cmd/kk/pkg/kubernetes/templates/v1beta2"
"github.com/kubesphere/kubekey/v3/cmd/kk/pkg/utils"
)

@@ -249,11 +248,11 @@ func (g *GenerateKubeadmConfig) Execute(runtime connector.Runtime) error {
}
}

_, ApiServerArgs := util.GetArgs(v1beta2.GetApiServerArgs(g.WithSecurityEnhancement, g.KubeConf.Cluster.Kubernetes.EnableAudit()), g.KubeConf.Cluster.Kubernetes.ApiServerArgs)
_, ControllerManagerArgs := util.GetArgs(v1beta2.GetControllermanagerArgs(g.KubeConf.Cluster.Kubernetes.Version, g.WithSecurityEnhancement), g.KubeConf.Cluster.Kubernetes.ControllerManagerArgs)
_, SchedulerArgs := util.GetArgs(v1beta2.GetSchedulerArgs(g.WithSecurityEnhancement), g.KubeConf.Cluster.Kubernetes.SchedulerArgs)
_, ApiServerArgs := util.GetArgs(templates.GetApiServerArgs(g.WithSecurityEnhancement, g.KubeConf.Cluster.Kubernetes.EnableAudit()), g.KubeConf.Cluster.Kubernetes.ApiServerArgs)
_, ControllerManagerArgs := util.GetArgs(templates.GetControllermanagerArgs(g.KubeConf.Cluster.Kubernetes.Version, g.WithSecurityEnhancement), g.KubeConf.Cluster.Kubernetes.ControllerManagerArgs)
_, SchedulerArgs := util.GetArgs(templates.GetSchedulerArgs(g.WithSecurityEnhancement), g.KubeConf.Cluster.Kubernetes.SchedulerArgs)

checkCgroupDriver, err := v1beta2.GetKubeletCgroupDriver(runtime, g.KubeConf)
checkCgroupDriver, err := templates.GetKubeletCgroupDriver(runtime, g.KubeConf)
if err != nil {
return err
}

@@ -273,8 +272,8 @@ func (g *GenerateKubeadmConfig) Execute(runtime connector.Runtime) error {
}

templateAction := action.Template{
Template: v1beta2.KubeadmConfig,
Dst: filepath.Join(common.KubeConfigDir, v1beta2.KubeadmConfig.Name()),
Template: templates.KubeadmConfig,
Dst: filepath.Join(common.KubeConfigDir, templates.KubeadmConfig.Name()),
Data: util.Data{
"IsInitCluster": g.IsInitConfiguration,
"ImageRepo": strings.TrimSuffix(images.GetImage(runtime, g.KubeConf, "kube-apiserver").ImageRepo(), "/kube-apiserver"),

@@ -296,12 +295,13 @@ func (g *GenerateKubeadmConfig) Execute(runtime connector.Runtime) error {
"ExternalEtcd": externalEtcd,
"NodeCidrMaskSize": g.KubeConf.Cluster.Kubernetes.NodeCidrMaskSize,
"CriSock": g.KubeConf.Cluster.Kubernetes.ContainerRuntimeEndpoint,
"ApiServerArgs": v1beta2.UpdateFeatureGatesConfiguration(ApiServerArgs, g.KubeConf),
"ApiServerArgs": templates.UpdateFeatureGatesConfiguration(ApiServerArgs, g.KubeConf),
"EnableAudit": g.KubeConf.Cluster.Kubernetes.EnableAudit(),
"ControllerManagerArgs": v1beta2.UpdateFeatureGatesConfiguration(ControllerManagerArgs, g.KubeConf),
"SchedulerArgs": v1beta2.UpdateFeatureGatesConfiguration(SchedulerArgs, g.KubeConf),
"KubeletConfiguration": v1beta2.GetKubeletConfiguration(runtime, g.KubeConf, g.KubeConf.Cluster.Kubernetes.ContainerRuntimeEndpoint, g.WithSecurityEnhancement),
"KubeProxyConfiguration": v1beta2.GetKubeProxyConfiguration(g.KubeConf),
"ControllerManagerArgs": templates.UpdateFeatureGatesConfiguration(ControllerManagerArgs, g.KubeConf),
"SchedulerArgs": templates.UpdateFeatureGatesConfiguration(SchedulerArgs, g.KubeConf),
"KubeletConfiguration": templates.GetKubeletConfiguration(runtime, g.KubeConf, g.KubeConf.Cluster.Kubernetes.ContainerRuntimeEndpoint, g.WithSecurityEnhancement),
"KubeProxyConfiguration": templates.GetKubeProxyConfiguration(g.KubeConf),
"IsV1beta3": versionutil.MustParseSemantic(g.KubeConf.Cluster.Kubernetes.Version).AtLeast(versionutil.MustParseSemantic("v1.22.0")),
"IsControlPlane": host.IsRole(common.Master),
"CgroupDriver": checkCgroupDriver,
"BootstrapToken": bootstrapToken,
@@ -14,7 +14,7 @@
limitations under the License.
*/

package v1beta2
package templates

import (
"fmt"

@@ -38,7 +38,7 @@ var (
dedent.Dedent(`
{{- if .IsInitCluster -}}
---
apiVersion: kubeadm.k8s.io/v1beta2
apiVersion: kubeadm.k8s.io/{{ if .IsV1beta3 }}v1beta3{{ else }}v1beta2{{ end }}
kind: ClusterConfiguration
etcd:
{{- if .EtcdTypeIsKubeadm }}

@@ -106,7 +106,7 @@ scheduler:
{{ toYaml .SchedulerArgs | indent 4 }}

---
apiVersion: kubeadm.k8s.io/v1beta2
apiVersion: kubeadm.k8s.io/{{ if .IsV1beta3 }}v1beta3{{ else }}v1beta2{{ end }}
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: {{ .AdvertiseAddress }}

@@ -128,7 +128,7 @@ kind: KubeletConfiguration

{{- else -}}
---
apiVersion: kubeadm.k8s.io/v1beta2
apiVersion: kubeadm.k8s.io/{{ if .IsV1beta3 }}v1beta3{{ else }}v1beta2{{ end }}
kind: JoinConfiguration
discovery:
bootstrapToken:

@@ -159,14 +159,10 @@ var (
FeatureGatesDefaultConfiguration = map[string]bool{
"RotateKubeletServerCertificate": true, //k8s 1.7+
"TTLAfterFinished": true, //k8s 1.12+
"ExpandCSIVolumes": true, //k8s 1.14+
"CSIStorageCapacity": true, //k8s 1.19+
}
FeatureGatesSecurityDefaultConfiguration = map[string]bool{
"RotateKubeletServerCertificate": true, //k8s 1.7+
"TTLAfterFinished": true, //k8s 1.12+
"ExpandCSIVolumes": true, //k8s 1.14+
"CSIStorageCapacity": true, //k8s 1.19+
"SeccompDefault": true, //kubelet
}
@@ -18,16 +18,17 @@ package pipelines

import (
"fmt"

"github.com/kubesphere/kubekey/v3/cmd/kk/pkg/artifact"
"github.com/kubesphere/kubekey/v3/cmd/kk/pkg/binaries"
"github.com/kubesphere/kubekey/v3/cmd/kk/pkg/bootstrap/os"
"github.com/kubesphere/kubekey/v3/cmd/kk/pkg/bootstrap/precheck"
"github.com/kubesphere/kubekey/v3/cmd/kk/pkg/bootstrap/registry"
"github.com/kubesphere/kubekey/v3/cmd/kk/pkg/common"
"github.com/kubesphere/kubekey/v3/cmd/kk/pkg/core/logger"
"github.com/kubesphere/kubekey/v3/cmd/kk/pkg/core/module"
"github.com/kubesphere/kubekey/v3/cmd/kk/pkg/core/pipeline"
"github.com/kubesphere/kubekey/v3/cmd/kk/pkg/filesystem"
"github.com/pkg/errors"
)

func NewInitRegistryPipeline(runtime *common.KubeRuntime) error {

@@ -74,6 +75,10 @@ func InitRegistry(args common.Argument, downloadCmd string) error {
return err
}

if len(runtime.GetHostsByRole("registry")) <= 0 {
logger.Log.Fatal(errors.New("The number of registry must be greater than 0."))
}

if err := NewInitRegistryPipeline(runtime); err != nil {
return err
}
@@ -109,7 +109,7 @@ func deployCalico(d *DeployNetworkPluginModule) []task.Interface {
"CalicoNodeImage": images.GetImage(d.Runtime, d.KubeConf, "calico-node").ImageName(),
"CalicoFlexvolImage": images.GetImage(d.Runtime, d.KubeConf, "calico-flexvol").ImageName(),
"CalicoControllersImage": images.GetImage(d.Runtime, d.KubeConf, "calico-kube-controllers").ImageName(),
"TyphaEnabled": len(d.Runtime.GetHostsByRole(common.K8s)) > 50,
"TyphaEnabled": len(d.Runtime.GetHostsByRole(common.K8s)) > 50 || d.KubeConf.Cluster.Network.Calico.Typha(),
"VethMTU": d.KubeConf.Cluster.Network.Calico.VethMTU,
"NodeCidrMaskSize": d.KubeConf.Cluster.Kubernetes.NodeCidrMaskSize,
"IPIPMode": d.KubeConf.Cluster.Network.Calico.IPIPMode,

@@ -137,7 +137,7 @@ func deployCalico(d *DeployNetworkPluginModule) []task.Interface {
"CalicoFlexvolImage": images.GetImage(d.Runtime, d.KubeConf, "calico-flexvol").ImageName(),
"CalicoControllersImage": images.GetImage(d.Runtime, d.KubeConf, "calico-kube-controllers").ImageName(),
"CalicoTyphaImage": images.GetImage(d.Runtime, d.KubeConf, "calico-typha").ImageName(),
"TyphaEnabled": len(d.Runtime.GetHostsByRole(common.K8s)) > 50,
"TyphaEnabled": len(d.Runtime.GetHostsByRole(common.K8s)) > 50 || d.KubeConf.Cluster.Network.Calico.Typha(),
"VethMTU": d.KubeConf.Cluster.Network.Calico.VethMTU,
"NodeCidrMaskSize": d.KubeConf.Cluster.Kubernetes.NodeCidrMaskSize,
"IPIPMode": d.KubeConf.Cluster.Network.Calico.IPIPMode,
@@ -36,6 +36,8 @@ const (
V124
V125
V126
V127
V128
)

var VersionList = []Version{

@@ -47,6 +49,8 @@ var VersionList = []Version{
V124,
V125,
V126,
V127,
V128,
}

func (v Version) String() string {

@@ -67,6 +71,10 @@ func (v Version) String() string {
return "v1.25"
case V126:
return "v1.26"
case V127:
return "v1.27"
case V128:
return "v1.28"
default:
return "invalid option"
}
@@ -0,0 +1,229 @@
## 1. Harbor overview

Harbor is developed by VMware in Go. It is mainly used to store container images, and the stored images can be managed through a web UI. Harbor also provides role-based access control, remote image replication, audit logging, and other features. See the official documentation.

### 1. Harbor features

1) Role-based access control: user permissions are assigned per project, and a project can contain multiple images.

2) Audit management: every operation performed on the registry can be found in the audit log.

3) Image replication: with the proper configuration, images can be synchronized between two Harbor servers.

4) Vulnerability scanning: Harbor periodically scans the images in the registry and runs policy checks to prevent vulnerable images from being deployed.

### 2. Harbor high-availability options

The two most common ways to run Harbor in high availability are:

1) Install two Harbor registries that share the same storage (typically NFS shared storage).



2) Install two Harbor registries and configure replication between them.



Because the first approach additionally requires Redis, PostgreSQL, and an NFS service, the second approach is used below for the Harbor HA setup.

# Harbor registry high-availability design

Two Harbor registries are deployed as mutual primary/backup, as shown in the figure below.



Note on the primary/backup scheme:

Because the VIP floats between the nodes, the two nodes are effectively primary and backup for each other. When deploying Harbor, the hostname in harbor.yml on each node must not be set to the floating address (reg.harbor.4a or 192.168.10.200); it must be set to that node's own IP or hostname.
Previously, setting the VIP domain reg.harbor.4a as the hostname in harbor.yml on both nodes (hostname: reg.harbor.4a) caused a problem: only the primary node could be added as a replication target, so a target for the backup node could not be created in the primary node's registry management (and even when added, it could not be reached).
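For illustration, the relevant harbor.yml fields on each node would look roughly like the sketch below (certificate paths and the admin password are placeholders; only hostname, https and harbor_admin_password are shown):

```
# /opt/harbor/harbor.yml on the first node (the second node uses its own name, e.g. master2)
hostname: master1            # this node's own hostname or IP, never the VIP
https:
  port: 7443
  certificate: /path/to/master1.pem
  private_key: /path/to/master1-key.pem
harbor_admin_password: Harbor12345
```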
The required files (located in the script folder of the source tree; the keepalived image can be downloaded from the Alibaba Cloud registry) are:

```
# tree .
.
├── harborCreateRegistriesAndReplications.sh
├── keepalived21.tar
├── kk
└── harbor_keepalived
    ├── check_harbor.sh
    ├── docker-compose-keepalived-backup.yaml
    ├── docker-compose-keepalived-master.yaml
    ├── keepalived-backup.conf
    └── keepalived-master.conf

1 directory, 8 files
```

kk: the kubekey binary built from the source code that supports multi-node Harbor registries (including this PR).

harborCreateRegistriesAndReplications.sh: the script that configures the Harbor registries as mutual primary/backup.

keepalived21.tar: the keepalived docker image.

harbor_keepalived: the docker-compose deployment files for the keepalived master and slave.

## Deploying a multi-node Harbor registry with kubekey

By extending the kubekey source code, kubekey can now deploy the Harbor registry on multiple nodes, all configured with the same set of Harbor certificates. The certificate contains the hostnames and IPs of all Harbor nodes as subject alternative names.

This will later be integrated into the one-click deployment script: multiple Harbor registries are deployed by assigning several nodes to the registry role (see the configuration sketch below). Two Harbor registries are recommended; deploying more only wastes resources.
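As a sketch, the relevant part of the kubekey cluster configuration could look like this (host names, addresses and passwords are placeholders; field names follow kubekey's config-sample.yaml):

```
apiVersion: kubekey.kubesphere.io/v1alpha2
kind: Cluster
metadata:
  name: sample
spec:
  hosts:
  - {name: master1, address: 192.168.122.61, internalAddress: 192.168.122.61, user: root, password: "YourPassword"}
  - {name: master2, address: 192.168.122.62, internalAddress: 192.168.122.62, user: root, password: "YourPassword"}
  roleGroups:
    etcd:
    - master1
    control-plane:
    - master1
    worker:
    - master2
    # two registry nodes -> two Harbor instances sharing one certificate
    registry:
    - master1
    - master2
  registry:
    type: harbor
    privateRegistry: "dockerhub.kubekey.local"
```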
## Configuring the Harbor registries as mutual primary/backup

After the Harbor registries are deployed, the Harbor API is called to register the peer registry and to create the replication rules.

For example, the registries on master1 and master2 are configured as follows:

```
#!/bin/bash

Harbor_master1_Address=master1:7443
master1_Address=192.168.122.61
Harbor_master2_Address=master2:7443
master2_Address=192.168.122.62

Harbor_User=admin               # Harbor login user
Harbor_Passwd="Harbor12345"     # Harbor login password
Harbor_UserPwd="$Harbor_User:$Harbor_Passwd"

# create registry
curl -k -u $Harbor_UserPwd -X POST -H "Content-Type: application/json" "https://${Harbor_master1_Address}/api/v2.0/registries" -d "{\"name\": \"master1_2_master2\", \"type\": \"harbor\", \"url\":\"https://${master2_Address}:7443\", \"credential\": {\"access_key\": \"${Harbor_User}\", \"access_secret\": \"${Harbor_Passwd}\"}, \"insecure\": true}"
# create registry
curl -k -u $Harbor_UserPwd -X POST -H "Content-Type: application/json" "https://${Harbor_master2_Address}/api/v2.0/registries" -d "{\"name\": \"master2_2_master1\", \"type\": \"harbor\", \"url\":\"https://${master1_Address}:7443\", \"credential\": {\"access_key\": \"${Harbor_User}\", \"access_secret\": \"${Harbor_Passwd}\"}, \"insecure\": true}"

# createReplication
curl -k -u $Harbor_UserPwd -X POST -H "Content-Type: application/json" "https://${Harbor_master1_Address}/api/v2.0/replication/policies" -d "{\"name\": \"master1_2_master2\", \"enabled\": true, \"deletion\":true, \"override\":true, \"replicate_deletion\":true, \"dest_registry\":{ \"id\": 1, \"name\": \"master1_2_master2\"}, \"trigger\": {\"type\": \"event_based\"}, \"dest_namespace_replace_count\":1 }"

# createReplication
curl -k -u $Harbor_UserPwd -X POST -H "Content-Type: application/json" "https://${Harbor_master2_Address}/api/v2.0/replication/policies" -d "{\"name\": \"master2_2_master1\", \"enabled\": true, \"deletion\":true, \"override\":true, \"replicate_deletion\":true, \"dest_registry\":{ \"id\": 1, \"name\": \"master2_2_master1\"}, \"trigger\": {\"type\": \"event_based\"}, \"dest_namespace_replace_count\":1 }"
```
## Managing the Harbor service VIP with keepalived

The keepalived service itself is managed with docker-compose.

The keepalived master is configured as follows:

```
# cat docker-compose-keepalived-master.yaml
version: '3.8'

# Reference docker-compose file for running the container standalone
# See the image's README.md for more configuration options
services:
  keepalived:
    image: 'dockerhub.kubekey.local/kubesphere/keepalived:2.1'
    privileged: true
    network_mode: host
    volumes:
      - ./keepalived-master.conf:/srv/conf/keepalived/keepalived.conf
      - ./check_harbor.sh:/srv/conf/keepalived/check_harbor.sh
    container_name: keepalived
    restart: on-failure

# cat keepalived-master.conf
vrrp_script check_harbor {
    script "/srv/conf/keepalived/check_harbor.sh"
    interval 10           # check interval in seconds (default 1)
    fall 2                # consecutive failures before the node is marked as failed
    rise 2                # consecutive successes before the node is marked healthy again
    timeout 5
    init_fail
}
global_defs {
    script_user root
    router_id harbor-ha
    enable_script_security
    lvs_sync_daemon ens3 VI_1
}
vrrp_instance VI_1 {
    state MASTER
    interface ens3
    virtual_router_id 31  # must be unique if several keepalived clusters share the same LAN
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass k8s-test
    }
    virtual_ipaddress {
        192.168.122.59
    }
    track_script {
        check_harbor
    }
}
# cat check_harbor.sh
#!/bin/bash
#count=$(docker-compose -f /opt/harbor/docker-compose.yml ps -a|grep healthy|wc -l)
# Do not call docker-compose frequently here, otherwise many temporary directories (/tmp/_MEI*) get created
count=$(docker ps |grep goharbor|grep healthy|wc -l)
status=$(ss -tlnp|grep -w 443|wc -l)
if [ $count -ne 11 ];then
  exit 8
elif [ $status -lt 2 ];then
  exit 9
else
  exit 0
fi
```
The keepalived slave differs from the master in the following settings:

1. state: BACKUP instead of MASTER

2. priority: 100 on the master, 50 on the slave

```
# cat docker-compose-keepalived-backup.yaml
version: '3.8'

# Reference docker-compose file for running the container standalone
# See the image's README.md for more configuration options
services:
  keepalived:
    image: 'dockerhub.kubekey.local/kubesphere/keepalived:2.1'
    privileged: true
    network_mode: host
    volumes:
      - ./keepalived-backup.conf:/srv/conf/keepalived/keepalived.conf
      - ./check_harbor.sh:/srv/conf/keepalived/check_harbor.sh
    container_name: keepalived
    restart: on-failure

# cat keepalived-backup.conf
vrrp_script check_harbor {
    script "/srv/conf/keepalived/check_harbor.sh"
    interval 10           # check interval in seconds (default 1)
    fall 2                # consecutive failures before the node is marked as failed
    rise 2                # consecutive successes before the node is marked healthy again
    timeout 5
    init_fail
}
global_defs {
    script_user root
    router_id harbor-ha
    enable_script_security
    lvs_sync_daemon ens3 VI_1
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens3
    virtual_router_id 31  # must be unique if several keepalived clusters share the same LAN
    priority 50
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass k8s-test
    }
    virtual_ipaddress {
        192.168.122.59
    }
    track_script {
        check_harbor
    }
}
```
The parameters that most often need to change are the keepalived interface and the VIP address; in a real environment these two values can be parameterized per node, for example as sketched below.
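One way to do this is to keep a configuration template and render it on each node with envsubst (the template file keepalived.conf.tpl and its ${KA_INTERFACE}/${KA_VIP} placeholders are hypothetical and not part of this PR):

```
# render this node's keepalived.conf from a hypothetical template
export KA_INTERFACE=ens3
export KA_VIP=192.168.122.59
envsubst '${KA_INTERFACE} ${KA_VIP}' < keepalived.conf.tpl > keepalived-master.conf
```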
(Three binary image files — the figures referenced above, 33 KiB, 34 KiB, and 42 KiB — are added in this commit; contents not shown.)
@@ -91,6 +91,7 @@
| v1.25.11 | :white_check_mark: |
| v1.25.12 | :white_check_mark: |
| v1.25.13 | :white_check_mark: |
| v1.25.14 | :white_check_mark: |
| v1.26.0 | :white_check_mark: |
| v1.26.1 | :white_check_mark: |
| v1.26.2 | :white_check_mark: |

@@ -100,11 +101,14 @@
| v1.26.6 | :white_check_mark: |
| v1.26.7 | :white_check_mark: |
| v1.26.8 | :white_check_mark: |
| v1.26.9 | :white_check_mark: |
| v1.27.0 | :white_check_mark: |
| v1.27.1 | :white_check_mark: |
| v1.27.2 | :white_check_mark: |
| v1.27.3 | :white_check_mark: |
| v1.27.4 | :white_check_mark: |
| v1.27.5 | :white_check_mark: |
| v1.27.6 | :white_check_mark: |
| v1.28.0 | :white_check_mark: |
| v1.28.1 | :white_check_mark: |
| v1.28.2 | :white_check_mark: |
@@ -0,0 +1,64 @@
#!/bin/bash


function createRegistries() {

# create registry
curl -k -u $Harbor_UserPwd -X POST -H "Content-Type: application/json" "https://${Harbor_master1_Address}/api/v2.0/registries" -d "{\"name\": \"master1_2_master2\", \"type\": \"harbor\", \"url\":\"https://${master2_Address}:7443\", \"credential\": {\"access_key\": \"${Harbor_User}\", \"access_secret\": \"${Harbor_Passwd}\"}, \"insecure\": true}"
# create registry
curl -k -u $Harbor_UserPwd -X POST -H "Content-Type: application/json" "https://${Harbor_master1_Address}/api/v2.0/registries" -d "{\"name\": \"master1_2_master3\", \"type\": \"harbor\", \"url\":\"https://${master3_Address}:7443\", \"credential\": {\"access_key\": \"${Harbor_User}\", \"access_secret\": \"${Harbor_Passwd}\"}, \"insecure\": true}"

# create registry
curl -k -u $Harbor_UserPwd -X POST -H "Content-Type: application/json" "https://${Harbor_master2_Address}/api/v2.0/registries" -d "{\"name\": \"master2_2_master1\", \"type\": \"harbor\", \"url\":\"https://${master1_Address}:7443\", \"credential\": {\"access_key\": \"${Harbor_User}\", \"access_secret\": \"${Harbor_Passwd}\"}, \"insecure\": true}"
# create registry
curl -k -u $Harbor_UserPwd -X POST -H "Content-Type: application/json" "https://${Harbor_master2_Address}/api/v2.0/registries" -d "{\"name\": \"master2_2_master3\", \"type\": \"harbor\", \"url\":\"https://${master3_Address}:7443\", \"credential\": {\"access_key\": \"${Harbor_User}\", \"access_secret\": \"${Harbor_Passwd}\"}, \"insecure\": true}"

# create registry
curl -k -u $Harbor_UserPwd -X POST -H "Content-Type: application/json" "https://${Harbor_master3_Address}/api/v2.0/registries" -d "{\"name\": \"master3_2_master1\", \"type\": \"harbor\", \"url\":\"https://${master1_Address}:7443\", \"credential\": {\"access_key\": \"${Harbor_User}\", \"access_secret\": \"${Harbor_Passwd}\"}, \"insecure\": true}"
# create registry
curl -k -u $Harbor_UserPwd -X POST -H "Content-Type: application/json" "https://${Harbor_master3_Address}/api/v2.0/registries" -d "{\"name\": \"master3_2_master2\", \"type\": \"harbor\", \"url\":\"https://${master2_Address}:7443\", \"credential\": {\"access_key\": \"${Harbor_User}\", \"access_secret\": \"${Harbor_Passwd}\"}, \"insecure\": true}"

}

function listRegistries() {
curl -k -u $Harbor_UserPwd -X GET -H "Content-Type: application/json" "https://${Harbor_master1_Address}/api/v2.0/registries"
curl -k -u $Harbor_UserPwd -X GET -H "Content-Type: application/json" "https://${Harbor_master2_Address}/api/v2.0/registries"
curl -k -u $Harbor_UserPwd -X GET -H "Content-Type: application/json" "https://${Harbor_master3_Address}/api/v2.0/registries"

}

function createReplication() {

curl -k -u $Harbor_UserPwd -X POST -H "Content-Type: application/json" "https://${Harbor_master1_Address}/api/v2.0/replication/policies" -d "{\"name\": \"master1_2_master2\", \"enabled\": true, \"deletion\":true, \"override\":true, \"replicate_deletion\":true, \"dest_registry\":{ \"id\": 1, \"name\": \"master1_2_master2\"}, \"trigger\": {\"type\": \"event_based\"}, \"dest_namespace_replace_count\":1 }"
curl -k -u $Harbor_UserPwd -X POST -H "Content-Type: application/json" "https://${Harbor_master1_Address}/api/v2.0/replication/policies" -d "{\"name\": \"master1_2_master3\", \"enabled\": true, \"deletion\":true, \"override\":true, \"replicate_deletion\":true, \"dest_registry\":{ \"id\": 2, \"name\": \"master1_2_master3\"}, \"trigger\": {\"type\": \"event_based\"}, \"dest_namespace_replace_count\":1 }"

curl -k -u $Harbor_UserPwd -X POST -H "Content-Type: application/json" "https://${Harbor_master2_Address}/api/v2.0/replication/policies" -d "{\"name\": \"master2_2_master1\", \"enabled\": true, \"deletion\":true, \"override\":true, \"replicate_deletion\":true, \"dest_registry\":{ \"id\": 1, \"name\": \"master2_2_master1\"}, \"trigger\": {\"type\": \"event_based\"}, \"dest_namespace_replace_count\":1 }"
curl -k -u $Harbor_UserPwd -X POST -H "Content-Type: application/json" "https://${Harbor_master2_Address}/api/v2.0/replication/policies" -d "{\"name\": \"master2_2_master3\", \"enabled\": true, \"deletion\":true, \"override\":true, \"replicate_deletion\":true, \"dest_registry\":{ \"id\": 2, \"name\": \"master2_2_master3\"}, \"trigger\": {\"type\": \"event_based\"}, \"dest_namespace_replace_count\":1 }"

curl -k -u $Harbor_UserPwd -X POST -H "Content-Type: application/json" "https://${Harbor_master3_Address}/api/v2.0/replication/policies" -d "{\"name\": \"master3_2_master1\", \"enabled\": true, \"deletion\":true, \"override\":true, \"replicate_deletion\":true, \"dest_registry\":{ \"id\": 1, \"name\": \"master3_2_master1\"}, \"trigger\": {\"type\": \"event_based\"}, \"dest_namespace_replace_count\":1 }"
curl -k -u $Harbor_UserPwd -X POST -H "Content-Type: application/json" "https://${Harbor_master3_Address}/api/v2.0/replication/policies" -d "{\"name\": \"master3_2_master2\", \"enabled\": true, \"deletion\":true, \"override\":true, \"replicate_deletion\":true, \"dest_registry\":{ \"id\": 2, \"name\": \"master3_2_master2\"}, \"trigger\": {\"type\": \"event_based\"}, \"dest_namespace_replace_count\":1 }"
}

function listReplications() {

curl -k -u $Harbor_UserPwd -X GET -H "Content-Type: application/json" "https://${Harbor_master1_Address}/api/v2.0/replication/policies"
curl -k -u $Harbor_UserPwd -X GET -H "Content-Type: application/json" "https://${Harbor_master2_Address}/api/v2.0/replication/policies"
curl -k -u $Harbor_UserPwd -X GET -H "Content-Type: application/json" "https://${Harbor_master3_Address}/api/v2.0/replication/policies"
}

#### main ######
Harbor_master1_Address=master1:7443
master1_Address=192.168.122.61
Harbor_master2_Address=master2:7443
master2_Address=192.168.122.62
Harbor_master3_Address=master3:7443
master3_Address=192.168.122.63
Harbor_User=admin               # Harbor login user
Harbor_Passwd="Harbor12345"     # Harbor login password
Harbor_UserPwd="$Harbor_User:$Harbor_Passwd"


createRegistries
listRegistries
createReplication
listReplications
@@ -0,0 +1,12 @@
#!/bin/bash
#count=$(docker-compose -f /opt/harbor/docker-compose.yml ps -a|grep healthy|wc -l)
# Do not call docker-compose frequently here, otherwise many temporary directories (/tmp/_MEI*) get created
count=$(docker ps |grep goharbor|grep healthy|wc -l)
status=$(ss -tlnp|grep -w 443|wc -l)
if [ $count -ne 11 ];then
  exit 8
elif [ $status -lt 2 ];then
  exit 9
else
  exit 0
fi
@@ -0,0 +1,14 @@
version: '3.8'

# Reference docker-compose file for running the container standalone
# See the image's README.md for more configuration options
services:
  keepalived:
    image: 'registry.cn-shenzhen.aliyuncs.com/colovu/keepalived:2.1'
    privileged: true
    network_mode: host
    volumes:
      - ./keepalived-backup.conf:/srv/conf/keepalived/keepalived.conf
      - ./check_harbor.sh:/srv/conf/keepalived/check_harbor.sh
    container_name: keepalived
    restart: on-failure

@@ -0,0 +1,14 @@
version: '3.8'

# Reference docker-compose file for running the container standalone
# See the image's README.md for more configuration options
services:
  keepalived:
    image: 'registry.cn-shenzhen.aliyuncs.com/colovu/keepalived:2.1'
    privileged: true
    network_mode: host
    volumes:
      - ./keepalived-master.conf:/srv/conf/keepalived/keepalived.conf
      - ./check_harbor.sh:/srv/conf/keepalived/check_harbor.sh
    container_name: keepalived
    restart: on-failure
@@ -0,0 +1,31 @@
vrrp_script check_harbor {
    script "/srv/conf/keepalived/check_harbor.sh"
    interval 10           # check interval in seconds (default 1)
    fall 2                # consecutive failures before the node is marked as failed
    rise 2                # consecutive successes before the node is marked healthy again
    timeout 5
    init_fail
}
global_defs {
    script_user root
    router_id harbor-ha
    enable_script_security
    lvs_sync_daemon ens3 VI_1
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens3
    virtual_router_id 31  # must be unique if several keepalived clusters share the same LAN
    priority 50
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass k8s-test
    }
    virtual_ipaddress {
        192.168.122.59
    }
    track_script {
        check_harbor
    }
}

@@ -0,0 +1,31 @@
vrrp_script check_harbor {
    script "/srv/conf/keepalived/check_harbor.sh"
    interval 10           # check interval in seconds (default 1)
    fall 2                # consecutive failures before the node is marked as failed
    rise 2                # consecutive successes before the node is marked healthy again
    timeout 5
    init_fail
}
global_defs {
    script_user root
    router_id harbor-ha
    enable_script_security
    lvs_sync_daemon ens3 VI_1
}
vrrp_instance VI_1 {
    state MASTER
    interface ens3
    virtual_router_id 31  # must be unique if several keepalived clusters share the same LAN
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass k8s-test
    }
    virtual_ipaddress {
        192.168.122.59
    }
    track_script {
        check_harbor
    }
}
@@ -91,6 +91,7 @@
"v1.25.11": "6ff43cc8266a21c7b62878a0a9507b085bbb079a37b095fab5bcd31f2dbd80e0",
"v1.25.12": "293252f0a1727bfad4ef4fe99d704a56ecea45e39b0ea77f629c55da39e377da",
"v1.25.13": "4694df9c5d700280c186980907ec8e695364f461b20e868336a71edabac2253e",
"v1.25.14": "6cce9224e8b939bb0c218ab1b047a934a8c2b23f07c7ade4586b5e1a4013c80f",
"v1.26.0": "72631449f26b7203701a1b99f6914f31859583a0e247c3ac0f6aaf59ca80af19",
"v1.26.1": "1531abfe96e2e9d8af9219192c65d04df8507a46a081ae1e101478e95d2b63da",
"v1.26.2": "277d880dc6d79994fd333e49d42943b7c9183b1c4ffdbf9da59f806acec7fd82",

@@ -100,14 +101,17 @@
"v1.26.6": "ba699c3c26aaf64ef46d34621de9f3b62e37656943e09f23dc3bf5aa7b3f5094",
"v1.26.7": "812e6d0e94a3fc77d3e9d09dbe709190b77408936cc4e960d916e8401be11090",
"v1.26.8": "233a89277ca49dbd666b7391c6c0e43c33d2f08052d5b93e9cd0100ee69430c8",
"v1.26.9": "73e128821dd1f799a75c922218d12f6c4618b8e29cc7dae2a7390fb80092d3d9",
"v1.27.0": "78d0e04705a7bdb76a514d60f60c073b16334b15f57ee87f064354ca8a233e80",
"v1.27.1": "c7d32d698e99b90f877025104cb4a9f3f8c707e99e6817940f260135b6d1ad0a",
"v1.27.2": "95c4bfb7929900506a42de4d92280f06efe6b47e0a32cbc1f5a1ed737592977a",
"v1.27.3": "2cd663f25c2490bd614a6c0ad9089a47ef315caf0dbdf78efd787d5653b1c6e3",
"v1.27.4": "7be21d6fb3707fbbe8f0db0403db6234c8af773b941f931bf8248759ee988bcd",
"v1.27.5": "35df8efa6e1bc864ed3c48a665caed634a5c46cfd7f41cda5ad66defdfddb2aa",
"v1.27.6": "2bcdd68957ec25d0689bb56f32b4ec86e38463d2691d5ea21cd109c7afa3aa7c",
"v1.28.0": "12ea68bfef0377ccedc1a7c98a05ea76907decbcf1e1ec858a60a7b9b73211bb",
"v1.28.1": "6134dbc92dcb83c3bae1a8030f7bb391419b5d13ea94badd3a79b7ece75b2736"
"v1.28.1": "6134dbc92dcb83c3bae1a8030f7bb391419b5d13ea94badd3a79b7ece75b2736",
"v1.28.2": "6a4808230661c69431143db2e200ea2d021c7f1b1085e6353583075471310d00"
},
"arm64": {
"v1.19.0": "db1c432646e6e6484989b6f7191f3610996ac593409f12574290bfc008ea11f5",

@@ -200,6 +204,7 @@
"v1.25.11": "570d87d56a24778bd0854270eeddc8bcfb275f1c711cced5b5948f631e0c3ede",
"v1.25.12": "6a22e2e830f9df16a96a1ac5a4034b950b89a0cc90b19dc1fb104b268e4cd251",
"v1.25.13": "d5380bd3f0562aee30d888f22b5650c7af54da83d9fe5187821bcedf21885a11",
"v1.25.14": "525181225d963ddbc17765587a7b5919aa68d7264a197f6a1359f32e7f4a2e03",
"v1.26.0": "652844c9518786273e094825b74a1988c871552dc6ccf71366558e67409859d1",
"v1.26.1": "db101c4bb8e33bd69241de227ed317feee6d44dbd674891e1b9e11c6e8b369bb",
"v1.26.2": "f210d8617acf7c601196294f7ca97e4330b75dad00df6b8dd12393730c501473",

@@ -209,14 +214,17 @@
"v1.26.6": "003c7740750ad92d2ff3d58d4a15015906c120c93c7aa605ba98edd936061542",
"v1.26.7": "34192ceac2287029b36e2d6b682e55dee245ae622701dc3b36bd3203019b18d1",
"v1.26.8": "f12d5d748abb8586723b78a2f0300f88faf0391f56d4d49f1ad1cef74160a1b5",
"v1.26.9": "14c87cbb9a3fa02308a9546aad192ce2d93e5d1d0296d28ba449079e6a1cb2b2",
"v1.27.0": "acd805c6783b678ee0068b9dd8165bbfd879c345fd9c25d6a978dbc965f48544",
"v1.27.1": "024a59cd6fc76784b597c0c1cf300526e856e8c9fefa5fa7948158929b739551",
"v1.27.2": "8f01f363f7c7f92de2f2276124a895503cdc5a60ff549440170880f296b087eb",
"v1.27.3": "495e2193ed779d25584b4b532796c2270df0f7139ef15fb89dc7980603615ef4",
"v1.27.4": "b4ede8a18ef3d1cfa61e6fbca8fcab02f8eee3d0770d2329490fa7be90a4cae4",
"v1.27.5": "3023ef1d2eff885af860e13c8b9fcdb857d259728f16bf992d59c2be522cec82",
"v1.27.6": "faec35315203913b835e9b789d89001a05e072943c960bcf4de1e331d08e10c8",
"v1.28.0": "b9b473d2d9136559b19eb465006af77df45c09862cd7ce6673a33aae517ff5ab",
"v1.28.1": "7d2f68917470a5d66bd2a7d62897f59cb4afaeffb2f26c028afa119acd8c3fc8"
"v1.28.1": "7d2f68917470a5d66bd2a7d62897f59cb4afaeffb2f26c028afa119acd8c3fc8",
"v1.28.2": "010789a94cf512d918ec4a3ef8ec734dea0061d89a8293059ef9101ca1bf6bff"
}
},
"kubelet": {
@@ -311,6 +319,7 @@
"v1.25.11": "4801700e29405e49a7e51cccb806decd65ca3a5068d459a40be3b4c5846b9a46",
"v1.25.12": "7aa7d0b4512e6d79ada2017c054b07aaf30d4dc0d740449364a5e2c26e2c1842",
"v1.25.13": "0399cfd7031cf5f3d7f8485b243a5ef37230e63d105d5f29966f0f81a58a8f6d",
"v1.25.14": "b9d1dbd9e7c1d3bda6249f38d7cd4f63e4188fa31cddd80d5e8ac1ce3a9a4d96",
"v1.26.0": "b64949fe696c77565edbe4100a315b6bf8f0e2325daeb762f7e865f16a6e54b5",
"v1.26.1": "8b99dd73f309ca1ac4005db638e82f949ffcfb877a060089ec0e729503db8198",
"v1.26.2": "e6dd2ee432a093492936ff8505f084b5ed41662f50231f1c11ae08ee8582a3f5",

@@ -320,14 +329,17 @@
"v1.26.6": "da82477404414eb342d6b93533f372aa1c41956a57517453ef3d39ebbfdf8cc2",
"v1.26.7": "2926ea2cd7fcd644d24a258bdf21e1a8cfd95412b1079914ca46466dae1d74f2",
"v1.26.8": "1c68a65a6a0c2230325e29da0cc3eaaef9bbf688a7a0bb8243b4a7ebfe0e3363",
"v1.26.9": "baa2b021ab2f90c342518e2b8981a18de7e1e6b33f11c57e3ff23d40364877a8",
"v1.27.0": "0b4ed4fcd75d33f5dff3ba17776e6089847fc83064d3f7a3ad59a34e94e60a29",
"v1.27.1": "cb2845fff0ce41c400489393da73925d28fbee54cfeb7834cd4d11e622cbd3a7",
"v1.27.2": "a0d12afcab3b2836de4a427558d067bebdff040e9b306b0512c93d9d2a066579",
"v1.27.3": "c0e18da6a55830cf4910ecd7261597c66ea3f8f58cf44d4adb6bdcb6e2e6f0bf",
"v1.27.4": "385f65878dc8b48df0f2bd369535ff273390518b5ac2cc1a1684d65619324704",
"v1.27.5": "66df07ab4f9d72028c97ec7e5eea23adc0ab62a209ba2285431456d7d75a5bb3",
"v1.27.6": "daa42f9b6f5e2176bbce0d24d89a05613000630bcddec1fafd2a8d42a523ce9d",
"v1.28.0": "bfb6b977100963f2879a33e5fbaa59a5276ba829a957a6819c936e9c1465f981",
"v1.28.1": "2bc22332f44f8fcd3fce57879fd873f977949ebd261571fbae31fbb2713a5dd3"
"v1.28.1": "2bc22332f44f8fcd3fce57879fd873f977949ebd261571fbae31fbb2713a5dd3",
"v1.28.2": "17edb866636f14eceaad58c56eab12af7ab3be3c78400aff9680635d927f1185"
},
"arm64": {
"v1.19.0": "d8fa5a9739ecc387dfcc55afa91ac6f4b0ccd01f1423c423dbd312d787bbb6bf",

@@ -420,6 +432,7 @@
"v1.25.11": "0140cf3aee0b9386fc8430c32bc94c169a6e50640947933733896e01490cbf6c",
"v1.25.12": "3402d0fcec5105bb08b917bb5a29a979c674aa10a12a1dfe4e0d80b292f9fe56",
"v1.25.13": "7a29aabb40a984f104b88c09312e897bb710e6bb68022537f8700e70971b984b",
"v1.25.14": "3a3d4ac26b26baef43188a6f52d40a20043db3ffdbcbefab8be222b58ce0f713",
"v1.26.0": "fb033c1d079cac8babb04a25abecbc6cc1a2afb53f56ef1d73f8dc3b15b3c09e",
"v1.26.1": "f4b514162b52d19909cf0ddf0b816d8d7751c5f1de60eda90cd84dcccc56c399",
"v1.26.2": "33e77f93d141d3b9e207ae50ff050186dea084ac26f9ec88280f85bab9dad310",

@@ -429,14 +442,17 @@
"v1.26.6": "44c2cd64e1317df8252bca1cf196227c543005a3b10d52fb114401cb1617f32f",
"v1.26.7": "73e086cfd8cd1cef559e739e19aff2932f8a9e0bdba3c9faeb9185a86d067fbb",
"v1.26.8": "0f15e484c4a7a7c3bad9e0aa4d4334ca029b97513fbe03f053201dd937cf316e",
"v1.26.9": "f6b1dcee9960ffe6b778dc91cabef8ce4a7bd06c76378ef2784232709eace6a5",
"v1.27.0": "37aa2edc7c0c4b3e488518c6a4b44c8aade75a55010534ee2be291220c73d157",
"v1.27.1": "dbb09d297d924575654db38ed2fc627e35913c2d4000c34613ac6de4995457d0",
"v1.27.2": "810cd9a611e9f084e57c9ee466e33c324b2228d4249ff38c2588a0cc3224f10d",
"v1.27.3": "2838fd55340d59f777d7bd7e5989fc72b7a0ca198cf4f3f723cd9956859ce942",
"v1.27.4": "c75ad8e7c7ef05c0c021b21a9fe86e92f64db1e4c1bc84e1baf45d8dbb8ba8d1",
"v1.27.5": "4e78fafdeb5d61ab6ebcd6e75e968c47001c321bec169bb9bd9f001132de5321",
"v1.27.6": "be579ef4e8fa3e1de9d40a77e4d35d99e535a293f66bf3038cbea9cf803d11e5",
"v1.28.0": "05dd12e35783cab4960e885ec0e7d0e461989b94297e7bea9018ccbd15c4dce9",
"v1.28.1": "9b7fa64b2785da4a38768377961e227f8da629c56a5df43ca1b665dd07b56f3c"
"v1.28.1": "9b7fa64b2785da4a38768377961e227f8da629c56a5df43ca1b665dd07b56f3c",
"v1.28.2": "32269e9ec38c561d028b65c3048ea6a100e1292cbe9e505565222455c8096577"
}
},
"kubectl": {
@@ -531,6 +547,7 @@
"v1.25.11": "d12bc7d26313546827683ff7b79d0cb2e7ac17cdad4dce138ed518e478b148a7",
"v1.25.12": "75842752ea07cb8ee2210df40faa7c61e1317e76d5c7968e380cae83447d4a0f",
"v1.25.13": "22c5d5cb95b671ea7d7accd77e60e4a787b6d40a6b8ba4d6c364cb3ca818c29a",
"v1.25.14": "06351e043b8ecd1206854643a2094ccf218180c1b3fab5243f78d2ccfc630ca2",
"v1.26.0": "b6769d8ac6a0ed0f13b307d289dc092ad86180b08f5b5044af152808c04950ae",
"v1.26.1": "d57be22cfa25f7427cfb538cfc8853d763878f8b36c76ce93830f6f2d67c6e5d",
"v1.26.2": "fcf86d21fb1a49b012bce7845cf00081d2dd7a59f424b28621799deceb5227b3",

@@ -540,14 +557,17 @@
"v1.26.6": "ee23a539b5600bba9d6a404c6d4ea02af3abee92ad572f1b003d6f5a30c6f8ab",
"v1.26.7": "d9dc7741e5f279c28ef32fbbe1daa8ebc36622391c33470efed5eb8426959971",
"v1.26.8": "d8e0dba258d1096f95bb6746ca359db2ee8abe226e777f89dc8a5d1bb76795aa",
"v1.26.9": "98ea4a13895e54ba24f57e0d369ff6be0d3906895305d5390197069b1da12ae2",
"v1.27.0": "71a78259d70da9c5540c4cf4cff121f443e863376f68f89a759d90cef3f51e87",
"v1.27.1": "7fe3a762d926fb068bae32c399880e946e8caf3d903078bea9b169dcd5c17f6d",
"v1.27.2": "4f38ee903f35b300d3b005a9c6bfb9a46a57f92e89ae602ef9c129b91dc6c5a5",
"v1.27.3": "fba6c062e754a120bc8105cde1344de200452fe014a8759e06e4eec7ed258a09",
"v1.27.4": "4685bfcf732260f72fce58379e812e091557ef1dfc1bc8084226c7891dd6028f",
"v1.27.5": "9a091fb65e4cf4e8be3ce9a21c79210177dd7ce31a2998ec638c92f37f058bcd",
"v1.27.6": "2b7adb71c8630904da1b94e262c8c3c477e9609b3c0ed8ae1213a1e156ae38dd",
"v1.28.0": "4717660fd1466ec72d59000bb1d9f5cdc91fac31d491043ca62b34398e0799ce",
"v1.28.1": "e7a7d6f9d06fab38b4128785aa80f65c54f6675a0d2abef655259ddd852274e1"
"v1.28.1": "e7a7d6f9d06fab38b4128785aa80f65c54f6675a0d2abef655259ddd852274e1",
"v1.28.2": "c922440b043e5de1afa3c1382f8c663a25f055978cbc6e8423493ec157579ec5"
},
"arm64": {
"v1.19.0": "d4adf1b6b97252025cb2f7febf55daa3f42dc305822e3da133f77fd33071ec2f",

@@ -640,6 +660,7 @@
"v1.25.11": "2eb5109735c1442dd3b91a15ff74e24748efd967a3d7bf1a2b16e7aa78400677",
"v1.25.12": "315a1515b7fe254d7aa4f5928007b4f4e586bfd91ea6cbf392718099920dcb8a",
"v1.25.13": "90bb3c9126b64f5eee2bef5a584da8bf0a38334e341b427b6986261af5f0d49b",
"v1.25.14": "a52ec9119e390ad872a74fc560a6569b1758a4217fd2b03e966f77aaa2a2b706",
"v1.26.0": "79b14e4ddada9e81d2989f36a89faa9e56f8abe6e0246e7bdc305c93c3731ea4",
"v1.26.1": "4027cb0a2840bc14ec3f18151b3360dd2d1f6ce730ed5ac28bd846c17e7d73f5",
"v1.26.2": "291e85bef77e8440205c873686e9938d7f87c0534e9a491de64e3cc0584295b6",

@@ -649,14 +670,17 @@
"v1.26.6": "8261d35cd374c438104bb5257e6c9dafb8443cd0eed8272b219ec5aa17b8ca40",
"v1.26.7": "71edc4c6838a7332e5f82abb35642ce7f905059a258690b0a585d3ed6de285b3",
"v1.26.8": "e93f836cba409b5ef5341020d9501067a51bf8210cb35649518e5f4d114244cf",
"v1.26.9": "f945c63220b393ddf8df67d87e67ff74b7f56219a670dee38bc597a078588e90",
"v1.27.0": "f8e09630211f2b7c6a8cc38835e7dea94708d401f5c84b23a37c70c604602ddc",
"v1.27.1": "fd3cb8f16e6ed8aee9955b76e3027ac423b6d1cc7356867310d128082e2db916",
"v1.27.2": "1b0966692e398efe71fe59f913eaec44ffd4468cc1acd00bf91c29fa8ff8f578",
"v1.27.3": "7bb7fec4e28e0b50b603d64e47629e812408751bd1e0ce059b2fee83b0e3ff6f",
"v1.27.4": "5178cbb51dcfff286c20bc847d64dd35cd5993b81a2e3609581377a520a6425d",
"v1.27.5": "0158955c59c775165937918f910380ed7b52fca4a26fb41a369734e83aa44874",
"v1.27.6": "7322a6f600de6d0d06cf333bdc24cd2a340bba12920b0c2385c97884c808c810",
"v1.28.0": "f5484bd9cac66b183c653abed30226b561f537d15346c605cc81d98095f1717c",
"v1.28.1": "46954a604b784a8b0dc16754cfc3fa26aabca9fd4ffd109cd028bfba99d492f6"
"v1.28.1": "46954a604b784a8b0dc16754cfc3fa26aabca9fd4ffd109cd028bfba99d492f6",
"v1.28.2": "ea6d89b677a8d9df331a82139bb90d9968131530b94eab26cee561531eff4c53"
}
},
"etcd": {
@@ -860,7 +884,8 @@
"20.10.22": "945c3a3ddcb79ee7307496c2f39eb3d8372466e8654e63d60bbb462e4a3c1427",
"20.10.23": "0ee39f72cc434137d294c14d30897826bad6e24979e421f51a252769ad37e6d1",
"23.0.0": "6a03bbda96845b7451be2f6aba69c3816c60a97de318e83fd1b39d1be262d8af",
"23.0.1": "ec8a71e79125d3ca76f7cc295f35eea225f4450e0ffe0775f103e2952ff580f6"
"23.0.1": "ec8a71e79125d3ca76f7cc295f35eea225f4450e0ffe0775f103e2952ff580f6",
"24.0.6": "99792dec613df93169a118b05312a722a63604b868e4c941b1b436abcf3bb70f"
},
"arm64": {
"20.10.2": "9ea59f249ae92bbaa9831a22f2affa2edc9e824f9daaba831ca51d6d22ef2df5",

@@ -886,7 +911,8 @@
"20.10.22": "2c75cd6c3dc9b81cb5bde664c882e4339a2054e09cf09606f9f7dd6970e7f078",
"20.10.23": "5c40bb7dcd1aad94be49ad75d24e7fd409119ed0eaad04f5d13c4fddfb397c8a",
"23.0.0": "2919ff3448187d4f13cfbe2332707cff3f6dcf2baaac42a34bea8dd21f434f4a",
"23.0.1": "3865f837dbd951b19eeb5f7d87aada2e865b2017e9462fe389f0e5d9a438324d"
"23.0.1": "3865f837dbd951b19eeb5f7d87aada2e865b2017e9462fe389f0e5d9a438324d",
"24.0.6": "d9f58aecc42451503e82e6e0562cafa1812b334c92186a7f486e111e70a0f5bd"
}
},
"containerd": {