Mirror of https://github.com/kubesphere/kubekey.git (synced 2025-12-26 01:22:51 +00:00)

Commit 3d0c1b62ca: Merge branch 'master' of https://github.com/wenwenxiong/kubekey
@@ -705,6 +705,34 @@
       "code",
       "doc"
     ]
-  }
+  },
+  {
+    "login": "testwill",
+    "name": "guangwu",
+    "avatar_url": "https://avatars.githubusercontent.com/u/8717479?v=4",
+    "profile": "https://github.com/testwill",
+    "contributions": [
+      "code",
+      "doc"
+    ]
+  },
+  {
+    "login": "wongearl",
+    "name": "wongearl",
+    "avatar_url": "https://avatars.githubusercontent.com/u/36498442?v=4",
+    "profile": "https://github.com/wongearl",
+    "contributions": [
+      "code"
+    ]
+  },
+  {
+    "login": "wenwenxiong",
+    "name": "wenwenxiong",
+    "avatar_url": "https://avatars.githubusercontent.com/u/10548812?v=4",
+    "profile": "https://github.com/wenwenxiong",
+    "contributions": [
+      "code"
+    ]
+  }
 ],
 "contributorsPerLine": 7,
@@ -104,6 +104,11 @@ Contributions of any kind are welcome! Thanks goes to these wonderful contributo
       <td align="center" valign="top" width="14.28%"><a href="https://kiragoo.github.io"><img src="https://avatars.githubusercontent.com/u/7400711?v=4?s=100" width="100px;" alt="kiragoo"/><br /><sub><b>kiragoo</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=kiragoo" title="Code">💻</a></td>
       <td align="center" valign="top" width="14.28%"><a href="https://github.com/jojotong"><img src="https://avatars.githubusercontent.com/u/100849526?v=4?s=100" width="100px;" alt="jojotong"/><br /><sub><b>jojotong</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=jojotong" title="Code">💻</a></td>
       <td align="center" valign="top" width="14.28%"><a href="https://github.com/littleBlackHouse"><img src="https://avatars.githubusercontent.com/u/54946465?v=4?s=100" width="100px;" alt="littleBlackHouse"/><br /><sub><b>littleBlackHouse</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=littleBlackHouse" title="Code">💻</a> <a href="https://github.com/kubesphere/kubekey/commits?author=littleBlackHouse" title="Documentation">📖</a></td>
+      <td align="center" valign="top" width="14.28%"><a href="https://github.com/testwill"><img src="https://avatars.githubusercontent.com/u/8717479?v=4?s=100" width="100px;" alt="guangwu"/><br /><sub><b>guangwu</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=testwill" title="Code">💻</a> <a href="https://github.com/kubesphere/kubekey/commits?author=testwill" title="Documentation">📖</a></td>
     </tr>
+    <tr>
+      <td align="center" valign="top" width="14.28%"><a href="https://github.com/wongearl"><img src="https://avatars.githubusercontent.com/u/36498442?v=4?s=100" width="100px;" alt="wongearl"/><br /><sub><b>wongearl</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=wongearl" title="Code">💻</a></td>
+      <td align="center" valign="top" width="14.28%"><a href="https://github.com/wenwenxiong"><img src="https://avatars.githubusercontent.com/u/10548812?v=4?s=100" width="100px;" alt="wenwenxiong"/><br /><sub><b>wenwenxiong</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=wenwenxiong" title="Code">💻</a></td>
+    </tr>
   </tbody>
 </table>
@@ -388,6 +388,11 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d
       <td align="center" valign="top" width="14.28%"><a href="https://kiragoo.github.io"><img src="https://avatars.githubusercontent.com/u/7400711?v=4?s=100" width="100px;" alt="kiragoo"/><br /><sub><b>kiragoo</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=kiragoo" title="Code">💻</a></td>
       <td align="center" valign="top" width="14.28%"><a href="https://github.com/jojotong"><img src="https://avatars.githubusercontent.com/u/100849526?v=4?s=100" width="100px;" alt="jojotong"/><br /><sub><b>jojotong</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=jojotong" title="Code">💻</a></td>
       <td align="center" valign="top" width="14.28%"><a href="https://github.com/littleBlackHouse"><img src="https://avatars.githubusercontent.com/u/54946465?v=4?s=100" width="100px;" alt="littleBlackHouse"/><br /><sub><b>littleBlackHouse</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=littleBlackHouse" title="Code">💻</a> <a href="https://github.com/kubesphere/kubekey/commits?author=littleBlackHouse" title="Documentation">📖</a></td>
+      <td align="center" valign="top" width="14.28%"><a href="https://github.com/testwill"><img src="https://avatars.githubusercontent.com/u/8717479?v=4?s=100" width="100px;" alt="guangwu"/><br /><sub><b>guangwu</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=testwill" title="Code">💻</a> <a href="https://github.com/kubesphere/kubekey/commits?author=testwill" title="Documentation">📖</a></td>
     </tr>
+    <tr>
+      <td align="center" valign="top" width="14.28%"><a href="https://github.com/wongearl"><img src="https://avatars.githubusercontent.com/u/36498442?v=4?s=100" width="100px;" alt="wongearl"/><br /><sub><b>wongearl</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=wongearl" title="Code">💻</a></td>
+      <td align="center" valign="top" width="14.28%"><a href="https://github.com/wenwenxiong"><img src="https://avatars.githubusercontent.com/u/10548812?v=4?s=100" width="100px;" alt="wenwenxiong"/><br /><sub><b>wenwenxiong</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=wenwenxiong" title="Code">💻</a></td>
+    </tr>
   </tbody>
 </table>
@@ -405,6 +405,11 @@ kubectl completion bash >/etc/bash_completion.d/kubectl
       <td align="center" valign="top" width="14.28%"><a href="https://kiragoo.github.io"><img src="https://avatars.githubusercontent.com/u/7400711?v=4?s=100" width="100px;" alt="kiragoo"/><br /><sub><b>kiragoo</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=kiragoo" title="Code">💻</a></td>
       <td align="center" valign="top" width="14.28%"><a href="https://github.com/jojotong"><img src="https://avatars.githubusercontent.com/u/100849526?v=4?s=100" width="100px;" alt="jojotong"/><br /><sub><b>jojotong</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=jojotong" title="Code">💻</a></td>
       <td align="center" valign="top" width="14.28%"><a href="https://github.com/littleBlackHouse"><img src="https://avatars.githubusercontent.com/u/54946465?v=4?s=100" width="100px;" alt="littleBlackHouse"/><br /><sub><b>littleBlackHouse</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=littleBlackHouse" title="Code">💻</a> <a href="https://github.com/kubesphere/kubekey/commits?author=littleBlackHouse" title="Documentation">📖</a></td>
+      <td align="center" valign="top" width="14.28%"><a href="https://github.com/testwill"><img src="https://avatars.githubusercontent.com/u/8717479?v=4?s=100" width="100px;" alt="guangwu"/><br /><sub><b>guangwu</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=testwill" title="Code">💻</a> <a href="https://github.com/kubesphere/kubekey/commits?author=testwill" title="Documentation">📖</a></td>
     </tr>
+    <tr>
+      <td align="center" valign="top" width="14.28%"><a href="https://github.com/wongearl"><img src="https://avatars.githubusercontent.com/u/36498442?v=4?s=100" width="100px;" alt="wongearl"/><br /><sub><b>wongearl</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=wongearl" title="Code">💻</a></td>
+      <td align="center" valign="top" width="14.28%"><a href="https://github.com/wenwenxiong"><img src="https://avatars.githubusercontent.com/u/10548812?v=4?s=100" width="100px;" alt="wenwenxiong"/><br /><sub><b>wenwenxiong</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=wenwenxiong" title="Code">💻</a></td>
+    </tr>
   </tbody>
 </table>
@@ -42,6 +42,12 @@ type Auth struct {
 	// +optional
 	PrivateKeyPath string `yaml:"privateKeyPath,omitempty" json:"privateKeyPath,omitempty"`
 
+	// Secret is the name of the Secret that holds the PrivateKey or Password for SSH authentication. It should be in the same namespace as capkk.
+	// When Password is empty, it is replaced with data.password.
+	// When PrivateKey is empty, it is replaced with data.privateKey.
+	// +optional
+	Secret string `yaml:"secret,omitempty" json:"secret,omitempty"`
+
 	// Timeout is the timeout for establishing an SSH connection.
 	// +optional
 	Timeout *time.Duration `yaml:"timeout,omitempty" json:"timeout,omitempty"`
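The field comment above describes a lookup that this hunk does not itself implement. Below is a minimal client-go sketch of that rule, assuming the conventional data.password / data.privateKey keys and using a fake clientset for illustration; the real capkk controller code is not part of this diff.

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// resolveAuthSecret fills Password/PrivateKey from the named Secret when they
// are empty, following the rule in the field comment. The namespace argument
// stands in for "the same namespace as capkk".
func resolveAuthSecret(ctx context.Context, cs kubernetes.Interface, ns, name string, password, privateKey *string) error {
	secret, err := cs.CoreV1().Secrets(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	if *password == "" {
		*password = string(secret.Data["password"])
	}
	if *privateKey == "" {
		*privateKey = string(secret.Data["privateKey"])
	}
	return nil
}

func main() {
	// A fake clientset seeded with a hypothetical Secret, for demonstration only.
	cs := fake.NewSimpleClientset(&corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "ssh-auth", Namespace: "capkk-system"},
		Data:       map[string][]byte{"password": []byte("s3cret")},
	})
	var password, privateKey string
	if err := resolveAuthSecret(context.Background(), cs, "capkk-system", "ssh-auth", &password, &privateKey); err != nil {
		panic(err)
	}
	fmt.Println(password) // s3cret
}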
@@ -195,7 +195,7 @@ func validateLoadBalancer(loadBalancer *KKLoadBalancerSpec) []*field.Error {
 func validateClusterNodes(nodes Nodes) []*field.Error {
 	var errs field.ErrorList
 
-	if nodes.Auth.Password == "" && nodes.Auth.PrivateKey == "" && nodes.Auth.PrivateKeyPath == "" {
+	if nodes.Auth.Password == "" && nodes.Auth.PrivateKey == "" && nodes.Auth.PrivateKeyPath == "" && nodes.Auth.Secret == "" {
 		errs = append(errs, field.Required(field.NewPath("spec", "nodes", "auth"), "password, privateKey, privateKeyPath and secret can't all be empty"))
 	}
 
@@ -74,6 +74,7 @@ type HostCfg struct {
 type ControlPlaneEndpoint struct {
 	InternalLoadbalancer string  `yaml:"internalLoadbalancer" json:"internalLoadbalancer,omitempty"`
 	Domain               string  `yaml:"domain" json:"domain,omitempty"`
+	ExternalDNS          *bool   `yaml:"externalDNS" json:"externalDNS"`
 	Address              string  `yaml:"address" json:"address,omitempty"`
 	Port                 int     `yaml:"port" json:"port,omitempty"`
 	KubeVip              KubeVip `yaml:"kubevip" json:"kubevip,omitempty"`

@@ -127,7 +128,10 @@ func (cfg *ClusterSpec) GenerateCertSANs() []string {
 	extraCertSANs := make([]string, 0)
 
 	extraCertSANs = append(extraCertSANs, cfg.ControlPlaneEndpoint.Domain)
-	extraCertSANs = append(extraCertSANs, cfg.ControlPlaneEndpoint.Address)
+
+	if cfg.ControlPlaneEndpoint.Address != "" {
+		extraCertSANs = append(extraCertSANs, cfg.ControlPlaneEndpoint.Address)
+	}
 
 	for _, host := range cfg.Hosts {
 		extraCertSANs = append(extraCertSANs, host.Name)

@@ -293,3 +297,11 @@ func (c ControlPlaneEndpoint) IsInternalLBEnabled() bool {
 func (c ControlPlaneEndpoint) IsInternalLBEnabledVip() bool {
 	return c.InternalLoadbalancer == Kubevip
 }
+
+// EnableExternalDNS is used to determine whether to use external DNS to resolve the kube-apiserver domain.
+func (c *ControlPlaneEndpoint) EnableExternalDNS() bool {
+	if c.ExternalDNS == nil {
+		return false
+	}
+	return *c.ExternalDNS
+}
@@ -39,7 +39,7 @@ const (
 	DefaultDNSDomain         = "cluster.local"
 	DefaultArch              = "amd64"
 	DefaultSSHTimeout        = 30
-	DefaultEtcdVersion       = "v3.4.13"
+	DefaultEtcdVersion       = "v3.5.6"
 	DefaultEtcdPort          = "2379"
 	DefaultDockerVersion     = "20.10.8"
 	DefaultContainerdVersion = "1.6.4"

@@ -94,7 +94,7 @@ const (
 	DefaultOpenEBSBasePath = "/var/openebs/local"
 
 	Docker     = "docker"
-	Conatinerd = "containerd"
+	Containerd = "containerd"
 	Crio       = "crio"
 	Isula      = "isula"
 
@@ -184,16 +184,11 @@ func SetDefaultHostsCfg(cfg *ClusterSpec) []HostCfg {
 }
 
 func SetDefaultLBCfg(cfg *ClusterSpec, masterGroup []*KubeHost) ControlPlaneEndpoint {
 	// If this is not an HA environment, the LB address does not need to be set.
 	if len(masterGroup) == 1 && cfg.ControlPlaneEndpoint.Address != "" {
 		fmt.Println("When the environment is not HA, the LB address does not need to be set, so delete the corresponding value.")
 		os.Exit(0)
 	}
 
 	// Check whether an LB should be configured.
-	if len(masterGroup) >= 3 && !cfg.ControlPlaneEndpoint.IsInternalLBEnabled() && cfg.ControlPlaneEndpoint.Address == "" {
-		fmt.Println("When the environment has at least three masters, you must set the value of the LB address or enable the internal loadbalancer.")
-		os.Exit(0)
+	if len(masterGroup) >= 2 && !cfg.ControlPlaneEndpoint.IsInternalLBEnabled() && cfg.ControlPlaneEndpoint.Address == "" && !cfg.ControlPlaneEndpoint.EnableExternalDNS() {
+		fmt.Println()
+		fmt.Println("Warning: When there are at least two nodes in the control-plane, you should set the value of the LB address or enable the internal loadbalancer, or set 'controlPlaneEndpoint.externalDNS' to 'true' if the 'controlPlaneEndpoint.domain' can be resolved in your dns server.")
+		fmt.Println()
 	}
 
 	// Check whether the LB address and the internal LB are both enabled

@@ -202,7 +197,7 @@ func SetDefaultLBCfg(cfg *ClusterSpec, masterGroup []*KubeHost) ControlPlaneEndp
 		os.Exit(0)
 	}
 
-	if cfg.ControlPlaneEndpoint.Address == "" || cfg.ControlPlaneEndpoint.Address == "127.0.0.1" {
+	if (cfg.ControlPlaneEndpoint.Address == "" && !cfg.ControlPlaneEndpoint.EnableExternalDNS()) || cfg.ControlPlaneEndpoint.Address == "127.0.0.1" {
 		cfg.ControlPlaneEndpoint.Address = masterGroup[0].InternalAddress
 	}
 	if cfg.ControlPlaneEndpoint.Domain == "" {

@@ -321,7 +316,7 @@ func SetDefaultClusterCfg(cfg *ClusterSpec) Kubernetes {
 		cfg.Kubernetes.ContainerRuntimeEndpoint = ""
 	case Crio:
 		cfg.Kubernetes.ContainerRuntimeEndpoint = DefaultCrioEndpoint
-	case Conatinerd:
+	case Containerd:
 		cfg.Kubernetes.ContainerRuntimeEndpoint = DefaultContainerdEndpoint
 	case Isula:
 		cfg.Kubernetes.ContainerRuntimeEndpoint = DefaultIsulaEndpoint
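Read as a predicate, the rewritten check warns from two control-plane nodes upward (previously it exited at three) unless one of three escape hatches is set. A standalone sketch of that decision, with the internal-LB test simplified to a non-empty field (the real code calls IsInternalLBEnabled()):

package main

import "fmt"

type ControlPlaneEndpoint struct {
	InternalLoadbalancer string
	Domain               string
	ExternalDNS          *bool
	Address              string
}

func (c ControlPlaneEndpoint) enableExternalDNS() bool {
	return c.ExternalDNS != nil && *c.ExternalDNS
}

// needsLBWarning mirrors the rewritten condition in SetDefaultLBCfg: the
// warning fires when none of the LB address, internal LB, or external DNS
// options is configured.
func needsLBWarning(masters int, c ControlPlaneEndpoint) bool {
	return masters >= 2 && c.InternalLoadbalancer == "" && c.Address == "" && !c.enableExternalDNS()
}

func main() {
	dns := true
	fmt.Println(needsLBWarning(3, ControlPlaneEndpoint{}))                      // true: nothing configured
	fmt.Println(needsLBWarning(3, ControlPlaneEndpoint{Address: "10.0.0.100"})) // false: LB address set
	fmt.Println(needsLBWarning(3, ControlPlaneEndpoint{Domain: "lb.kubesphere.local", ExternalDNS: &dns})) // false: external DNS
}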
@@ -46,6 +46,7 @@ type Kubernetes struct {
 	FeatureGates           map[string]bool      `yaml:"featureGates" json:"featureGates,omitempty"`
 	KubeletConfiguration   runtime.RawExtension `yaml:"kubeletConfiguration" json:"kubeletConfiguration,omitempty"`
 	KubeProxyConfiguration runtime.RawExtension `yaml:"kubeProxyConfiguration" json:"kubeProxyConfiguration,omitempty"`
+	Audit                  Audit                `yaml:"audit" json:"audit,omitempty"`
 }
 
 // Kata contains the configuration for the kata in cluster

@@ -58,6 +59,11 @@ type NodeFeatureDiscovery struct {
 	Enabled *bool `yaml:"enabled" json:"enabled,omitempty"`
 }
 
+// Audit contains the configuration for the kube-apiserver audit in cluster
+type Audit struct {
+	Enabled *bool `yaml:"enabled" json:"enabled,omitempty"`
+}
+
 // EnableNodelocaldns is used to determine whether to deploy nodelocaldns.
 func (k *Kubernetes) EnableNodelocaldns() bool {
 	if k.Nodelocaldns == nil {

@@ -82,9 +88,18 @@ func (k *Kubernetes) EnableNodeFeatureDiscovery() bool {
 	return *k.NodeFeatureDiscovery.Enabled
 }
 
 // EnableAutoRenewCerts is used to determine whether to enable AutoRenewCerts.
 func (k *Kubernetes) EnableAutoRenewCerts() bool {
 	if k.AutoRenewCerts == nil {
 		return false
 	}
 	return *k.AutoRenewCerts
 }
+
+// EnableAudit is used to determine whether to enable kube-apiserver audit.
+func (k *Kubernetes) EnableAudit() bool {
+	if k.Audit.Enabled == nil {
+		return false
+	}
+	return *k.Audit.Enabled
+}
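Audit.Enabled follows the same tri-state *bool pattern as Nodelocaldns and AutoRenewCerts: nil means "not set", so the accessor can choose a default. A runnable sketch of the pattern:

package main

import "fmt"

// Audit mirrors the new type from the diff: a *bool distinguishes "unset"
// (nil, default off) from an explicit true/false.
type Audit struct {
	Enabled *bool
}

// EnableAudit returns false when the field was never set, matching the
// accessor above.
func (a Audit) EnableAudit() bool {
	if a.Enabled == nil {
		return false
	}
	return *a.Enabled
}

func main() {
	on := true
	fmt.Println(Audit{}.EnableAudit())             // false: unset defaults to off
	fmt.Println(Audit{Enabled: &on}.EnableAudit()) // true: explicitly enabled
}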
@@ -81,6 +81,10 @@ type DockerCompose struct {
 	Version string `yaml:"version" json:"version"`
 }
 
+type Calicoctl struct {
+	Version string `yaml:"version" json:"version"`
+}
+
 type Components struct {
 	Helm Helm `yaml:"helm" json:"helm"`
 	CNI  CNI  `yaml:"cni" json:"cni"`

@@ -90,6 +94,7 @@ type Components struct {
 	DockerRegistry DockerRegistry `yaml:"docker-registry" json:"docker-registry"`
 	Harbor         Harbor         `yaml:"harbor" json:"harbor"`
 	DockerCompose  DockerCompose  `yaml:"docker-compose" json:"docker-compose"`
+	Calicoctl      Calicoctl      `yaml:"calicoctl" json:"calicoctl"`
 }
 
 type ManifestRegistry struct {
@@ -27,9 +27,10 @@ type NetworkConfig struct {
 }
 
 type CalicoCfg struct {
-	IPIPMode  string `yaml:"ipipMode" json:"ipipMode,omitempty"`
-	VXLANMode string `yaml:"vxlanMode" json:"vxlanMode,omitempty"`
-	VethMTU   int    `yaml:"vethMTU" json:"vethMTU,omitempty"`
+	IPIPMode        string `yaml:"ipipMode" json:"ipipMode,omitempty"`
+	VXLANMode       string `yaml:"vxlanMode" json:"vxlanMode,omitempty"`
+	VethMTU         int    `yaml:"vethMTU" json:"vethMTU,omitempty"`
+	Ipv4NatOutgoing *bool  `yaml:"ipv4NatOutgoing" json:"ipv4NatOutgoing,omitempty"`
 }
 
 type FlannelCfg struct {

@@ -133,3 +134,11 @@ func (n *NetworkConfig) EnableMultusCNI() bool {
 	}
 	return *n.MultusCNI.Enabled
 }
+
+// EnableIPV4POOL_NAT_OUTGOING is used to determine whether to enable CALICO_IPV4POOL_NAT_OUTGOING.
+func (c *CalicoCfg) EnableIPV4POOL_NAT_OUTGOING() bool {
+	if c.Ipv4NatOutgoing == nil {
+		return true
+	}
+	return *c.Ipv4NatOutgoing
+}
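Note the inverted default: EnableIPV4POOL_NAT_OUTGOING returns true when unset, matching Calico's own default for the CALICO_IPV4POOL_NAT_OUTGOING variable. A hedged sketch of how the toggle would surface in a calico-node env entry; CALICO_IPV4POOL_NAT_OUTGOING is a documented Calico variable, but the manifest wiring shown here is an assumption, not part of this hunk:

package main

import (
	"os"
	"text/template"
)

// A minimal env-entry fragment; the surrounding calico-node manifest is omitted.
const calicoEnv = `- name: CALICO_IPV4POOL_NAT_OUTGOING
  value: "{{ .NatOutgoing }}"
`

func main() {
	tpl := template.Must(template.New("calico-env").Parse(calicoEnv))
	// Rendering true matches EnableIPV4POOL_NAT_OUTGOING when the field is unset.
	if err := tpl.Execute(os.Stdout, map[string]bool{"NatOutgoing": true}); err != nil {
		panic(err)
	}
}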
@@ -94,7 +94,7 @@ func (o *MigrateCriOptions) Validate() error {
 	if o.Type == "" {
 		return errors.New("cri Type cannot be empty")
 	}
-	if o.Type != common.Docker && o.Type != common.Conatinerd {
+	if o.Type != common.Docker && o.Type != common.Containerd {
 		return errors.Errorf("cri Type is invalid: %s", o.Type)
 	}
 	if o.ClusterCfgFile == "" {
@@ -99,7 +99,7 @@ func (o *CreateClusterOptions) Complete(cmd *cobra.Command, args []string) error
 
 func (o *CreateClusterOptions) Validate(_ *cobra.Command, _ []string) error {
 	switch o.ContainerManager {
-	case common.Docker, common.Conatinerd, common.Crio, common.Isula:
+	case common.Docker, common.Containerd, common.Crio, common.Isula:
 	default:
 		return fmt.Errorf("unsupported container runtime [%s]", o.ContainerManager)
 	}
@@ -62,7 +62,7 @@ func NewCmdCreateImages() *cobra.Command {
 
 func (o *CreateImagesOptions) Validate(_ *cobra.Command, _ []string) error {
 	switch o.ContainerManager {
-	case common.Docker, common.Conatinerd, common.Crio, common.Isula:
+	case common.Docker, common.Containerd, common.Crio, common.Isula:
 	default:
 		return fmt.Errorf("unsupported container runtime [%s]", o.ContainerManager)
 	}
@@ -19,7 +19,6 @@ package plugin
 import (
 	"bytes"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"runtime"

@@ -79,7 +78,7 @@ func (o *PluginListOptions) Run() error {
 			continue
 		}
 
-		files, err := ioutil.ReadDir(dir)
+		files, err := os.ReadDir(dir)
 		if err != nil {
 			if _, ok := err.(*os.PathError); ok {
 				fmt.Fprintf(o.ErrOut, "Unable to read directory %q from your PATH: %v. Skipping...\n", dir, err)
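This and many of the following hunks are a mechanical migration off the io/ioutil package, deprecated since Go 1.16. The replacements are one-to-one except os.ReadDir, which returns []os.DirEntry rather than []os.FileInfo. A small self-contained demonstration of the new calls:

package main

import (
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	// os.WriteFile / os.ReadFile replace ioutil.WriteFile / ioutil.ReadFile 1:1.
	if err := os.WriteFile("demo.txt", []byte("hello"), 0644); err != nil {
		log.Fatal(err)
	}
	data, err := os.ReadFile("demo.txt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(data))

	// os.ReadDir replaces ioutil.ReadDir but yields []os.DirEntry; call
	// entry.Info() when the full FileInfo is needed.
	entries, err := os.ReadDir(".")
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Println(e.Name(), e.IsDir())
	}

	// io.ReadAll replaces ioutil.ReadAll.
	f, err := os.Open("demo.txt")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	b, _ := io.ReadAll(f)
	fmt.Println(len(b))
}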
@@ -20,7 +20,6 @@ import (
 	"bufio"
 	"context"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"sort"
 	"strings"

@@ -96,7 +95,9 @@ func CreateManifest(arg common.Argument, name string) error {
 		case "ubuntu":
 			id = "ubuntu"
 			v := strings.Split(osImageArr[1], ".")
-			version = fmt.Sprintf("%s.%s", v[0], v[1])
+			if len(v) >= 2 {
+				version = fmt.Sprintf("%s.%s", v[0], v[1])
+			}
 		case "centos":
 			id = "centos"
 			version = osImageArr[2]

@@ -164,6 +165,7 @@ func CreateManifest(arg common.Argument, name string) error {
 			CNI:               kubekeyv1alpha2.CNI{Version: kubekeyv1alpha2.DefaultCniVersion},
 			ETCD:              kubekeyv1alpha2.ETCD{Version: kubekeyv1alpha2.DefaultEtcdVersion},
 			Crictl:            kubekeyv1alpha2.Crictl{Version: kubekeyv1alpha2.DefaultCrictlVersion},
+			Calicoctl:         kubekeyv1alpha2.Calicoctl{Version: kubekeyv1alpha2.DefaultCalicoVersion},
 			ContainerRuntimes: containerArr,
 		},
 		Images: imageArr,

@@ -171,7 +173,7 @@ func CreateManifest(arg common.Argument, name string) error {
 
 	manifestStr, err := templates.RenderManifest(options)
 
-	if err := ioutil.WriteFile(arg.FilePath, []byte(manifestStr), 0644); err != nil {
+	if err := os.WriteFile(arg.FilePath, []byte(manifestStr), 0644); err != nil {
 		return errors.Wrap(err, fmt.Sprintf("write file %s failed", arg.FilePath))
 	}
 
@@ -19,7 +19,6 @@ package artifact
 import (
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"os/exec"
 	"path/filepath"

@@ -141,7 +140,7 @@ func (m *Md5Check) Execute(runtime connector.Runtime) error {
 		return nil
 	}
 
-	oldMd5, err := ioutil.ReadFile(oldFile)
+	oldMd5, err := os.ReadFile(oldFile)
 	if err != nil {
 		return errors.Wrapf(errors.WithStack(err), "read old md5 file %s failed", oldFile)
 	}
@@ -37,8 +37,14 @@ func K3sFilesDownloadHTTP(kubeConf *common.KubeConf, path, version, arch string,
 	kubecni := files.NewKubeBinary("kubecni", arch, kubekeyapiv1alpha2.DefaultCniVersion, path, kubeConf.Arg.DownloadCommand)
 	helm := files.NewKubeBinary("helm", arch, kubekeyapiv1alpha2.DefaultHelmVersion, path, kubeConf.Arg.DownloadCommand)
 	k3s := files.NewKubeBinary("k3s", arch, version, path, kubeConf.Arg.DownloadCommand)
+	calicoctl := files.NewKubeBinary("calicoctl", arch, kubekeyapiv1alpha2.DefaultCalicoVersion, path, kubeConf.Arg.DownloadCommand)
 
 	binaries := []*files.KubeBinary{k3s, helm, kubecni, etcd}
 
+	if kubeConf.Cluster.Network.Plugin == "calico" {
+		binaries = append(binaries, calicoctl)
+	}
+
 	binariesMap := make(map[string]*files.KubeBinary)
 	for _, binary := range binaries {
 		if err := binary.CreateBaseDir(); err != nil {
@@ -43,15 +43,20 @@ func K8sFilesDownloadHTTP(kubeConf *common.KubeConf, path, version, arch string,
 	crictl := files.NewKubeBinary("crictl", arch, kubekeyapiv1alpha2.DefaultCrictlVersion, path, kubeConf.Arg.DownloadCommand)
 	containerd := files.NewKubeBinary("containerd", arch, kubekeyapiv1alpha2.DefaultContainerdVersion, path, kubeConf.Arg.DownloadCommand)
 	runc := files.NewKubeBinary("runc", arch, kubekeyapiv1alpha2.DefaultRuncVersion, path, kubeConf.Arg.DownloadCommand)
+	calicoctl := files.NewKubeBinary("calicoctl", arch, kubekeyapiv1alpha2.DefaultCalicoVersion, path, kubeConf.Arg.DownloadCommand)
 
 	binaries := []*files.KubeBinary{kubeadm, kubelet, kubectl, helm, kubecni, crictl, etcd}
 
 	if kubeConf.Cluster.Kubernetes.ContainerManager == kubekeyapiv1alpha2.Docker {
 		binaries = append(binaries, docker)
-	} else if kubeConf.Cluster.Kubernetes.ContainerManager == kubekeyapiv1alpha2.Conatinerd {
+	} else if kubeConf.Cluster.Kubernetes.ContainerManager == kubekeyapiv1alpha2.Containerd {
 		binaries = append(binaries, containerd, runc)
 	}
 
+	if kubeConf.Cluster.Network.Plugin == "calico" {
+		binaries = append(binaries, calicoctl)
+	}
+
 	binariesMap := make(map[string]*files.KubeBinary)
 	for _, binary := range binaries {
 		if err := binary.CreateBaseDir(); err != nil {

@@ -103,7 +108,8 @@ func KubernetesArtifactBinariesDownload(manifest *common.ArtifactManifest, path,
 	kubecni := files.NewKubeBinary("kubecni", arch, m.Components.CNI.Version, path, manifest.Arg.DownloadCommand)
 	helm := files.NewKubeBinary("helm", arch, m.Components.Helm.Version, path, manifest.Arg.DownloadCommand)
 	crictl := files.NewKubeBinary("crictl", arch, m.Components.Crictl.Version, path, manifest.Arg.DownloadCommand)
-	binaries := []*files.KubeBinary{kubeadm, kubelet, kubectl, helm, kubecni, etcd}
+	calicoctl := files.NewKubeBinary("calicoctl", arch, m.Components.Calicoctl.Version, path, manifest.Arg.DownloadCommand)
+	binaries := []*files.KubeBinary{kubeadm, kubelet, kubectl, helm, kubecni, etcd, calicoctl}
 
 	containerManagerArr := make([]*files.KubeBinary, 0, 0)
 	containerManagerVersion := make(map[string]struct{})

@@ -156,7 +162,7 @@ func CriDownloadHTTP(kubeConf *common.KubeConf, path, arch string, pipelineCache
 	case common.Docker:
 		docker := files.NewKubeBinary("docker", arch, kubekeyapiv1alpha2.DefaultDockerVersion, path, kubeConf.Arg.DownloadCommand)
 		binaries = append(binaries, docker)
-	case common.Conatinerd:
+	case common.Containerd:
 		containerd := files.NewKubeBinary("containerd", arch, kubekeyapiv1alpha2.DefaultContainerdVersion, path, kubeConf.Arg.DownloadCommand)
 		runc := files.NewKubeBinary("runc", arch, kubekeyapiv1alpha2.DefaultRuncVersion, path, kubeConf.Arg.DownloadCommand)
 		crictl := files.NewKubeBinary("crictl", arch, kubekeyapiv1alpha2.DefaultCrictlVersion, path, kubeConf.Arg.DownloadCommand)
@@ -171,7 +171,7 @@ func (i *CriBinariesModule) Init() {
 	switch i.KubeConf.Arg.Type {
 	case common.Docker:
 		i.Tasks = CriBinaries(i)
-	case common.Conatinerd:
+	case common.Containerd:
 		i.Tasks = CriBinaries(i)
 	default:
 	}
@@ -18,7 +18,6 @@ package customscripts
 
 import (
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"strings"

@@ -79,7 +78,7 @@ func (t *CustomScriptTask) Execute(runtime connector.Runtime) error {
 	// Wrap the shell in a bash file if it spans multiple lines.
 	RunBash := t.script.Bash
 	if strings.Index(RunBash, "\n") > 0 {
-		tmpFile, err := ioutil.TempFile(os.TempDir(), t.taskDir)
+		tmpFile, err := os.CreateTemp(os.TempDir(), t.taskDir)
 		if err != nil {
 			return errors.Wrapf(err, "create tmp Bash: %s/%s in local node, err:%s", os.TempDir(), t.taskDir, err)
 		}
@@ -103,6 +103,15 @@ type ClearNodeOSModule struct {
 func (c *ClearNodeOSModule) Init() {
 	c.Name = "ClearNodeOSModule"
 
+	stopKubelet := &task.RemoteTask{
+		Name:     "StopKubelet",
+		Desc:     "Stop Kubelet",
+		Hosts:    c.Runtime.GetHostsByRole(common.Worker),
+		Prepare:  new(DeleteNode),
+		Action:   new(StopKubelet),
+		Parallel: true,
+	}
+
 	resetNetworkConfig := &task.RemoteTask{
 		Name: "ResetNetworkConfig",
 		Desc: "Reset os network config",

@@ -131,6 +140,7 @@ func (c *ClearNodeOSModule) Init() {
 	}
 
 	c.Tasks = []task.Interface{
+		stopKubelet,
 		resetNetworkConfig,
 		removeFiles,
 		daemonReload,
@@ -197,6 +197,15 @@ func (r *ResetNetworkConfig) Execute(runtime connector.Runtime) error {
 	return nil
 }
 
+type StopKubelet struct {
+	common.KubeAction
+}
+
+func (s *StopKubelet) Execute(runtime connector.Runtime) error {
+	_, _ = runtime.GetRunner().SudoCmd("systemctl disable kubelet && systemctl stop kubelet && exit 0", false)
+	return nil
+}
+
 type UninstallETCD struct {
 	common.KubeAction
 }
@@ -18,6 +18,7 @@ package templates
 
 import (
 	"fmt"
+	"github.com/kubesphere/kubekey/v3/cmd/kk/pkg/bootstrap/registry"
 	"text/template"
 
 	"github.com/lithammer/dedent"
@@ -63,41 +64,41 @@ echo 'net.bridge.bridge-nf-call-arptables = 1' >> /etc/sysctl.conf
 echo 'net.bridge.bridge-nf-call-ip6tables = 1' >> /etc/sysctl.conf
 echo 'net.bridge.bridge-nf-call-iptables = 1' >> /etc/sysctl.conf
 echo 'net.ipv4.ip_local_reserved_ports = 30000-32767' >> /etc/sysctl.conf
-echo 'vm.max_map_count = 262144' >> /etc/sysctl.conf
-echo 'vm.swappiness = 0' >> /etc/sysctl.conf
-echo 'fs.inotify.max_user_instances = 524288' >> /etc/sysctl.conf
-echo 'kernel.pid_max = 65535' >> /etc/sysctl.conf
-echo 'net.ipv4.tcp_tw_recycle = 0' >> /etc/sysctl.conf
-echo 'net.ipv4.tcp_tw_reuse = 0' >> /etc/sysctl.conf
-echo 'net.ipv4.conf.all.rp_filter = 1' >> /etc/sysctl.conf
-echo 'net.ipv4.conf.default.rp_filter = 1' >> /etc/sysctl.conf
-echo 'vm.overcommit_memory = 1' >> /etc/sysctl.conf
-echo 'fs.inotify.max_user_watches = 524288' >> /etc/sysctl.conf
-echo 'fs.pipe-max-size = 4194304' >> /etc/sysctl.conf
 echo 'net.core.netdev_max_backlog = 65535' >> /etc/sysctl.conf
 echo 'net.core.rmem_max = 33554432' >> /etc/sysctl.conf
 echo 'net.core.wmem_max = 33554432' >> /etc/sysctl.conf
-echo 'net.core.somaxconn = 32768' >> /etc/sysctl.conf
 echo 'net.ipv4.tcp_max_syn_backlog = 1048576' >> /etc/sysctl.conf
 echo 'net.ipv4.neigh.default.gc_thresh1 = 512' >> /etc/sysctl.conf
 echo 'net.ipv4.neigh.default.gc_thresh2 = 2048' >> /etc/sysctl.conf
 echo 'net.ipv4.neigh.default.gc_thresh3 = 4096' >> /etc/sysctl.conf
+echo 'net.core.somaxconn = 32768' >> /etc/sysctl.conf
-echo 'net.ipv4.conf.eth0.arp_accept = 1' >> /etc/sysctl.conf
-echo 'fs.aio-max-nr = 262144' >> /etc/sysctl.conf
 echo 'net.ipv4.tcp_retries2 = 15' >> /etc/sysctl.conf
 echo 'net.ipv4.tcp_max_tw_buckets = 1048576' >> /etc/sysctl.conf
 echo 'net.ipv4.tcp_max_orphans = 65535' >> /etc/sysctl.conf
 echo 'net.ipv4.udp_rmem_min = 131072' >> /etc/sysctl.conf
 echo 'net.ipv4.udp_wmem_min = 131072' >> /etc/sysctl.conf
+echo 'net.ipv4.conf.all.rp_filter = 1' >> /etc/sysctl.conf
+echo 'net.ipv4.conf.default.rp_filter = 1' >> /etc/sysctl.conf
+echo 'net.ipv4.conf.all.arp_accept = 1' >> /etc/sysctl.conf
+echo 'net.ipv4.conf.default.arp_accept = 1' >> /etc/sysctl.conf
+echo 'net.ipv4.conf.all.arp_ignore = 1' >> /etc/sysctl.conf
+echo 'net.ipv4.conf.default.arp_ignore = 1' >> /etc/sysctl.conf
+echo 'vm.max_map_count = 262144' >> /etc/sysctl.conf
+echo 'vm.swappiness = 0' >> /etc/sysctl.conf
+echo 'vm.overcommit_memory = 1' >> /etc/sysctl.conf
+echo 'fs.inotify.max_user_instances = 524288' >> /etc/sysctl.conf
+echo 'fs.inotify.max_user_watches = 10240001' >> /etc/sysctl.conf
+echo 'fs.pipe-max-size = 4194304' >> /etc/sysctl.conf
+echo 'fs.aio-max-nr = 262144' >> /etc/sysctl.conf
+echo 'kernel.pid_max = 65535' >> /etc/sysctl.conf
+echo 'kernel.watchdog_thresh = 5' >> /etc/sysctl.conf
+echo 'kernel.hung_task_timeout_secs = 5' >> /etc/sysctl.conf
 
 #See https://help.aliyun.com/document_detail/118806.html#uicontrol-e50-ddj-w0y
 sed -r -i "s@#{0,}?net.ipv4.tcp_tw_recycle ?= ?(0|1|2)@net.ipv4.tcp_tw_recycle = 0@g" /etc/sysctl.conf
 sed -r -i "s@#{0,}?net.ipv4.tcp_tw_reuse ?= ?(0|1)@net.ipv4.tcp_tw_reuse = 0@g" /etc/sysctl.conf
 sed -r -i "s@#{0,}?net.ipv4.conf.all.rp_filter ?= ?(0|1|2)@net.ipv4.conf.all.rp_filter = 1@g" /etc/sysctl.conf
 sed -r -i "s@#{0,}?net.ipv4.conf.default.rp_filter ?= ?(0|1|2)@net.ipv4.conf.default.rp_filter = 1@g" /etc/sysctl.conf
 
 sed -r -i "s@#{0,}?net.ipv4.ip_forward ?= ?(0|1)@net.ipv4.ip_forward = 1@g" /etc/sysctl.conf
 sed -r -i "s@#{0,}?net.bridge.bridge-nf-call-arptables ?= ?(0|1)@net.bridge.bridge-nf-call-arptables = 1@g" /etc/sysctl.conf
 sed -r -i "s@#{0,}?net.bridge.bridge-nf-call-ip6tables ?= ?(0|1)@net.bridge.bridge-nf-call-ip6tables = 1@g" /etc/sysctl.conf
@@ -127,6 +128,12 @@ sed -r -i "s@#{0,}?net.ipv4.udp_rmem_min ?= ?([0-9]{1,})@net.ipv4.udp_rmem_min
 sed -r -i "s@#{0,}?net.ipv4.udp_wmem_min ?= ?([0-9]{1,})@net.ipv4.udp_wmem_min = 131072@g" /etc/sysctl.conf
 sed -r -i "s@#{0,}?net.ipv4.conf.all.arp_ignore ?= ??(0|1|2)@net.ipv4.conf.all.arp_ignore = 1@g" /etc/sysctl.conf
 sed -r -i "s@#{0,}?net.ipv4.conf.default.arp_ignore ?= ??(0|1|2)@net.ipv4.conf.default.arp_ignore = 1@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?kernel.watchdog_thresh ?= ?([0-9]{1,})@kernel.watchdog_thresh = 5@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?kernel.hung_task_timeout_secs ?= ?([0-9]{1,})@kernel.hung_task_timeout_secs = 5@g" /etc/sysctl.conf
+
+tmpfile="$$.tmp"
+awk ' !x[$0]++{print > "'$tmpfile'"}' /etc/sysctl.conf
+mv $tmpfile /etc/sysctl.conf
 
 # ulimit
 echo "* soft nofile 1048576" >> /etc/security/limits.conf
@@ -137,21 +144,15 @@ echo "* soft memlock unlimited" >> /etc/security/limits.conf
 echo "* hard memlock unlimited" >> /etc/security/limits.conf
 
 sed -r -i "s@#{0,}?\* soft nofile ?([0-9]{1,})@\* soft nofile 1048576@g" /etc/security/limits.conf
-sed -r -i "s@#{0,}?\* hard nofile ?([0-9]{1,})@\* soft nofile 1048576@g" /etc/security/limits.conf
-sed -r -i "s@#{0,}?\* soft nproc ?([0-9]{1,})@\* soft nofile 65536@g" /etc/security/limits.conf
-sed -r -i "s@#{0,}?\* hard nproc ?([0-9]{1,})@\* soft nofile 65536@g" /etc/security/limits.conf
+sed -r -i "s@#{0,}?\* hard nofile ?([0-9]{1,})@\* hard nofile 1048576@g" /etc/security/limits.conf
+sed -r -i "s@#{0,}?\* soft nproc ?([0-9]{1,})@\* soft nproc 65536@g" /etc/security/limits.conf
+sed -r -i "s@#{0,}?\* hard nproc ?([0-9]{1,})@\* hard nproc 65536@g" /etc/security/limits.conf
 sed -r -i "s@#{0,}?\* soft memlock ?([0-9]{1,}([TGKM]B){0,1}|unlimited)@\* soft memlock unlimited@g" /etc/security/limits.conf
 sed -r -i "s@#{0,}?\* hard memlock ?([0-9]{1,}([TGKM]B){0,1}|unlimited)@\* hard memlock unlimited@g" /etc/security/limits.conf
 
 # kernel
 echo never > /sys/kernel/mm/transparent_hugepage/enabled
 echo never > /sys/kernel/mm/transparent_hugepage/defrag
+echo 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' >> /etc/rc.local
+echo 'echo never > /sys/kernel/mm/transparent_hugepage/defrag' >> /etc/rc.local
 
-tmpfile="$$.tmp"
-awk ' !x[$0]++{print > "'$tmpfile'"}' /etc/sysctl.conf
-mv $tmpfile /etc/sysctl.conf
 awk ' !x[$0]++{print > "'$tmpfile'"}' /etc/security/limits.conf
 mv $tmpfile /etc/security/limits.conf
 
 systemctl stop firewalld 1>/dev/null 2>/dev/null
 systemctl disable firewalld 1>/dev/null 2>/dev/null
@@ -203,6 +204,7 @@ cat >>/etc/hosts<<EOF
 # kubekey hosts END
 EOF
 
+sync
 echo 3 > /proc/sys/vm/drop_caches
 
 # Make sure the iptables utility doesn't use the nftables backend.
@@ -211,9 +213,6 @@ update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy >/dev/null 2>&1 |
 update-alternatives --set arptables /usr/sbin/arptables-legacy >/dev/null 2>&1 || true
 update-alternatives --set ebtables /usr/sbin/ebtables-legacy >/dev/null 2>&1 || true
 
-ulimit -u 65535
-ulimit -n 65535
-
 `)))
 
 func GenerateHosts(runtime connector.ModuleRuntime, kubeConf *common.KubeConf) []string {
@@ -222,8 +221,6 @@ func GenerateHosts(runtime connector.ModuleRuntime, kubeConf *common.KubeConf) [
 
 	if kubeConf.Cluster.ControlPlaneEndpoint.Address != "" {
 		lbHost = fmt.Sprintf("%s %s", kubeConf.Cluster.ControlPlaneEndpoint.Address, kubeConf.Cluster.ControlPlaneEndpoint.Domain)
-	} else {
-		lbHost = fmt.Sprintf("%s %s", runtime.GetHostsByRole(common.Master)[0].GetInternalAddress(), kubeConf.Cluster.ControlPlaneEndpoint.Domain)
 	}
 
 	for _, host := range runtime.GetAllHosts() {
@@ -237,7 +234,12 @@ func GenerateHosts(runtime connector.ModuleRuntime, kubeConf *common.KubeConf) [
 	}
 
 	if len(runtime.GetHostsByRole(common.Registry)) > 0 {
-		hostsList = append(hostsList, fmt.Sprintf("%s %s", runtime.GetHostsByRole(common.Registry)[0].GetInternalAddress(), kubeConf.Cluster.Registry.PrivateRegistry))
+		if kubeConf.Cluster.Registry.PrivateRegistry != "" {
+			hostsList = append(hostsList, fmt.Sprintf("%s %s", runtime.GetHostsByRole(common.Registry)[0].GetInternalAddress(), kubeConf.Cluster.Registry.PrivateRegistry))
+		} else {
+			hostsList = append(hostsList, fmt.Sprintf("%s %s", runtime.GetHostsByRole(common.Registry)[0].GetInternalAddress(), registry.RegistryCertificateBaseName))
+		}
 	}
 
 	hostsList = append(hostsList, lbHost)
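The registry /etc/hosts line now falls back to the certificate base name when no private registry is configured. A standalone sketch of just that branch; the constant's value is an assumption, since the diff only references registry.RegistryCertificateBaseName:

package main

import "fmt"

// Assumed value; the diff only names the constant.
const registryCertificateBaseName = "dockerhub.kubekey.local"

// registryHostsLine mirrors the new fallback in GenerateHosts.
func registryHostsLine(internalAddress, privateRegistry string) string {
	if privateRegistry != "" {
		return fmt.Sprintf("%s %s", internalAddress, privateRegistry)
	}
	return fmt.Sprintf("%s %s", internalAddress, registryCertificateBaseName)
}

func main() {
	fmt.Println(registryHostsLine("192.168.0.3", ""))                  // falls back to the cert name
	fmt.Println(registryHostsLine("192.168.0.3", "harbor.acme.local")) // explicit registry wins
}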
@@ -17,13 +17,13 @@
 package kubernetes
 
 import (
-	"io/ioutil"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/clientcmd"
+	"os"
 )
 
 func NewClient(kubeConfig string) (*kubernetes.Clientset, error) {
-	data, err := ioutil.ReadFile(kubeConfig)
+	data, err := os.ReadFile(kubeConfig)
 	if err != nil {
 		return nil, err
 	}
@@ -18,7 +18,7 @@ package common
 
 import (
 	"encoding/json"
-	"io/ioutil"
+	"os"
 	"path/filepath"
 
 	"github.com/pkg/errors"

@@ -54,7 +54,7 @@ func NewArtifactRuntime(arg ArtifactArgument) (*ArtifactRuntime, error) {
 		return nil, errors.Wrap(err, "Failed to look up current directory")
 	}
 
-	fileByte, err := ioutil.ReadFile(fp)
+	fileByte, err := os.ReadFile(fp)
 	if err != nil {
 		return nil, errors.Wrapf(err, "Failed to read file %s", fp)
 	}
@@ -62,7 +62,7 @@ const (
 
 	Docker     = "docker"
 	Crictl     = "crictl"
-	Conatinerd = "containerd"
+	Containerd = "containerd"
 	Crio       = "crio"
 	Isula      = "isula"
 	Runc       = "runc"
@@ -128,3 +128,11 @@ func (e *EnableKubeProxy) PreCheck(_ connector.Runtime) (bool, error) {
 	}
 	return false, nil
 }
+
+type EnableAudit struct {
+	KubePrepare
+}
+
+func (e *EnableAudit) PreCheck(_ connector.Runtime) (bool, error) {
+	return e.KubeConf.Cluster.Kubernetes.EnableAudit(), nil
+}
@@ -113,6 +113,7 @@ func (d *DefaultLoader) Load() (*kubekeyapiv1alpha2.Cluster, error) {
 		Worker:   {hostname},
 		Registry: {hostname},
 	}
+	allInOne.Spec.ControlPlaneEndpoint.Address = "127.0.0.1"
 	if ver := normalizedBuildVersion(d.KubernetesVersion); ver != "" {
 		s := strings.Split(ver, "-")
 		if len(s) > 1 {
@@ -61,7 +61,7 @@ func GenerateKubeKeyConfig(arg common.Argument, name string) error {
 
 	if k8sVersion, err := versionutil.ParseGeneric(opt.KubeVersion); err == nil {
 		if k8sVersion.AtLeast(versionutil.MustParseSemantic("v1.24.0")) {
-			opt.ContainerManager = common.Conatinerd
+			opt.ContainerManager = common.Containerd
 		} else {
 			opt.ContainerManager = common.Docker
 		}
@@ -52,7 +52,7 @@ func (s *SyncContainerd) Execute(runtime connector.Runtime) error {
 	}
 	binariesMap := binariesMapObj.(map[string]*files.KubeBinary)
 
-	containerd, ok := binariesMap[common.Conatinerd]
+	containerd, ok := binariesMap[common.Containerd]
 	if !ok {
 		return errors.New("get KubeBinary key containerd by pipeline cache failed")
 	}

@@ -226,7 +226,7 @@ func (i *RestartCri) Execute(runtime connector.Runtime) error {
 		if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("systemctl daemon-reload && systemctl restart docker "), true); err != nil {
 			return errors.Wrap(err, "restart docker")
 		}
-	case common.Conatinerd:
+	case common.Containerd:
 		if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("systemctl daemon-reload && systemctl restart containerd"), true); err != nil {
 			return errors.Wrap(err, "restart containerd")
 		}

@@ -249,7 +249,7 @@ func (i *EditKubeletCri) Execute(runtime connector.Runtime) error {
 			true); err != nil {
 			return errors.Wrap(err, "Change KubeletTo Containerd failed")
 		}
-	case common.Conatinerd:
+	case common.Containerd:
 		if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf(
 			"sed -i 's#--network-plugin=cni --pod#--network-plugin=cni --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --pod#' /var/lib/kubelet/kubeadm-flags.env"),
 			true); err != nil {

@@ -333,7 +333,7 @@ func MigrateSelfNodeCriTasks(runtime connector.Runtime, kubeAction common.KubeAc
 			Parallel: false,
 		}
 		tasks = append(tasks, CordonNode, DrainNode, Uninstall)
-	case common.Conatinerd:
+	case common.Containerd:
 		Uninstall := &task.RemoteTask{
 			Name:     "UninstallContainerd",
 			Desc:     "Uninstall containerd",

@@ -418,7 +418,7 @@ func MigrateSelfNodeCriTasks(runtime connector.Runtime, kubeAction common.KubeAc
 		tasks = append(tasks, syncBinaries, generateDockerService, generateDockerConfig, enableDocker, dockerLoginRegistry,
 			RestartCri, EditKubeletCri, RestartKubeletNode, UnCordonNode)
 	}
-	if kubeAction.KubeConf.Arg.Type == common.Conatinerd {
+	if kubeAction.KubeConf.Arg.Type == common.Containerd {
 		syncContainerd := &task.RemoteTask{
 			Name: "SyncContainerd",
 			Desc: "Sync containerd binaries",
@@ -48,7 +48,7 @@ func (i *InstallContainerModule) Init() {
 	switch i.KubeConf.Cluster.Kubernetes.ContainerManager {
 	case common.Docker:
 		i.Tasks = InstallDocker(i)
-	case common.Conatinerd:
+	case common.Containerd:
 		i.Tasks = InstallContainerd(i)
 	case common.Crio:
 		// TODO: Add the steps of cri-o's installation.

@@ -263,7 +263,7 @@ func (i *UninstallContainerModule) Init() {
 	switch i.KubeConf.Cluster.Kubernetes.ContainerManager {
 	case common.Docker:
 		i.Tasks = UninstallDocker(i)
-	case common.Conatinerd:
+	case common.Containerd:
 		i.Tasks = UninstallContainerd(i)
 	case common.Crio:
 		// TODO: Add the steps of cri-o's uninstallation.
@@ -22,7 +22,6 @@ import (
 	"encoding/base64"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net"
 	"os"
 	"path"

@@ -187,7 +186,7 @@ func validateOptions(cfg Cfg) (Cfg, error) {
 	}
 
 	if len(cfg.PrivateKey) == 0 && len(cfg.KeyFile) > 0 {
-		content, err := ioutil.ReadFile(cfg.KeyFile)
+		content, err := os.ReadFile(cfg.KeyFile)
 		if err != nil {
 			return cfg, errors.Wrapf(err, "Failed to read keyfile %q", cfg.KeyFile)
 		}

@@ -462,7 +461,7 @@ func (c *connection) Scp(src, dst string, host Host) error {
 }
 
 func (c *connection) copyDirToRemote(src, dst string, scrErr *scpErr, host Host) {
-	localFiles, err := ioutil.ReadDir(src)
+	localFiles, err := os.ReadDir(src)
 	if err != nil {
 		logger.Log.Errorf("read local path dir %s failed %v", src, err)
 		scrErr.err = err
@@ -23,7 +23,6 @@ import (
 	"fmt"
 	"io"
 	"io/fs"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"strings"

@@ -131,7 +130,7 @@ func WriteFile(fileName string, content []byte) error {
 		}
 	}
 
-	if err := ioutil.WriteFile(fileName, content, common.FileMode0644); err != nil {
+	if err := os.WriteFile(fileName, content, common.FileMode0644); err != nil {
 		return err
 	}
 	return nil
@@ -19,7 +19,6 @@ package etcd
 import (
 	"crypto/x509"
 	"fmt"
-	"io/ioutil"
 	"net"
 	"os"
 	"path/filepath"

@@ -245,12 +244,12 @@ func (f *FetchCertsForExternalEtcd) Execute(runtime connector.Runtime) error {
 			dstCert := fmt.Sprintf("%s/%s", pkiPath, dstCertFileName)
 			dstCertsFiles = append(dstCertsFiles, dstCertFileName)
 
-			data, err := ioutil.ReadFile(certPath)
+			data, err := os.ReadFile(certPath)
 			if err != nil {
 				return errors.Wrap(err, "failed to copy certificate content")
 			}
 
-			if err := ioutil.WriteFile(dstCert, data, 0600); err != nil {
+			if err := os.WriteFile(dstCert, data, 0600); err != nil {
 				return errors.Wrap(err, "failed to copy certificate content")
 			}
 		}
@@ -20,7 +20,6 @@ import (
 	"crypto/sha256"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"os/exec"
 	"path/filepath"

@@ -51,6 +50,7 @@ const (
 	compose    = "compose"
 	containerd = "containerd"
 	runc       = "runc"
+	calicoctl  = "calicoctl"
 )
 
 // KubeBinary Type field const

@@ -209,6 +209,13 @@ func NewKubeBinary(name, arch, version, prePath string, getCmd func(path, url st
 		if component.Zone == "cn" {
 			component.Url = fmt.Sprintf("https://kubernetes-release.pek3b.qingstor.com/opencontainers/runc/releases/download/%s/runc.%s", version, arch)
 		}
+	case calicoctl:
+		component.Type = CNI
+		component.FileName = calicoctl
+		component.Url = fmt.Sprintf("https://github.com/projectcalico/calico/releases/download/%s/calicoctl-linux-%s", version, arch)
+		if component.Zone == "cn" {
+			component.Url = fmt.Sprintf("https://kubernetes-release.pek3b.qingstor.com/projectcalico/calico/releases/download/%s/calicoctl-linux-%s", version, arch)
+		}
 	default:
 		logger.Log.Fatalf("unsupported kube binaries %s", name)
 	}

@@ -313,7 +320,7 @@ func sha256sum(path string) (string, error) {
 	}
 	defer file.Close()
 
-	data, err := ioutil.ReadAll(file)
+	data, err := io.ReadAll(file)
 	if err != nil {
 		return "", err
 	}
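The calicoctl case follows the existing NewKubeBinary convention: a GitHub release URL, swapped for the qingstor mirror when the zone is "cn". A standalone sketch of just the URL construction (the version and arches are example arguments only):

package main

import "fmt"

// calicoctlURL reproduces the download URL format added to NewKubeBinary.
func calicoctlURL(version, arch, zone string) string {
	if zone == "cn" {
		return fmt.Sprintf("https://kubernetes-release.pek3b.qingstor.com/projectcalico/calico/releases/download/%s/calicoctl-linux-%s", version, arch)
	}
	return fmt.Sprintf("https://github.com/projectcalico/calico/releases/download/%s/calicoctl-linux-%s", version, arch)
}

func main() {
	fmt.Println(calicoctlURL("v3.23.2", "amd64", ""))
	fmt.Println(calicoctlURL("v3.23.2", "arm64", "cn"))
}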
@@ -19,7 +19,7 @@ package images
 import (
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
+	"os"
 	"path/filepath"
 	"reflect"
 	"strings"

@@ -179,7 +179,7 @@ func (s *SaveImages) Execute(runtime connector.Runtime) error {
 			// Ex:
 			// oci:./kubekey/artifact/images:kubesphere:kube-apiserver:v1.21.5-amd64
 			// oci:./kubekey/artifact/images:kubesphere:kube-apiserver:v1.21.5-arm-v7
-			destName := fmt.Sprintf("oci:%s:%s:%s-%s%s", dirName, imageFullName[1], imageFullName[2], arch, variant)
+			destName := fmt.Sprintf("oci:%s:%s:%s-%s%s", dirName, imageFullName[1], suffixImageName(imageFullName[2:]), arch, variant)
 			logger.Log.Infof("Source: %s", srcName)
 			logger.Log.Infof("Destination: %s", destName)
 

@@ -227,7 +227,7 @@ func (c *CopyImagesToRegistry) Execute(runtime connector.Runtime) error {
 		imagesPath = filepath.Join(runtime.GetWorkDir(), "images")
 	}
 
-	indexFile, err := ioutil.ReadFile(filepath.Join(imagesPath, "index.json"))
+	indexFile, err := os.ReadFile(filepath.Join(imagesPath, "index.json"))
 	if err != nil {
 		return errors.Errorf("read index.json failed: %s", err)
 	}
@@ -173,11 +173,18 @@ func NewManifestSpec(image string, entries []manifesttypes.ManifestEntry) manife
 
 func validateImageName(imageFullName string) error {
 	image := strings.Split(imageFullName, "/")
-	if len(image) != 3 {
-		return errors.Errorf("image %s is invalid, only the format \"registry/namespace/name:tag\" is supported", imageFullName)
+	if len(image) < 3 {
+		return errors.Errorf("image %s is invalid, the image path must contain at least two slash-separated components", imageFullName)
 	}
-	if len(strings.Split(image[2], ":")) != 2 {
-		return errors.Errorf("image %s is invalid, only the format \"registry/namespace/name:tag\" is supported", imageFullName)
+	if len(strings.Split(image[len(image)-1], ":")) != 2 {
+		return errors.Errorf(`image %s is invalid, the image path must contain ":"`, imageFullName)
 	}
 	return nil
 }
+
+func suffixImageName(imageFullName []string) string {
+	if len(imageFullName) >= 2 {
+		return strings.Join(imageFullName, "/")
+	}
+	return imageFullName[0]
+}
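The relaxed validation accepts image paths with more than two slashes, and suffixImageName rejoins the extra segments for the destination name. A standalone sketch of both helpers, using simplified error text:

package main

import (
	"fmt"
	"strings"
)

// validateImageName mirrors the relaxed check from the diff: at least
// registry/namespace/name is required, but deeper paths are now allowed.
func validateImageName(imageFullName string) error {
	image := strings.Split(imageFullName, "/")
	if len(image) < 3 {
		return fmt.Errorf("image %s is invalid: need at least two slashes", imageFullName)
	}
	if len(strings.Split(image[len(image)-1], ":")) != 2 {
		return fmt.Errorf(`image %s is invalid: missing ":tag"`, imageFullName)
	}
	return nil
}

// suffixImageName rejoins everything after the namespace, so
// []string{"sub", "app:v1.0"} becomes "sub/app:v1.0".
func suffixImageName(parts []string) string {
	if len(parts) >= 2 {
		return strings.Join(parts, "/")
	}
	return parts[0]
}

func main() {
	// Previously rejected (four segments); now valid.
	fmt.Println(validateImageName("registry.local/ns/sub/app:v1.0")) // <nil>
	fmt.Println(suffixImageName([]string{"sub", "app:v1.0"}))        // sub/app:v1.0
}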
@@ -18,7 +18,7 @@ package k3s
 
 import (
 	"fmt"
-	"io/ioutil"
+	"os"
 	"path/filepath"
 	"regexp"
 	"strings"

@@ -117,7 +117,7 @@ func (k *K3sStatus) LoadKubeConfig(runtime connector.Runtime, kubeConf *common.K
 	newServer := fmt.Sprintf("server: https://%s:%d", kubeConf.Cluster.ControlPlaneEndpoint.Address, kubeConf.Cluster.ControlPlaneEndpoint.Port)
 	newKubeConfigStr := strings.Replace(k.KubeConfig, oldServer, newServer, -1)
 
-	if err := ioutil.WriteFile(kubeConfigPath, []byte(newKubeConfigStr), 0644); err != nil {
+	if err := os.WriteFile(kubeConfigPath, []byte(newKubeConfigStr), 0644); err != nil {
 		return err
 	}
 	return nil
@@ -97,19 +97,22 @@ func (s *SyncKubeBinary) Execute(runtime connector.Runtime) error {
 	}
 	binariesMap := binariesMapObj.(map[string]*files.KubeBinary)
 
-	if err := SyncKubeBinaries(runtime, binariesMap); err != nil {
+	if err := SyncKubeBinaries(s, runtime, binariesMap); err != nil {
 		return err
 	}
 	return nil
 }
 
 // SyncKubeBinaries is used to sync kubernetes' binaries to each node.
-func SyncKubeBinaries(runtime connector.Runtime, binariesMap map[string]*files.KubeBinary) error {
+func SyncKubeBinaries(s *SyncKubeBinary, runtime connector.Runtime, binariesMap map[string]*files.KubeBinary) error {
 	if err := utils.ResetTmpDir(runtime); err != nil {
 		return err
 	}
 
 	binaryList := []string{"k3s", "helm", "kubecni"}
+	if s.KubeConf.Cluster.Network.Plugin == "calico" {
+		binaryList = append(binaryList, "calicoctl")
+	}
 	for _, name := range binaryList {
 		binary, ok := binariesMap[name]
 		if !ok {
@@ -18,7 +18,7 @@ package k8e
 
 import (
 	"fmt"
-	"io/ioutil"
+	"os"
 	"path/filepath"
 	"regexp"
 	"strings"

@@ -117,7 +117,7 @@ func (k *K8eStatus) LoadKubeConfig(runtime connector.Runtime, kubeConf *common.K
 	newServer := fmt.Sprintf("server: https://%s:%d", kubeConf.Cluster.ControlPlaneEndpoint.Address, kubeConf.Cluster.ControlPlaneEndpoint.Port)
 	newKubeConfigStr := strings.Replace(k.KubeConfig, oldServer, newServer, -1)
 
-	if err := ioutil.WriteFile(kubeConfigPath, []byte(newKubeConfigStr), 0644); err != nil {
+	if err := os.WriteFile(kubeConfigPath, []byte(newKubeConfigStr), 0644); err != nil {
 		return err
 	}
 	return nil
@@ -18,7 +18,7 @@ package kubernetes
 
 import (
 	"fmt"
-	"io/ioutil"
+	"os"
 	"path/filepath"
 	"regexp"
 	"strings"

@@ -142,10 +142,13 @@ func (k *KubernetesStatus) LoadKubeConfig(runtime connector.Runtime, kubeConf *c
 	kubeConfigStr := k.KubeConfig
 
 	oldServer := fmt.Sprintf("server: https://%s:%d", kubeConf.Cluster.ControlPlaneEndpoint.Domain, kubeConf.Cluster.ControlPlaneEndpoint.Port)
+	if kubeConf.Cluster.ControlPlaneEndpoint.Address == "" {
+		kubeConf.Cluster.ControlPlaneEndpoint.Address = runtime.GetHostsByRole(common.Master)[0].GetAddress()
+	}
 	newServer := fmt.Sprintf("server: https://%s:%d", kubeConf.Cluster.ControlPlaneEndpoint.Address, kubeConf.Cluster.ControlPlaneEndpoint.Port)
 	newKubeConfigStr := strings.Replace(kubeConfigStr, oldServer, newServer, -1)
 
-	if err := ioutil.WriteFile(kubeConfigPath, []byte(newKubeConfigStr), 0644); err != nil {
+	if err := os.WriteFile(kubeConfigPath, []byte(newKubeConfigStr), 0644); err != nil {
 		return err
 	}
 	return nil
@@ -149,6 +149,40 @@ func (i *InitKubernetesModule) Init() {
 		Parallel: true,
 	}
 
+	generateAuditPolicy := &task.RemoteTask{
+		Name:  "GenerateAuditPolicy",
+		Desc:  "Generate audit policy",
+		Hosts: i.Runtime.GetHostsByRole(common.Master),
+		Prepare: &prepare.PrepareCollection{
+			new(common.EnableAudit),
+			new(common.OnlyFirstMaster),
+			&ClusterIsExist{Not: true},
+		},
+		Action: &action.Template{
+			Template: templates.AuditPolicy,
+			Dst:      filepath.Join("/etc/kubernetes/audit", templates.AuditPolicy.Name()),
+		},
+		Parallel: true,
+		Retry:    2,
+	}
+
+	generateAuditWebhook := &task.RemoteTask{
+		Name:  "GenerateAuditWebhook",
+		Desc:  "Generate audit webhook",
+		Hosts: i.Runtime.GetHostsByRole(common.Master),
+		Prepare: &prepare.PrepareCollection{
+			new(common.EnableAudit),
+			new(common.OnlyFirstMaster),
+			&ClusterIsExist{Not: true},
+		},
+		Action: &action.Template{
+			Template: templates.AuditWebhook,
+			Dst:      filepath.Join("/etc/kubernetes/audit", templates.AuditWebhook.Name()),
+		},
+		Parallel: true,
+		Retry:    2,
+	}
+
 	kubeadmInit := &task.RemoteTask{
 		Name: "KubeadmInit",
 		Desc: "Init cluster using kubeadm",

@@ -190,6 +224,8 @@ func (i *InitKubernetesModule) Init() {
 
 	i.Tasks = []task.Interface{
 		generateKubeadmConfig,
+		generateAuditPolicy,
+		generateAuditWebhook,
 		kubeadmInit,
 		copyKubeConfig,
 		removeMasterTaint,

@@ -220,6 +256,38 @@ func (j *JoinNodesModule) Init() {
 		Parallel: true,
 	}
 
+	generateAuditPolicy := &task.RemoteTask{
+		Name:  "GenerateAuditPolicy",
+		Desc:  "Generate audit policy",
+		Hosts: j.Runtime.GetHostsByRole(common.Master),
+		Prepare: &prepare.PrepareCollection{
+			new(common.EnableAudit),
+			&NodeInCluster{Not: true},
+		},
+		Action: &action.Template{
+			Template: templates.AuditPolicy,
+			Dst:      filepath.Join("/etc/kubernetes/audit", templates.AuditPolicy.Name()),
+		},
+		Parallel: true,
+		Retry:    2,
+	}
+
+	generateAuditWebhook := &task.RemoteTask{
+		Name:  "GenerateAuditWebhook",
+		Desc:  "Generate audit webhook",
+		Hosts: j.Runtime.GetHostsByRole(common.Master),
+		Prepare: &prepare.PrepareCollection{
+			new(common.EnableAudit),
+			&NodeInCluster{Not: true},
+		},
+		Action: &action.Template{
+			Template: templates.AuditWebhook,
+			Dst:      filepath.Join("/etc/kubernetes/audit", templates.AuditWebhook.Name()),
+		},
+		Parallel: true,
+		Retry:    2,
+	}
+
 	joinMasterNode := &task.RemoteTask{
 		Name: "JoinControlPlaneNode",
 		Desc: "Join control-plane node",

@@ -281,6 +349,8 @@ func (j *JoinNodesModule) Init() {
 
 	j.Tasks = []task.Interface{
 		generateKubeadmConfig,
+		generateAuditPolicy,
+		generateAuditWebhook,
 		joinMasterNode,
 		joinWorkerNode,
 		copyKubeConfig,
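These tasks only render the policy and webhook files under /etc/kubernetes/audit; the apiserver is pointed at them via GetApiServerArgs in the next section. A hedged sketch of the likely flag shape; the exact flag set and file names are assumptions, since only the directory appears in this excerpt:

package main

import "fmt"

// getAPIServerArgs sketches the new shape of v1beta2.GetApiServerArgs: a
// second parameter appends audit-related extraArgs when auditing is enabled.
// The flag names and file names below are assumptions.
func getAPIServerArgs(enableAudit bool) []string {
	args := []string{"bind-address=0.0.0.0"}
	if enableAudit {
		args = append(args,
			"audit-policy-file=/etc/kubernetes/audit/audit-policy.yaml",
			"audit-webhook-config-file=/etc/kubernetes/audit/audit-webhook.yaml",
		)
	}
	return args
}

func main() {
	fmt.Println(getAPIServerArgs(true))
}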
@@ -108,19 +108,22 @@ func (i *SyncKubeBinary) Execute(runtime connector.Runtime) error {
 	}
 	binariesMap := binariesMapObj.(map[string]*files.KubeBinary)
 
-	if err := SyncKubeBinaries(runtime, binariesMap); err != nil {
+	if err := SyncKubeBinaries(i, runtime, binariesMap); err != nil {
 		return err
 	}
 	return nil
 }
 
 // SyncKubeBinaries is used to sync kubernetes' binaries to each node.
-func SyncKubeBinaries(runtime connector.Runtime, binariesMap map[string]*files.KubeBinary) error {
+func SyncKubeBinaries(i *SyncKubeBinary, runtime connector.Runtime, binariesMap map[string]*files.KubeBinary) error {
 	if err := utils.ResetTmpDir(runtime); err != nil {
 		return err
 	}
 
 	binaryList := []string{"kubeadm", "kubelet", "kubectl", "helm", "kubecni"}
+	if i.KubeConf.Cluster.Network.Plugin == "calico" {
+		binaryList = append(binaryList, "calicoctl")
+	}
 	for _, name := range binaryList {
 		binary, ok := binariesMap[name]
 		if !ok {

@@ -249,7 +252,7 @@ func (g *GenerateKubeadmConfig) Execute(runtime connector.Runtime) error {
 		}
 	}
 
-	_, ApiServerArgs := util.GetArgs(v1beta2.GetApiServerArgs(g.WithSecurityEnhancement), g.KubeConf.Cluster.Kubernetes.ApiServerArgs)
+	_, ApiServerArgs := util.GetArgs(v1beta2.GetApiServerArgs(g.WithSecurityEnhancement, g.KubeConf.Cluster.Kubernetes.EnableAudit()), g.KubeConf.Cluster.Kubernetes.ApiServerArgs)
 	_, ControllerManagerArgs := util.GetArgs(v1beta2.GetControllermanagerArgs(g.KubeConf.Cluster.Kubernetes.Version, g.WithSecurityEnhancement), g.KubeConf.Cluster.Kubernetes.ControllerManagerArgs)
 	_, SchedulerArgs := util.GetArgs(v1beta2.GetSchedulerArgs(g.WithSecurityEnhancement), g.KubeConf.Cluster.Kubernetes.SchedulerArgs)
 

@@ -297,6 +300,7 @@ func (g *GenerateKubeadmConfig) Execute(runtime connector.Runtime) error {
 		"NodeCidrMaskSize":      g.KubeConf.Cluster.Kubernetes.NodeCidrMaskSize,
 		"CriSock":               g.KubeConf.Cluster.Kubernetes.ContainerRuntimeEndpoint,
 		"ApiServerArgs":         v1beta2.UpdateFeatureGatesConfiguration(ApiServerArgs, g.KubeConf),
+		"EnableAudit":           g.KubeConf.Cluster.Kubernetes.EnableAudit(),
 		"ControllerManagerArgs": v1beta2.UpdateFeatureGatesConfiguration(ControllerManagerArgs, g.KubeConf),
 		"SchedulerArgs":         v1beta2.UpdateFeatureGatesConfiguration(SchedulerArgs, g.KubeConf),
 		"KubeletConfiguration":  v1beta2.GetKubeletConfiguration(runtime, g.KubeConf, g.KubeConf.Cluster.Kubernetes.ContainerRuntimeEndpoint, g.WithSecurityEnhancement),

@@ -1000,7 +1004,7 @@ func (s *SaveKubeConfig) Execute(runtime connector.Runtime) error {
 
 	clusterPublicAddress := s.KubeConf.Cluster.ControlPlaneEndpoint.Address
 	master1 := runtime.GetHostsByRole(common.Master)[0]
-	if clusterPublicAddress == master1.GetInternalAddress() {
+	if clusterPublicAddress == master1.GetInternalAddress() || clusterPublicAddress == "" {
 		clusterPublicAddress = master1.GetAddress()
 	}
 
|
@ -0,0 +1,168 @@
|
|||
/*
|
||||
Copyright 2021 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package templates
|
||||
|
||||
import (
|
||||
"github.com/lithammer/dedent"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
// AuditPolicy defines the template of kube-apiserver audit-policy.
|
||||
var AuditPolicy = template.Must(template.New("audit-policy.yaml").Parse(
|
||||
dedent.Dedent(`apiVersion: audit.k8s.io/v1
|
||||
kind: Policy
|
||||
rules:
|
||||
# The following requests were manually identified as high-volume and low-risk,
|
||||
# so drop them.
|
||||
- level: None
|
||||
users: ["system:kube-proxy"]
|
||||
verbs: ["watch"]
|
||||
resources:
|
||||
- group: "" # core
|
||||
resources: ["endpoints", "services", "services/status"]
|
||||
- level: None
|
||||
users: ["system:unsecured"]
|
||||
namespaces: ["kube-system"]
|
||||
verbs: ["get"]
|
||||
resources:
|
||||
- group: "" # core
|
||||
resources: ["configmaps"]
|
||||
- level: None
|
||||
users: ["kubelet"] # legacy kubelet identity
|
||||
verbs: ["get"]
|
||||
resources:
|
||||
- group: "" # core
|
||||
resources: ["nodes", "nodes/status"]
|
||||
- level: None
|
||||
userGroups: ["system:nodes"]
|
||||
verbs: ["get"]
|
||||
resources:
|
||||
- group: "" # core
|
||||
resources: ["nodes", "nodes/status"]
|
||||
- level: None
|
||||
users:
|
||||
- system:kube-controller-manager
|
||||
- system:kube-scheduler
|
||||
- system:serviceaccount:kube-system:endpoint-controller
|
||||
verbs: ["get", "update"]
|
||||
namespaces: ["kube-system"]
|
||||
resources:
|
||||
- group: "" # core
|
||||
resources: ["endpoints"]
|
||||
- level: None
|
||||
users: ["system:apiserver"]
|
||||
verbs: ["get"]
|
||||
resources:
|
||||
- group: "" # core
|
||||
resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
|
||||
# Don't log HPA fetching metrics.
|
||||
- level: None
|
||||
users:
|
||||
- system:kube-controller-manager
|
||||
verbs: ["get", "list"]
|
||||
resources:
|
||||
- group: "metrics.k8s.io"
|
||||
# Don't log these read-only URLs.
|
||||
- level: None
|
||||
nonResourceURLs:
|
||||
- /healthz*
|
||||
- /version
|
||||
- /swagger*
|
||||
# Don't log events requests.
|
||||
- level: None
|
||||
resources:
|
||||
- group: "" # core
|
||||
resources: ["events"]
|
||||
# Secrets, ConfigMaps, TokenRequest and TokenReviews can contain sensitive & binary data,
|
||||
# so only log at the Metadata level.
|
||||
- level: Metadata
|
||||
resources:
|
||||
- group: "" # core
|
||||
resources: ["secrets", "configmaps", "serviceaccounts/token"]
|
||||
- group: authentication.k8s.io
|
||||
resources: ["tokenreviews"]
|
||||
omitStages:
|
||||
- "RequestReceived"
|
||||
# Get responses can be large; skip them.
|
||||
- level: Request
|
||||
verbs: ["get", "list", "watch"]
|
||||
resources:
|
||||
- group: "" # core
|
||||
- group: "admissionregistration.k8s.io"
|
||||
- group: "apiextensions.k8s.io"
|
||||
- group: "apiregistration.k8s.io"
|
||||
- group: "apps"
|
||||
- group: "authentication.k8s.io"
|
||||
- group: "authorization.k8s.io"
|
||||
- group: "autoscaling"
|
||||
- group: "batch"
|
||||
- group: "certificates.k8s.io"
|
||||
- group: "extensions"
|
||||
- group: "metrics.k8s.io"
|
||||
- group: "networking.k8s.io"
|
||||
- group: "policy"
|
||||
- group: "rbac.authorization.k8s.io"
|
||||
- group: "settings.k8s.io"
|
||||
- group: "storage.k8s.io"
|
||||
omitStages:
|
||||
- "RequestReceived"
|
||||
# Default level for known APIs
|
||||
- level: RequestResponse
|
||||
resources:
|
||||
- group: "" # core
|
||||
- group: "admissionregistration.k8s.io"
|
||||
- group: "apiextensions.k8s.io"
|
||||
- group: "apiregistration.k8s.io"
|
||||
- group: "apps"
|
||||
- group: "authentication.k8s.io"
|
||||
- group: "authorization.k8s.io"
|
||||
- group: "autoscaling"
|
||||
- group: "batch"
|
||||
- group: "certificates.k8s.io"
|
||||
- group: "extensions"
|
||||
- group: "metrics.k8s.io"
|
||||
- group: "networking.k8s.io"
|
||||
- group: "policy"
|
||||
- group: "rbac.authorization.k8s.io"
|
||||
- group: "settings.k8s.io"
|
||||
- group: "storage.k8s.io"
|
||||
omitStages:
|
||||
- "RequestReceived"
|
||||
# Default level for all other requests.
|
||||
- level: Metadata
|
||||
omitStages:
|
||||
- "RequestReceived"
|
||||
`)))
|
||||
|
||||
// AuditWebhook defines the template of kube-apiserver audit-webhook.
|
||||
var AuditWebhook = template.Must(template.New("audit-webhook.yaml").Parse(
|
||||
dedent.Dedent(`apiVersion: v1
|
||||
kind: Config
|
||||
clusters:
|
||||
- name: kube-auditing
|
||||
cluster:
|
||||
server: https://SHOULD_BE_REPLACED:6443/audit/webhook/event
|
||||
insecure-skip-tls-verify: true
|
||||
contexts:
|
||||
- context:
|
||||
cluster: kube-auditing
|
||||
user: ""
|
||||
name: default-context
|
||||
current-context: default-context
|
||||
preferences: {}
|
||||
users: []
|
||||
`)))
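A small usage sketch of the two templates above (the import path is an assumption for illustration): neither template has any fields, so Execute renders them verbatim; Name() yields "audit-policy.yaml" / "audit-webhook.yaml", which the generate tasks join onto /etc/kubernetes/audit as the destination.

```go
package main

import (
	"os"

	// Import path assumed for illustration.
	"github.com/kubesphere/kubekey/pkg/kubernetes/templates"
)

func main() {
	// Both templates are static, so nil data renders them as-is.
	_ = templates.AuditPolicy.Execute(os.Stdout, nil)
	_ = templates.AuditWebhook.Execute(os.Stdout, nil)
}
```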

@@ -86,6 +86,13 @@ apiServer:
{{- range .CertSANs }}
- "{{ . }}"
{{- end }}
{{- if .EnableAudit }}
extraVolumes:
- name: k8s-audit
hostPath: /etc/kubernetes/audit
mountPath: /etc/kubernetes/audit
pathType: DirectoryOrCreate
{{- end }}
controllerManager:
extraArgs:
node-cidr-mask-size: "{{ .NodeCidrMaskSize }}"
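A minimal, self-contained sketch of the conditional above (data shape assumed): when EnableAudit is true, the rendered kubeadm ClusterConfiguration gains a hostPath volume so the apiserver pod can read the audit policy and webhook config generated under /etc/kubernetes/audit.

```go
package main

import (
	"os"
	"text/template"
)

// Reduced copy of the template fragment above, for demonstration only.
var apiServer = template.Must(template.New("apiserver").Parse(`apiServer:
{{- if .EnableAudit }}
  extraVolumes:
  - name: k8s-audit
    hostPath: /etc/kubernetes/audit
    mountPath: /etc/kubernetes/audit
    pathType: DirectoryOrCreate
{{- end }}
`))

func main() {
	// Prints the extraVolumes block; with false it prints only "apiServer:".
	_ = apiServer.Execute(os.Stdout, map[string]bool{"EnableAudit": true})
}
```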

@@ -165,17 +172,11 @@ var (
}

ApiServerArgs = map[string]string{
"bind-address": "0.0.0.0",
"audit-log-maxage": "30",
"audit-log-maxbackup": "10",
"audit-log-maxsize": "100",
"bind-address": "0.0.0.0",
}
ApiServerSecurityArgs = map[string]string{
"bind-address": "0.0.0.0",
"audit-log-maxage": "30",
"audit-log-maxbackup": "10",
"audit-log-maxsize": "100",
"authorization-mode": "Node,RBAC",
"bind-address": "0.0.0.0",
"authorization-mode": "Node,RBAC",
// --enable-admission-plugins=EventRateLimit must have a configuration file
"enable-admission-plugins": "AlwaysPullImages,ServiceAccount,NamespaceLifecycle,NodeRestriction,LimitRanger,ResourceQuota,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,PodNodeSelector,PodSecurity",
// "audit-log-path": "/var/log/apiserver/audit.log", // need audit policy

@@ -185,6 +186,13 @@ var (
"tls-min-version": "VersionTLS12",
"tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
}
auditArgs = map[string]string{
"audit-log-format": "json",
"audit-log-maxbackup": "2",
"audit-log-maxsize": "200",
"audit-policy-file": "/etc/kubernetes/audit/audit-policy.yaml",
"audit-webhook-config-file": "/etc/kubernetes/audit/audit-webhook.yaml",
}
ControllermanagerArgs = map[string]string{
"bind-address": "0.0.0.0",
"cluster-signing-duration": "87600h",

@@ -205,10 +213,22 @@ var (
}
)

func GetApiServerArgs(securityEnhancement bool) map[string]string {
func GetApiServerArgs(securityEnhancement bool, enableAudit bool) map[string]string {
if securityEnhancement {
if enableAudit {
for k, v := range auditArgs {
ApiServerSecurityArgs[k] = v
}
}
return ApiServerSecurityArgs
}

if enableAudit {
for k, v := range auditArgs {
ApiServerArgs[k] = v
}
}

return ApiServerArgs
}
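One observation (not part of the change): auditArgs is merged into the package-level maps, so repeated calls mutate shared defaults. A minimal side-effect-free sketch, with a hypothetical helper name, that copies before overlaying:

```go
// mergedArgs is an illustrative helper: copy the defaults, then overlay
// the audit flags, leaving the package-level maps untouched.
func mergedArgs(base, extra map[string]string) map[string]string {
	out := make(map[string]string, len(base)+len(extra))
	for k, v := range base {
		out[k] = v // copy defaults first
	}
	for k, v := range extra {
		out[k] = v // audit flags win on key collisions
	}
	return out
}
```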

@@ -382,7 +402,7 @@ func GetKubeletCgroupDriver(runtime connector.Runtime, kubeConf *common.KubeConf
cmd = "docker info | grep 'Cgroup Driver'"
case common.Crio:
cmd = "crio config | grep cgroup_manager"
case common.Conatinerd:
case common.Containerd:
cmd = "containerd config dump | grep SystemdCgroup || echo 'SystemdCgroup = false'"
case common.Isula:
cmd = "isula info | grep 'Cgroup Driver'"

@@ -42,7 +42,7 @@ spec:
containers:
- name: haproxy
image: {{ .HaproxyImage }}
imagePullPolicy: Always
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 25m

@@ -130,7 +130,7 @@ spec:
- name: prometheus_server
value: :2112
image: {{ .KubevipImage }}
imagePullPolicy: Always
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
securityContext:

@@ -254,7 +254,7 @@ spec:
- name: prometheus_server
value: :2112
image: {{ .KubevipImage }}
imagePullPolicy: Always
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
securityContext:

@@ -74,7 +74,7 @@ spec:
- name: prometheus_server
value: :2112
image: {{ .KubevipImage }}
imagePullPolicy: Always
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
securityContext:

@@ -138,7 +138,7 @@ spec:
- name: address
value: {{ .KubeVip }}
image: {{ .KubevipImage }}
imagePullPolicy: Always
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
securityContext:

@@ -189,9 +189,7 @@ func AddNodes(args common.Argument, downloadCmd string) error {
return err
}
case common.Kubernetes:
if err := NewAddNodesPipeline(runtime); err != nil {
return err
}
fallthrough
default:
if err := NewAddNodesPipeline(runtime); err != nil {
return err

@@ -137,9 +137,7 @@ func ArtifactExport(args common.ArtifactArgument, downloadCmd string) error {
return err
}
case common.Kubernetes:
if err := NewArtifactExportPipeline(runtime); err != nil {
return err
}
fallthrough
default:
if err := NewArtifactExportPipeline(runtime); err != nil {
return err

@@ -308,9 +308,7 @@ func CreateCluster(args common.Argument, downloadCmd string) error {
return err
}
case common.Kubernetes:
if err := NewCreateClusterPipeline(runtime); err != nil {
return err
}
fallthrough
default:
if err := NewCreateClusterPipeline(runtime); err != nil {
return err

@@ -117,9 +117,7 @@ func DeleteCluster(args common.Argument) error {
return err
}
case common.Kubernetes:
if err := NewDeleteClusterPipeline(runtime); err != nil {
return err
}
fallthrough
default:
if err := NewDeleteClusterPipeline(runtime); err != nil {
return err

@@ -81,7 +81,7 @@ spec:
containers:
- name: kube-kata
image: {{ .KataDeployImage }}
imagePullPolicy: Always
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:

@@ -25,7 +25,7 @@ func (d *DeployPluginsModule) Init() {
d.Name = "DeployPluginsModule"
d.Desc = "Deploy plugins for cluster"

if d.KubeConf.Cluster.Kubernetes.EnableKataDeploy() && (d.KubeConf.Cluster.Kubernetes.ContainerManager == common.Conatinerd || d.KubeConf.Cluster.Kubernetes.ContainerManager == common.Crio) {
if d.KubeConf.Cluster.Kubernetes.EnableKataDeploy() && (d.KubeConf.Cluster.Kubernetes.ContainerManager == common.Containerd || d.KubeConf.Cluster.Kubernetes.ContainerManager == common.Crio) {
d.Tasks = append(d.Tasks, DeployKataTasks(d)...)
}

@@ -141,6 +141,7 @@ func deployCalico(d *DeployNetworkPluginModule) []task.Interface {
"IPIPMode": d.KubeConf.Cluster.Network.Calico.IPIPMode,
"VXLANMode": d.KubeConf.Cluster.Network.Calico.VXLANMode,
"ConatinerManagerIsIsula": d.KubeConf.Cluster.Kubernetes.ContainerManager == "isula",
"IPV4POOLNATOUTGOING": d.KubeConf.Cluster.Network.Calico.EnableIPV4POOL_NAT_OUTGOING(),
},
},
Parallel: true,

@@ -179,10 +180,10 @@ func deployFlannel(d *DeployNetworkPluginModule) []task.Interface {
Template: templates.Flannel,
Dst: filepath.Join(common.KubeConfigDir, templates.Flannel.Name()),
Data: util.Data{
"KubePodsCIDR": d.KubeConf.Cluster.Network.KubePodsCIDR,
"FlannelImage": images.GetImage(d.Runtime, d.KubeConf, "flannel").ImageName(),
"KubePodsCIDR": d.KubeConf.Cluster.Network.KubePodsCIDR,
"FlannelImage": images.GetImage(d.Runtime, d.KubeConf, "flannel").ImageName(),
"FlannelPluginImage": images.GetImage(d.Runtime, d.KubeConf, "flannel-cni-plugin").ImageName(),
"BackendMode": d.KubeConf.Cluster.Network.Flannel.BackendMode,
"BackendMode": d.KubeConf.Cluster.Network.Flannel.BackendMode,
},
},
Parallel: true,

@@ -4594,6 +4594,13 @@ spec:
# Enable or Disable VXLAN on the default IP pool.
- name: CALICO_IPV4POOL_VXLAN
value: "{{ .VXLANMode }}"
{{- if .IPV4POOLNATOUTGOING }}
- name: CALICO_IPV4POOL_NAT_OUTGOING
value: "true"
{{- else }}
- name: CALICO_IPV4POOL_NAT_OUTGOING
value: "false"
{{- end }}
# Enable or Disable VXLAN on the default IPv6 IP pool.
- name: CALICO_IPV6POOL_VXLAN
value: "Never"

@@ -4834,5 +4841,4 @@ spec:
---
# Source: calico/templates/configure-canal.yaml

`)))
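A side note on the two-branch conditional in the calico manifest above: Go's text/template prints a boolean as the literal "true"/"false", so the pair of branches could arguably collapse to a single line. A minimal sketch (demonstration only, not the project's code):

```go
package main

import (
	"os"
	"text/template"
)

// Sketch only: printing the bool directly is equivalent to the explicit
// {{- if }} / {{- else }} branches in the manifest above.
var nat = template.Must(template.New("nat").Parse(
	`- name: CALICO_IPV4POOL_NAT_OUTGOING
  value: "{{ .IPV4POOLNATOUTGOING }}"
`))

func main() {
	// Renders value: "true"; with false it renders value: "false".
	_ = nat.Execute(os.Stdout, map[string]any{"IPV4POOLNATOUTGOING": true})
}
```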

@@ -125,7 +125,7 @@ spec:
serviceAccountName: openebs-maya-operator
containers:
- name: openebs-provisioner-hostpath
imagePullPolicy: Always
imagePullPolicy: IfNotPresent
image: {{ .ProvisionerLocalPVImage }}
env:
# OPENEBS_IO_K8S_MASTER enables openebs provisioner to connect to K8s

@@ -18,7 +18,6 @@ package registry

import (
"encoding/json"
"io/ioutil"
"os"
"path/filepath"
"strings"

@@ -81,12 +80,12 @@ func LookupCertsFile(path string) (ca string, cert string, key string, err error
return
}
logger.Log.Debugf("Looking for TLS certificates and private keys in abs path %s", absPath)
fs, err := ioutil.ReadDir(absPath)
entries, err := os.ReadDir(absPath)
if err != nil {
return ca, cert, key, err
}

for _, f := range fs {
for _, f := range entries {
fullPath := filepath.Join(path, f.Name())
if strings.HasSuffix(f.Name(), ".crt") {
logger.Log.Debugf(" crt: %s", fullPath)

@@ -96,7 +95,7 @@ func LookupCertsFile(path string) (ca string, cert string, key string, err error
certName := f.Name()
keyName := certName[:len(certName)-5] + ".key"
logger.Log.Debugf(" cert: %s", fullPath)
if !hasFile(fs, keyName) {
if !hasFile(entries, keyName) {
return ca, cert, key, errors.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName)
}
cert = fullPath

@@ -105,7 +104,7 @@ func LookupCertsFile(path string) (ca string, cert string, key string, err error
keyName := f.Name()
certName := keyName[:len(keyName)-4] + ".cert"
logger.Log.Debugf(" key: %s", fullPath)
if !hasFile(fs, certName) {
if !hasFile(entries, certName) {
return ca, cert, key, errors.Errorf("missing client certificate %s for key %s", certName, keyName)
}
key = fullPath

@@ -114,7 +113,7 @@ func LookupCertsFile(path string) (ca string, cert string, key string, err error
return ca, cert, key, nil
}

func hasFile(files []os.FileInfo, name string) bool {
func hasFile(files []os.DirEntry, name string) bool {
for _, f := range files {
if f.Name() == name {
return true
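Context for the change above: ioutil.ReadDir has been deprecated since Go 1.16 in favor of os.ReadDir, which returns []os.DirEntry instead of []os.FileInfo; that is why hasFile's parameter type changes. A minimal sketch (the path is illustrative):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// os.ReadDir returns []os.DirEntry and defers the Stat call behind
	// each entry, so scanning a large certs directory does less work
	// than the old ioutil.ReadDir.
	entries, err := os.ReadDir("/etc/docker/certs.d")
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, e := range entries {
		fmt.Println(e.Name(), e.IsDir())
	}
}
```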

@@ -217,3 +217,26 @@ var KsV332 = &KsInstaller{
V321.String(),
},
}

var KsV340 = &KsInstaller{
Version: V340.String(),
CRDTemplate: templates.KsInstaller,
ClusterConfigurationTemplate: templates.V340,
K8sSupportVersions: []string{
"v1.19",
"v1.20",
"v1.21",
"v1.22",
"v1.23",
"v1.24",
"v1.25",
"v1.26",
},
UpgradeSupportVersions: []string{
V332.String(),
V331.String(),
V330.String(),
V320.String(),
V321.String(),
},
}

@@ -0,0 +1,234 @@
/*
Copyright 2022 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package templates

import (
"text/template"

"github.com/lithammer/dedent"
)

var V340 = template.Must(template.New("v3.4.0").Parse(
dedent.Dedent(`
---
apiVersion: installer.kubesphere.io/v1alpha1
kind: ClusterConfiguration
metadata:
name: ks-installer
namespace: kubesphere-system
labels:
version: {{ .Tag }}
spec:
persistence:
storageClass: ""
authentication:
jwtSecret: ""
zone: ""
local_registry: ""
namespace_override: ""
# dev_tag: ""
etcd:
monitoring: false
endpointIps: localhost
port: 2379
tlsEnable: true
common:
core:
console:
enableMultiLogin: true
port: 30880
type: NodePort
# apiserver:
# resources: {}
# controllerManager:
# resources: {}
redis:
enabled: false
enableHA: false
volumeSize: 2Gi
openldap:
enabled: false
volumeSize: 2Gi
minio:
volumeSize: 20Gi
monitoring:
# type: external
endpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090
GPUMonitoring:
enabled: false
gpu:
kinds:
- resourceName: "nvidia.com/gpu"
resourceType: "GPU"
default: true
es:
# master:
# volumeSize: 4Gi
# replicas: 1
# resources: {}
# data:
# volumeSize: 20Gi
# replicas: 1
# resources: {}
logMaxAge: 7
elkPrefix: logstash
basicAuth:
enabled: false
username: ""
password: ""
externalElasticsearchHost: ""
externalElasticsearchPort: ""
opensearch:
# master:
# volumeSize: 4Gi
# replicas: 1
# resources: {}
# data:
# volumeSize: 20Gi
# replicas: 1
# resources: {}
enabled: true
logMaxAge: 7
opensearchPrefix: whizard
basicAuth:
enabled: true
username: "admin"
password: "admin"
externalOpensearchHost: ""
externalOpensearchPort: ""
dashboard:
enabled: false
alerting:
enabled: false
# thanosruler:
# replicas: 1
# resources: {}
auditing:
enabled: false
# operator:
# resources: {}
# webhook:
# resources: {}
devops:
enabled: false
ci:
enabled: false
cd:
enabled: false
type: argocd
# resources: {}
jenkinsMemoryLim: 8Gi
jenkinsMemoryReq: 4Gi
jenkinsVolumeSize: 8Gi
events:
enabled: false
# operator:
# resources: {}
# exporter:
# resources: {}
# ruler:
# enabled: true
# replicas: 2
# resources: {}
logging:
enabled: false
logsidecar:
enabled: true
replicas: 2
# resources: {}
metrics_server:
enabled: false
monitoring:
storageClass: ""
node_exporter:
port: 9100
# resources: {}
# kube_rbac_proxy:
# resources: {}
# kube_state_metrics:
# resources: {}
# prometheus:
# replicas: 1
# volumeSize: 20Gi
# resources: {}
# operator:
# resources: {}
# alertmanager:
# replicas: 1
# resources: {}
# notification_manager:
# resources: {}
# operator:
# resources: {}
# proxy:
# resources: {}
gpu:
nvidia_dcgm_exporter:
enabled: false
# resources: {}
multicluster:
clusterRole: none
network:
networkpolicy:
enabled: false
ippool:
type: none
topology:
type: none
openpitrix:
store:
enabled: false
servicemesh:
enabled: false
istio:
components:
ingressGateways:
- name: istio-ingressgateway
enabled: false
cni:
enabled: false
edgeruntime:
enabled: false
kubeedge:
enabled: false
cloudCore:
cloudHub:
advertiseAddress:
- ""
service:
cloudhubNodePort: "30000"
cloudhubQuicNodePort: "30001"
cloudhubHttpsNodePort: "30002"
cloudstreamNodePort: "30003"
tunnelNodePort: "30004"
# resources: {}
# hostNetWork: false
iptables-manager:
enabled: true
mode: "external"
# resources: {}
# edgeService:
# resources: {}
gatekeeper:
enabled: false
# controller_manager:
# resources: {}
# audit:
# resources: {}
terminal:
timeout: 600
`)))

@@ -315,7 +315,7 @@ spec:
containers:
- name: installer
image: {{ .Repo }}/ks-installer:{{ .Tag }}
imagePullPolicy: Always
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /etc/localtime
name: host-time

@@ -35,6 +35,7 @@ const (
V330
V331
V332
V340
)

var VersionList = []Version{

@@ -47,6 +48,7 @@ var VersionList = []Version{
V330,
V331,
V332,
V340,
}

var VersionMap = map[string]*KsInstaller{

@@ -59,6 +61,7 @@ var VersionMap = map[string]*KsInstaller{
V330.String(): KsV330,
V331.String(): KsV331,
V332.String(): KsV332,
V340.String(): KsV340,
}

var CNSource = map[string]bool{

@@ -69,6 +72,7 @@ var CNSource = map[string]bool{
V330.String(): true,
V331.String(): true,
V332.String(): true,
V340.String(): true,
}

func (v Version) String() string {

@@ -91,6 +95,8 @@ func (v Version) String() string {
return "v3.3.1"
case V332:
return "v3.3.2"
case V340:
return "v3.4.0"
default:
return "invalid option"
}
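A small usage sketch of the new entry (written as an in-package snippet; it needs only a "fmt" import): the V340 constant, KsV340 value, and VersionMap wiring above resolve the v3.4.0 installer metadata.

```go
// Illustrative lookup of the metadata registered for v3.4.0.
func printV340Support() {
	if installer, ok := VersionMap[V340.String()]; ok {
		fmt.Println(installer.Version, installer.K8sSupportVersions)
	}
}
```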

@@ -154,6 +154,12 @@ spec:
description: PrivateKeyFile is the path to the private key
for SSH authentication.
type: string
secret:
description: Secret is the secret of the PrivateKey or Password
for SSH authentication.It should in the same namespace as
capkk. When Password is empty, replace it with data.password.
When PrivateKey is empty, replace it with data.privateKey
type: string
timeout:
description: Timeout is the timeout for establish an SSH connection.
format: int64

@@ -193,6 +199,13 @@ spec:
description: PrivateKeyFile is the path to the private
key for SSH authentication.
type: string
secret:
description: Secret is the secret of the PrivateKey
or Password for SSH authentication.It should in the
same namespace as capkk. When Password is empty, replace
it with data.password. When PrivateKey is empty, replace
it with data.privateKey
type: string
timeout:
description: Timeout is the timeout for establish an
SSH connection.

@@ -175,6 +175,13 @@ spec:
description: PrivateKeyFile is the path to the private
key for SSH authentication.
type: string
secret:
description: Secret is the secret of the PrivateKey
or Password for SSH authentication.It should in
the same namespace as capkk. When Password is empty,
replace it with data.password. When PrivateKey is
empty, replace it with data.privateKey
type: string
timeout:
description: Timeout is the timeout for establish
an SSH connection.

@@ -218,6 +225,13 @@ spec:
description: PrivateKeyFile is the path to the
private key for SSH authentication.
type: string
secret:
description: Secret is the secret of the PrivateKey
or Password for SSH authentication.It should
in the same namespace as capkk. When Password
is empty, replace it with data.password. When
PrivateKey is empty, replace it with data.privateKey
type: string
timeout:
description: Timeout is the timeout for establish
an SSH connection.

@@ -85,6 +85,12 @@ spec:
description: PrivateKeyFile is the path to the private key for
SSH authentication.
type: string
secret:
description: Secret is the secret of the PrivateKey or Password
for SSH authentication.It should in the same namespace as capkk.
When Password is empty, replace it with data.password. When
PrivateKey is empty, replace it with data.privateKey
type: string
timeout:
description: Timeout is the timeout for establish an SSH connection.
format: int64

@@ -207,10 +207,11 @@ func (r *Reconciler) reconcileNormal(ctx context.Context, clusterScope *scope.Cl
kkCluster := clusterScope.KKCluster

// If the KKCluster doesn't have our finalizer, add it.
controllerutil.AddFinalizer(kkCluster, infrav1.ClusterFinalizer)
// Register the finalizer immediately to avoid orphaning KK resources on delete
if err := clusterScope.PatchObject(); err != nil {
return reconcile.Result{}, err
if controllerutil.AddFinalizer(kkCluster, infrav1.ClusterFinalizer) {
// Register the finalizer immediately to avoid orphaning KK resources on delete
if err := clusterScope.PatchObject(); err != nil {
return reconcile.Result{}, err
}
}

if _, err := net.LookupIP(kkCluster.Spec.ControlPlaneLoadBalancer.Host); err != nil {
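The refactor above (repeated for KKInstance and KKMachine below) relies on controllerutil.AddFinalizer from sigs.k8s.io/controller-runtime reporting whether it actually mutated the object's finalizer list, so the patch is only issued on the first reconcile. The pattern reduced to its core, with illustrative obj/patchHelper names:

```go
// AddFinalizer is idempotent and returns true only when it changed
// ObjectMeta.Finalizers, so a no-op patch is skipped on later reconciles.
if controllerutil.AddFinalizer(obj, finalizerName) {
	if err := patchHelper.Patch(ctx, obj); err != nil {
		return ctrl.Result{}, err
	}
}
```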

@@ -24,8 +24,10 @@ import (

"github.com/go-logr/logr"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/record"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"

@@ -95,6 +97,19 @@ func (r *Reconciler) getSSHClient(scope *scope.InstanceScope) ssh.Interface {
if r.sshClientFactory != nil {
return r.sshClientFactory(scope)
}
if scope.KKInstance.Spec.Auth.Secret != "" {
secret := &corev1.Secret{}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*15)
defer cancel()
if err := r.Get(ctx, types.NamespacedName{Namespace: scope.Cluster.Namespace, Name: scope.KKInstance.Spec.Auth.Secret}, secret); err == nil {
if scope.KKInstance.Spec.Auth.PrivateKey == "" { // replace PrivateKey by secret
scope.KKInstance.Spec.Auth.PrivateKey = string(secret.Data["privateKey"])
}
if scope.KKInstance.Spec.Auth.Password == "" { // replace password by secret
scope.KKInstance.Spec.Auth.Password = string(secret.Data["password"])
}
}
}
return ssh.NewClient(scope.KKInstance.Spec.Address, scope.KKInstance.Spec.Auth, &scope.Logger)
}
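A minimal sketch of the Secret shape the fallback above reads: keys "privateKey" and "password" under data, in the cluster's namespace (matching the CRD descriptions earlier in this diff). The name and values here are illustrative.

```go
package main

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// sshAuthSecret builds the Secret that getSSHClient can fall back to.
func sshAuthSecret(namespace, privateKeyPEM, password string) *corev1.Secret {
	return &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "node-ssh-auth", // illustrative name
			Namespace: namespace,
		},
		StringData: map[string]string{
			"privateKey": privateKeyPEM, // used when Spec.Auth.PrivateKey is empty
			"password":   password,      // used when Spec.Auth.Password is empty
		},
	}
}
```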

@@ -345,11 +360,12 @@ func (r *Reconciler) reconcileNormal(ctx context.Context, instanceScope *scope.I
instanceScope.KKInstance.Labels[infrav1.KKClusterLabelName] = instanceScope.InfraCluster.InfraClusterName()

// If the KKMachine doesn't have our finalizer, add it.
controllerutil.AddFinalizer(instanceScope.KKInstance, infrav1.InstanceFinalizer)
// Register the finalizer after first read operation from KK to avoid orphaning KK resources on delete
if err := instanceScope.PatchObject(); err != nil {
instanceScope.Error(err, "unable to patch object")
return ctrl.Result{}, err
if controllerutil.AddFinalizer(instanceScope.KKInstance, infrav1.InstanceFinalizer) {
// Register the finalizer after first read operation from KK to avoid orphaning KK resources on delete
if err := instanceScope.PatchObject(); err != nil {
instanceScope.Error(err, "unable to patch object")
return ctrl.Result{}, err
}
}

sshClient := r.getSSHClient(instanceScope)

@@ -276,11 +276,12 @@ func (r *Reconciler) reconcileNormal(ctx context.Context, machineScope *scope.Ma
}

// If the KKMachine doesn't have our finalizer, add it.
controllerutil.AddFinalizer(machineScope.KKMachine, infrav1.MachineFinalizer)
// Register the finalizer after first read operation from KK to avoid orphaning KK resources on delete
if err := machineScope.PatchObject(); err != nil {
machineScope.Error(err, "unable to patch object")
return ctrl.Result{}, err
if controllerutil.AddFinalizer(machineScope.KKMachine, infrav1.MachineFinalizer) {
// Register the finalizer after first read operation from KK to avoid orphaning KK resources on delete
if err := machineScope.PatchObject(); err != nil {
machineScope.Error(err, "unable to patch object")
return ctrl.Result{}, err
}
}

// Create new instance from KKCluster since providerId is nils.

@@ -0,0 +1,172 @@
# Cluster-Autoscaler for capkk

Refer to https://cluster-api.sigs.k8s.io/tasks/automated-machine-management/autoscaling.html
capkk is the infrastructure provider for Cluster API. Here is an example of deploying Cluster Autoscaler in capkk.

## Deployment plan

<img src="img/autoscaler-deployment.png" style="zoom:50%;" />

1. Install the autoscaler on the management cluster to manage the dynamic scaling of the workload cluster.
2. Install the autoscaler and the cluster it manages in the same namespace.

## Scaling mechanism

<img src="img/autoscaler-mechanism.png" style="zoom:50%;" />

⚠️ **Automatic scaling is only supported for worker nodes.**

1. Configure all scalable machines in the nodes section of the KKCluster.

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: KKCluster
metadata:
  name: capkk-1
  namespace: default
spec:
  component:
    zone: cn
  controlPlaneLoadBalancer:
    host: 172.31.53.163
  nodes:
    auth:
      password: "123456"
      user: root
    instances:
      - address: 172.31.53.163
      - address: 172.31.53.160
      - address: 172.31.53.122
```

2. Configure annotations on the MachineDeployment to allow the autoscaler to discover node groups.

```yaml
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
  annotations:
    cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size: "3"
    cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size: "0"
    capacity.cluster-autoscaler.kubernetes.io/memory: "16G"
    capacity.cluster-autoscaler.kubernetes.io/cpu: "8"
  name: capkk-1-md-0
  namespace: default
spec:
  clusterName: capkk-1
  replicas: 1
  selector:
    matchLabels: null
  template:
    spec:
      bootstrap:
        configRef:
          apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
          kind: KubeadmConfigTemplate
          name: capkk-1-md-0
      clusterName: capkk-1
      infrastructureRef:
        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
        kind: KKMachineTemplate
        name: capkk-1-md-0
      version: v1.25.4
```

When `cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size` is set to "0", it indicates that the minimum number of worker nodes is 0. In this case, it is necessary to set `capacity.cluster-autoscaler.kubernetes.io/memory` and `capacity.cluster-autoscaler.kubernetes.io/cpu`.

3. Modify the startup parameters in the autoscaler Deployment.

```yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cluster-autoscaler
  labels:
    app: cluster-autoscaler
spec:
  selector:
    matchLabels:
      app: cluster-autoscaler
  replicas: 1
  template:
    metadata:
      labels:
        app: cluster-autoscaler
    spec:
      containers:
      - image: registry.k8s.io/autoscaling/cluster-autoscaler:v1.26.3
        name: default
        command:
        - /cluster-autoscaler
        args:
        - --cloud-provider=clusterapi
        - --kubeconfig=/tmp/kubeconfig/workload.conf
        - --clusterapi-cloud-config-authoritative
        - --node-group-auto-discovery=clusterapi:namespace=${NAMESPACE}
        - --scale-down-enabled=false
        volumeMounts:
        - mountPath: /tmp/kubeconfig
          name: workload-kubeconfig
      serviceAccountName: cluster-autoscaler
      terminationGracePeriodSeconds: 10
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
      volumes:
      - name: workload-kubeconfig
        secret:
          secretName: '${CLUSTER_NAME}-kubeconfig'
          optional: true
          items:
          - key: value
            path: workload.conf
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cluster-autoscaler-management
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: cluster-autoscaler-management
subjects:
- kind: ServiceAccount
  name: cluster-autoscaler
  namespace: '${NAMESPACE}'
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cluster-autoscaler
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cluster-autoscaler-management
rules:
- apiGroups:
  - cluster.x-k8s.io
  resources:
  - machinedeployments
  - machinedeployments/scale
  - machines
  - machinesets
  verbs:
  - get
  - list
  - update
  - watch
- apiGroups:
  - infrastructure.cluster.x-k8s.io
  resources:
  - kkmachinetemplates
  verbs:
  - get
  - list
  - update
  - watch
```

**ScalingUp**: Triggered when pending pods cannot be scheduled on any existing node.

**ScalingDown**: Configured via the autoscaler's startup parameters. For the complete configuration, refer to the autoscaler's official documentation.

@@ -67,4 +67,8 @@ $ kk create cluster -f config-sample.yaml
Create a cluster from the specified configuration file and use the artifact to install operating system packages.
```
$ kk create cluster -f config-sample.yaml -a kubekey-artifact.tar.gz --with-packages
```
Create a cluster with the specified download command.
```
$ kk create cluster --download-cmd 'hd get -t 8 -o %s %s'
```

@@ -23,8 +23,11 @@ spec:
- node1
- node[10:100] # All the nodes in your cluster that serve as the worker nodes.
controlPlaneEndpoint:
#Internal loadbalancer for apiservers. Support: haproxy, kube-vip [Default: ""]
internalLoadbalancer: haproxy
# Internal loadbalancer for apiservers. Support: haproxy, kube-vip [Default: ""]
internalLoadbalancer: haproxy
# Determines whether to use external dns to resolve the control-plane domain.
# If 'externalDNS' is set to 'true', the 'address' needs to be set to "".
externalDNS: false
domain: lb.kubesphere.local
# The IP address of your load balancer. If you use internalLoadblancer in "kube-vip" mode, a VIP is required here.
address: ""

@@ -111,24 +114,24 @@ spec:
# keyFile: /pki/etcd/etcd.key
dataDir: "/var/lib/etcd"
# Time (in milliseconds) of a heartbeat interval.
heartbeatInterval: "250"
heartbeatInterval: 250
# Time (in milliseconds) for an election to timeout.
electionTimeout: "5000"
electionTimeout: 5000
# Number of committed transactions to trigger a snapshot to disk.
snapshotCount: "10000"
snapshotCount: 10000
# Auto compaction retention for mvcc key value store in hour. 0 means disable auto compaction.
autoCompactionRetention: "8"
autoCompactionRetention: 8
# Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
metrics: basic
## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than
## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check
## etcd documentation for more information.
# 8G is a suggested maximum size for normal environments and etcd warns at startup if the configured value exceeds it.
quotaBackendBytes: "2147483648"
quotaBackendBytes: 2147483648
# Maximum client request size in bytes the server will accept.
# etcd is designed to handle small key value pairs typical for metadata.
# Larger requests will work, but may increase the latency of other requests
maxRequestBytes: "1572864"
maxRequestBytes: 1572864
# Maximum number of snapshot files to retain (0 is unlimited)
maxSnapshots: 5
# Maximum number of wal files to retain (0 is unlimited)

Binary file not shown.
After Width: | Height: | Size: 77 KiB
Binary file not shown.
After Width: | Height: | Size: 136 KiB

@@ -0,0 +1,27 @@
# HealthCheck for capkk

Refer to https://cluster-api.sigs.k8s.io/tasks/automated-machine-management/healthchecking.html

Here is a sample health check:

```yaml
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineHealthCheck
metadata:
  name: hc-capkk-1
spec:
  clusterName: capkk-1
  maxUnhealthy: 100%
  selector:
    matchLabels:
      cluster.x-k8s.io/cluster-name: capkk-1
  unhealthyConditions:
    - type: Ready
      status: Unknown
      timeout: 300s
    - type: Ready
      status: "False"
      timeout: 300s
```

Capkk currently does not have a remediationTemplate.

@@ -37,6 +37,8 @@ spec:
version: v0.9.1
etcd:
version: v3.4.13
calicoctl:
version: v3.23.2
containerRuntimes:
- type: docker
version: 20.10.8

@@ -49,10 +51,10 @@ spec:
docker-compose:
version: v2.2.2
images:
- docker.io/calico/cni:v3.20.0
- docker.io/calico/kube-controllers:v3.20.0
- docker.io/calico/node:v3.20.0
- docker.io/calico/pod2daemon-flexvol:v3.20.0
- docker.io/calico/cni:v3.23.2
- docker.io/calico/kube-controllers:v3.23.2
- docker.io/calico/node:v3.23.2
- docker.io/calico/pod2daemon-flexvol:v3.23.2
- docker.io/coredns/coredns:1.8.0
- docker.io/kubesphere/k8s-dns-node-cache:1.15.12
- docker.io/kubesphere/kube-apiserver:v1.21.5

@@ -148,4 +150,4 @@ spec:
skipTLSVerify: false # Allow contacting registries over HTTPS with failed TLS verification.
plainHTTP: false # Allow contacting registries over HTTP.
certsPath: "/etc/docker/certs.d/dockerhub.kubekey.local" # Use certificates at path (*.crt, *.cert, *.key) to connect to the registry.
```
```

@@ -44,6 +44,7 @@ K3S_VERSION=${K3S_VERSION}
CONTAINERD_VERSION=${CONTAINERD_VERSION}
RUNC_VERSION=${RUNC_VERSION}
COMPOSE_VERSION=${COMPOSE_VERSION}
CALICO_VERSION=${CALICO_VERSION}

# qsctl
QSCTL_ACCESS_KEY_ID=${QSCTL_ACCESS_KEY_ID}

@@ -156,7 +157,7 @@ if [ $CNI_VERSION ]; then
curl -L -o binaries/cni/$CNI_VERSION/$arch/cni-plugins-linux-$arch-$CNI_VERSION.tgz \
https://github.com/containernetworking/plugins/releases/download/$CNI_VERSION/cni-plugins-linux-$arch-$CNI_VERSION.tgz

qsctl cp binaries/etcd/$CNI_VERSION/$arch/cni-plugins-linux-$arch-$CNI_VERSION.tgz \
qsctl cp binaries/cni/$CNI_VERSION/$arch/cni-plugins-linux-$arch-$CNI_VERSION.tgz \
qs://containernetworking/plugins/releases/download/$CNI_VERSION/cni-plugins-linux-$arch-$CNI_VERSION.tgz \
-c qsctl-config.yaml
done

@@ -164,6 +165,24 @@ if [ $CNI_VERSION ]; then
rm -rf binaries
fi

# Sync CALICOCTL Binary
if [ $CALICO_VERSION ]; then
for arch in ${ARCHS[@]}
do
mkdir -p binaries/calicoctl/$CALICO_VERSION/$arch
echo "Synchronizing calicoctl-$arch"

curl -L -o binaries/calicoctl/$CALICO_VERSION/$arch/calicoctl-linux-$arch \
https://github.com/projectcalico/calico/releases/download/$CALICO_VERSION/calicoctl-linux-$arch

qsctl cp binaries/calicoctl/$CALICO_VERSION/$arch/calicoctl-linux-$arch \
qs://kubernetes-release/projectcalico/calico/releases/download/$CALICO_VERSION/calicoctl-linux-$arch \
-c qsctl-config.yaml
done

rm -rf binaries
fi

# Sync crictl Binary
if [ $CRICTL_VERSION ]; then
echo "access_key_id: $ACCESS_KEY_ID" > qsctl-config.yaml

@@ -64,11 +64,17 @@ state = "/run/containerd"
{{- if .PrivateRegistry }}
[plugins."io.containerd.grpc.v1.cri".registry.configs]
[plugins."io.containerd.grpc.v1.cri".registry.configs.{{ .PrivateRegistry }}.auth]
username = {{ .Auth.Username }}
password = {{ .Auth.Password}}
username = "{{ .Auth.Username }}"
password = "{{ .Auth.Password}}"
[plugins."io.containerd.grpc.v1.cri".registry.configs.{{ .PrivateRegistry }}.tls]
ca_file = {{ .Auth.CAFile }}
cert_file = {{ .Auth.CertFile }}
key_file = {{ .Auth.KeyFile }}
insecure_skip_verify = {{ .Auth.SkipTLSVerify }}
{{- if .Auth.CAFile }}
ca_file = "{{ .Auth.CAFile }}"
{{- end}}
{{- if .Auth.CertFile }}
cert_file = "{{ .Auth.CertFile }}"
{{- end}}
{{- if .Auth.KeyFile }}
key_file = "{{ .Auth.KeyFile }}"
{{- end}}
insecure_skip_verify = {{ .Auth.InsecureSkipVerify }}
{{- end}}

@@ -51,7 +51,6 @@ spec:
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
name: kube-vip
namespace: kube-system
spec:

@@ -180,4 +179,4 @@ spec:
spec:
joinConfiguration:
nodeRegistration:
criSocket: unix:///var/run/containerd/containerd.sock
criSocket: unix:///var/run/containerd/containerd.sock

@@ -977,5 +977,13 @@
"v2.7.0": "1e1e79d451d04a9c9953934b966e5698362e1262a933d098bd3874529f80fd43",
"v2.7.1": "b86f161f0b6f4c6b294e62797ff20c24a39c918f4d1fd63728864a0461b3cdc7"
}
},
"calicoctl": {
"amd64": {
"v3.23.2": "3784200cdfc0106c9987df2048d219bb91147f0cc3fa365b36279ac82ea37c7a"
},
"arm64": {
"v3.23.2": "232b992e6767c68c8c832cc7027a0d9aacb29901a9b5e8871e25baedbbb9c64c"
}
}
}
}