refactor images

This commit is contained in:
pixiake 2020-05-12 00:05:02 +08:00
parent 7894a67e35
commit 782a46140c
9 changed files with 351 additions and 101 deletions

View File

@ -5,7 +5,7 @@ import (
"fmt"
kubekeyapi "github.com/kubesphere/kubekey/pkg/apis/kubekey/v1alpha1"
"github.com/kubesphere/kubekey/pkg/cluster/kubernetes/tmpl"
"github.com/kubesphere/kubekey/pkg/plugins/dns/coredns"
"github.com/kubesphere/kubekey/pkg/plugins/dns"
"github.com/kubesphere/kubekey/pkg/util/manager"
"github.com/kubesphere/kubekey/pkg/util/ssh"
"github.com/pkg/errors"
@ -87,23 +87,21 @@ func initKubernetesCluster(mgr *manager.Manager, node *kubekeyapi.HostCfg, conn
fmt.Println(output)
return errors.Wrap(errors.WithStack(err2), "failed to init kubernetes cluster")
}
err3 := GetKubeConfig(mgr)
if err3 != nil {
if err3 := GetKubeConfig(mgr); err3 != nil {
return err3
}
err4 := removeMasterTaint(mgr, node)
if err4 != nil {
return err4
if err := removeMasterTaint(mgr, node); err != nil {
return err
}
err5 := addWorkerLabel(mgr, node)
if err5 != nil {
return err5
if err := addWorkerLabel(mgr, node); err != nil {
return err
}
err6 := coredns.OverrideCorednsService(mgr)
if err6 != nil {
return err6
if err := dns.OverrideCorednsService(mgr); err != nil {
return err
}
if err := dns.DeployNodelocaldns(mgr); err != nil {
return err
}
clusterIsExist = true
if err := getJoinNodesCmd(mgr); err != nil {
return err

View File

@ -142,6 +142,6 @@ func GenerateKubeadmCfg(mgr *manager.Manager) (string, error) {
"ServiceSubnet": mgr.Cluster.Network.KubeServiceCIDR,
"CertSANs": mgr.Cluster.GenerateCertSANs(),
"ExternalEtcd": externalEtcd,
"ClusterIP": mgr.Cluster.ClusterIP(),
"ClusterIP": "169.254.25.10",
})
}

40
pkg/images/default.go Normal file
View File

@ -0,0 +1,40 @@
package images
import (
kubekeyapi "github.com/kubesphere/kubekey/pkg/apis/kubekey/v1alpha1"
"github.com/kubesphere/kubekey/pkg/util/manager"
"strings"
)
const (
	// Node-group names: each image belongs to a group that decides which
	// hosts must pull it (etcd nodes, masters, workers, or every k8s node).
	Etcd   = "etcd"
	Master = "master"
	Worker = "worker"
	K8s    = "k8s"
	// Well-known image names used when looking images up by name.
	Pause                 = "pause"
	KubeApiserver         = "kube-apiserver"
	KubeControllerManager = "kube-controller-manager"
	KubeScheduler         = "kube-scheduler"
	KubeProxy             = "kube-proxy"
)
// GetImage returns the Image definition registered under name for the
// current cluster configuration. The result carries the registry address,
// namespace, repo, tag, the node group that needs the image, and whether
// it is enabled (network-plugin images are only enabled when the matching
// plugin is configured). An unknown name yields a zero-value Image
// (Enable == false), so callers should check Enable before use.
func GetImage(mgr *manager.Manager, name string) *Image {
	// The table is rebuilt per call because tags and registry addresses
	// depend on the cluster configuration carried by mgr.
	imageList := map[string]Image{
		"etcd":                    {RepoAddr: mgr.Cluster.Registry.PrivateRegistry, Namespace: "kubesphere", Repo: "etcd", Tag: kubekeyapi.DefaultEtcdVersion, Group: Etcd, Enable: true},
		"pause":                   {RepoAddr: "", Namespace: kubekeyapi.DefaultKubeImageRepo, Repo: "pause", Tag: "3.1", Group: K8s, Enable: true},
		"kube-apiserver":          {RepoAddr: "", Namespace: kubekeyapi.DefaultKubeImageRepo, Repo: "kube-apiserver", Tag: mgr.Cluster.Kubernetes.Version, Group: Master, Enable: true},
		"kube-controller-manager": {RepoAddr: "", Namespace: kubekeyapi.DefaultKubeImageRepo, Repo: "kube-controller-manager", Tag: mgr.Cluster.Kubernetes.Version, Group: Master, Enable: true},
		"kube-scheduler":          {RepoAddr: "", Namespace: kubekeyapi.DefaultKubeImageRepo, Repo: "kube-scheduler", Tag: mgr.Cluster.Kubernetes.Version, Group: Master, Enable: true},
		"kube-proxy":              {RepoAddr: "", Namespace: kubekeyapi.DefaultKubeImageRepo, Repo: "kube-proxy", Tag: mgr.Cluster.Kubernetes.Version, Group: K8s, Enable: true},
		"coredns":                 {RepoAddr: mgr.Cluster.Registry.PrivateRegistry, Namespace: "coredns", Repo: "coredns", Tag: "1.6.0", Group: K8s, Enable: true},
		"k8s-dns-node-cache":      {RepoAddr: mgr.Cluster.Registry.PrivateRegistry, Namespace: "kubesphere", Repo: "k8s-dns-node-cache", Tag: "1.15.12", Group: K8s, Enable: true},
		"calico-kube-controllers": {RepoAddr: mgr.Cluster.Registry.PrivateRegistry, Namespace: "calico", Repo: "kube-controllers", Tag: kubekeyapi.DefaultCalicoVersion, Group: K8s, Enable: strings.EqualFold(mgr.Cluster.Network.Plugin, "calico")},
		"calico-cni":              {RepoAddr: mgr.Cluster.Registry.PrivateRegistry, Namespace: "calico", Repo: "cni", Tag: kubekeyapi.DefaultCalicoVersion, Group: K8s, Enable: strings.EqualFold(mgr.Cluster.Network.Plugin, "calico")},
		"calico-node":             {RepoAddr: mgr.Cluster.Registry.PrivateRegistry, Namespace: "calico", Repo: "node", Tag: kubekeyapi.DefaultCalicoVersion, Group: K8s, Enable: strings.EqualFold(mgr.Cluster.Network.Plugin, "calico")},
		"calico-flexvol":          {RepoAddr: mgr.Cluster.Registry.PrivateRegistry, Namespace: "calico", Repo: "pod2daemon-flexvol", Tag: kubekeyapi.DefaultCalicoVersion, Group: K8s, Enable: strings.EqualFold(mgr.Cluster.Network.Plugin, "calico")},
		"flannel":                 {RepoAddr: mgr.Cluster.Registry.PrivateRegistry, Namespace: "kubesphere", Repo: "flannel", Tag: kubekeyapi.DefaultFlannelVersion, Group: K8s, Enable: strings.EqualFold(mgr.Cluster.Network.Plugin, "flannel")},
	}
	// Copy to a local so the returned pointer does not alias the map entry.
	image := imageList[name]
	return &image
}

View File

@ -6,82 +6,81 @@ import (
"github.com/kubesphere/kubekey/pkg/util/manager"
"github.com/kubesphere/kubekey/pkg/util/ssh"
"github.com/pkg/errors"
"strings"
)
type Image struct {
Prefix string
Repo string
Tag string
Group string
Enable bool
RepoAddr string
Namespace string
Repo string
Tag string
Group string
Enable bool
}
func (image *Image) NewImage() string {
return fmt.Sprintf("%s%s:%s", image.Prefix, image.Repo, image.Tag)
}
func GetImagePrefix(privateRegistry, ns string) string {
func (image *Image) ImageName() string {
var prefix string
if privateRegistry == "" {
if ns == "" {
if image.RepoAddr == "" {
if image.Namespace == "" {
prefix = ""
} else {
prefix = fmt.Sprintf("%s/", ns)
prefix = fmt.Sprintf("%s/", image.Namespace)
}
} else {
if ns == "" {
prefix = fmt.Sprintf("%s/library/", privateRegistry)
if image.Namespace == "" {
prefix = fmt.Sprintf("%s/library/", image.RepoAddr)
} else {
prefix = fmt.Sprintf("%s/%s/", privateRegistry, ns)
prefix = fmt.Sprintf("%s/%s/", image.RepoAddr, image.Namespace)
}
}
return prefix
return fmt.Sprintf("%s%s:%s", prefix, image.Repo, image.Tag)
}
func PreDownloadImages(mgr *manager.Manager) error {
mgr.Logger.Infoln("Pre-download images")
mgr.Logger.Infoln("Pre download images")
return mgr.RunTaskOnAllNodes(preDownloadImages, true)
}
func preDownloadImages(mgr *manager.Manager, node *kubekeyapi.HostCfg, conn ssh.Connection) error {
imagesList := []Image{
{Prefix: GetImagePrefix(mgr.Cluster.Registry.PrivateRegistry, kubekeyapi.DefaultKubeImageRepo), Repo: "etcd", Tag: kubekeyapi.DefaultEtcdVersion, Group: "etcd", Enable: true},
{Prefix: GetImagePrefix(mgr.Cluster.Registry.PrivateRegistry, kubekeyapi.DefaultKubeImageRepo), Repo: "pause", Tag: "3.1", Group: "k8s", Enable: true},
{Prefix: GetImagePrefix(mgr.Cluster.Registry.PrivateRegistry, kubekeyapi.DefaultKubeImageRepo), Repo: "kube-apiserver", Tag: mgr.Cluster.Kubernetes.Version, Group: "master", Enable: true},
{Prefix: GetImagePrefix(mgr.Cluster.Registry.PrivateRegistry, kubekeyapi.DefaultKubeImageRepo), Repo: "kube-controller-manager", Tag: mgr.Cluster.Kubernetes.Version, Group: "master", Enable: true},
{Prefix: GetImagePrefix(mgr.Cluster.Registry.PrivateRegistry, kubekeyapi.DefaultKubeImageRepo), Repo: "kube-scheduler", Tag: mgr.Cluster.Kubernetes.Version, Group: "master", Enable: true},
{Prefix: GetImagePrefix(mgr.Cluster.Registry.PrivateRegistry, kubekeyapi.DefaultKubeImageRepo), Repo: "kube-proxy", Tag: mgr.Cluster.Kubernetes.Version, Group: "k8s", Enable: true},
{Prefix: GetImagePrefix(mgr.Cluster.Registry.PrivateRegistry, "coredns"), Repo: "coredns", Tag: "1.6.0", Group: "k8s", Enable: true},
{Prefix: GetImagePrefix(mgr.Cluster.Registry.PrivateRegistry, "calico"), Repo: "kube-controllers", Tag: kubekeyapi.DefaultCalicoVersion, Group: "k8s", Enable: strings.EqualFold(mgr.Cluster.Network.Plugin, "calico")},
{Prefix: GetImagePrefix(mgr.Cluster.Registry.PrivateRegistry, "calico"), Repo: "cni", Tag: kubekeyapi.DefaultCalicoVersion, Group: "k8s", Enable: strings.EqualFold(mgr.Cluster.Network.Plugin, "calico")},
{Prefix: GetImagePrefix(mgr.Cluster.Registry.PrivateRegistry, "calico"), Repo: "node", Tag: kubekeyapi.DefaultCalicoVersion, Group: "k8s", Enable: strings.EqualFold(mgr.Cluster.Network.Plugin, "calico")},
{Prefix: GetImagePrefix(mgr.Cluster.Registry.PrivateRegistry, "calico"), Repo: "pod2daemon-flexvol", Tag: kubekeyapi.DefaultCalicoVersion, Group: "k8s", Enable: strings.EqualFold(mgr.Cluster.Network.Plugin, "calico")},
{Prefix: GetImagePrefix(mgr.Cluster.Registry.PrivateRegistry, "kubesphere"), Repo: "flannel", Tag: kubekeyapi.DefaultFlannelVersion, Group: "k8s", Enable: strings.EqualFold(mgr.Cluster.Network.Plugin, "flannel")},
imagesList := []*Image{
GetImage(mgr, "etcd"),
GetImage(mgr, "pause"),
GetImage(mgr, "kube-apiserver"),
GetImage(mgr, "kube-controller-manager"),
GetImage(mgr, "kube-scheduler"),
GetImage(mgr, "kube-proxy"),
GetImage(mgr, "coredns"),
GetImage(mgr, "k8s-dns-node-cache"),
GetImage(mgr, "calico-kube-controllers"),
GetImage(mgr, "calico-cni"),
GetImage(mgr, "calico-node"),
GetImage(mgr, "calico-flexvol"),
GetImage(mgr, "flannel"),
}
for _, image := range imagesList {
if node.IsMaster && image.Group == "master" && image.Enable {
fmt.Printf("[%s] Downloading image: %s\n", node.Name, image.NewImage())
_, err := mgr.Runner.RunCmd(fmt.Sprintf("sudo -E docker pull %s", image.NewImage()))
fmt.Printf("[%s] Downloading image: %s\n", node.Name, image.ImageName())
_, err := mgr.Runner.RunCmd(fmt.Sprintf("sudo -E docker pull %s", image.ImageName()))
if err != nil {
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("failed to download image: %s", image.NewImage()))
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("failed to download image: %s", image.ImageName()))
}
}
if (node.IsMaster || node.IsWorker) && image.Group == "k8s" && image.Enable {
fmt.Printf("[%s] Downloading image: %s\n", node.Name, image.NewImage())
_, err := mgr.Runner.RunCmd(fmt.Sprintf("sudo -E docker pull %s", image.NewImage()))
fmt.Printf("[%s] Downloading image: %s\n", node.Name, image.ImageName())
_, err := mgr.Runner.RunCmd(fmt.Sprintf("sudo -E docker pull %s", image.ImageName()))
if err != nil {
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("failed to download image: %s", image.NewImage()))
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("failed to download image: %s", image.ImageName()))
}
}
if node.IsEtcd && image.Group == "etcd" && image.Enable {
fmt.Printf("[%s] Downloading image: %s\n", node.Name, image.NewImage())
_, err := mgr.Runner.RunCmd(fmt.Sprintf("sudo -E docker pull %s", image.NewImage()))
fmt.Printf("[%s] Downloading image: %s\n", node.Name, image.ImageName())
_, err := mgr.Runner.RunCmd(fmt.Sprintf("sudo -E docker pull %s", image.ImageName()))
if err != nil {
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("failed to download image: %s", image.NewImage()))
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("failed to download image: %s", image.ImageName()))
}
}
}

View File

@ -9,7 +9,13 @@ import (
var (
KubeSphereTempl = template.Must(template.New("KubeSphere").Parse(
dedent.Dedent(`---
dedent.Dedent(`
---
apiVersion: v1
kind: Namespace
metadata:
name: kubesphere-system
---
apiVersion: v1
data:
ks-config.yaml: |
@ -225,7 +231,7 @@ spec:
serviceAccountName: ks-installer
containers:
- name: installer
image: kubespheredev/ks-installer:helm3-dev
image: kubespheredev/ks-installer:dev-helm3
imagePullPolicy: IfNotPresent
`)))
)

View File

@ -0,0 +1,47 @@
package dns
import (
"github.com/kubesphere/kubekey/pkg/util"
"github.com/kubesphere/kubekey/pkg/util/manager"
"github.com/lithammer/dedent"
"text/template"
)
var (
	// CorednsServiceTempl is the manifest for the coredns Service in
	// kube-system; only the clusterIP is parameterized.
	CorednsServiceTempl = template.Must(template.New("CorednsService").Parse(
		dedent.Dedent(`---
apiVersion: v1
kind: Service
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "coredns"
    addonmanager.kubernetes.io/mode: Reconcile
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: {{ .ClusterIP }}
  ports:
    - name: dns
      port: 53
      protocol: UDP
    - name: dns-tcp
      port: 53
      protocol: TCP
    - name: metrics
      port: 9153
      protocol: TCP
`)))
)

// GenerateCorednsService renders the coredns Service manifest, taking the
// service's cluster IP from the cluster configuration
// (mgr.Cluster.ClusterIP()).
func GenerateCorednsService(mgr *manager.Manager) (string, error) {
	return util.Render(CorednsServiceTempl, util.Data{
		"ClusterIP": mgr.Cluster.ClusterIP(),
	})
}

View File

@ -1,54 +1,12 @@
package coredns
package dns
import (
"encoding/base64"
"fmt"
"github.com/kubesphere/kubekey/pkg/util"
"github.com/kubesphere/kubekey/pkg/util/manager"
"github.com/lithammer/dedent"
"github.com/pkg/errors"
"text/template"
)
var (
CorednsServiceTempl = template.Must(template.New("CorednsService").Parse(
dedent.Dedent(`---
apiVersion: v1
kind: Service
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "coredns"
addonmanager.kubernetes.io/mode: Reconcile
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
spec:
selector:
k8s-app: kube-dns
clusterIP: {{ .ClusterIP }}
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
`)))
)
func GenerateCorednsService(mgr *manager.Manager) (string, error) {
return util.Render(CorednsServiceTempl, util.Data{
"ClusterIP": mgr.Cluster.ClusterIP(),
})
}
func OverrideCorednsService(mgr *manager.Manager) error {
corednsSvc, err := GenerateCorednsService(mgr)
if err != nil {
@ -70,3 +28,20 @@ func OverrideCorednsService(mgr *manager.Manager) error {
}
return nil
}
// DeployNodelocaldns renders the nodelocaldns manifests, writes them to
// /etc/kubernetes/nodelocaldns.yaml on the target node, and applies them
// with kubectl.
func DeployNodelocaldns(mgr *manager.Manager) error {
	nodelocaldns, err := GenerateNodelocaldnsService(mgr)
	if err != nil {
		return err
	}
	// Ship the manifest base64-encoded so shell quoting cannot mangle it.
	nodelocaldnsBase64 := base64.StdEncoding.EncodeToString([]byte(nodelocaldns))
	if _, err := mgr.Runner.RunCmd(fmt.Sprintf("sudo -E /bin/sh -c \"echo %s | base64 -d > /etc/kubernetes/nodelocaldns.yaml\"", nodelocaldnsBase64)); err != nil {
		return errors.Wrap(errors.WithStack(err), "failed to generate nodelocaldns manifests")
	}
	if _, err := mgr.Runner.RunCmd("/usr/local/bin/kubectl apply -f /etc/kubernetes/nodelocaldns.yaml"); err != nil {
		return errors.Wrap(errors.WithStack(err), "failed to create nodelocaldns")
	}
	return nil
}

View File

@ -0,0 +1,185 @@
package dns
import (
"github.com/kubesphere/kubekey/pkg/images"
"github.com/kubesphere/kubekey/pkg/util"
"github.com/kubesphere/kubekey/pkg/util/manager"
"github.com/lithammer/dedent"
"text/template"
)
var (
	// nodelocaldnsServiceTempl bundles the nodelocaldns add-on manifests:
	// a ConfigMap with the Corefile, a ServiceAccount, and the DaemonSet
	// running the node-local DNS cache bound to 169.254.25.10.
	nodelocaldnsServiceTempl = template.Must(template.New("Nodelocaldns").Parse(
		dedent.Dedent(`---
apiVersion: v1
kind: ConfigMap
metadata:
  name: nodelocaldns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    {{ .DnsDomain }}:53 {
        errors
        cache {
            success 9984 30
            denial 9984 5
        }
        reload
        loop
        bind 169.254.25.10
        forward . {{ .ForwardTarget }} {
            force_tcp
        }
        prometheus :9253
        health 169.254.25.10:9254
    }
    in-addr.arpa:53 {
        errors
        cache 30
        reload
        loop
        bind 169.254.25.10
        forward . {{ .ForwardTarget }} {
            force_tcp
        }
        prometheus :9253
    }
    ip6.arpa:53 {
        errors
        cache 30
        reload
        loop
        bind 169.254.25.10
        forward . {{ .ForwardTarget }} {
            force_tcp
        }
        prometheus :9253
    }
    .:53 {
        errors
        cache 30
        reload
        loop
        bind 169.254.25.10
        forward . /etc/resolv.conf
        prometheus :9253
    }
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nodelocaldns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: nodelocaldns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: nodelocaldns
  template:
    metadata:
      labels:
        k8s-app: nodelocaldns
      annotations:
        prometheus.io/scrape: 'true'
        prometheus.io/port: '9253'
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: nodelocaldns
      hostNetwork: true
      dnsPolicy: Default # Don't use cluster DNS.
      tolerations:
        - effect: NoSchedule
          operator: "Exists"
        - effect: NoExecute
          operator: "Exists"
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      containers:
        - name: node-cache
          image: {{ .NodelocaldnsImage }}
          resources:
            limits:
              memory: 170Mi
            requests:
              cpu: 100m
              memory: 70Mi
          args: [ "-localip", "169.254.25.10", "-conf", "/etc/coredns/Corefile", "-upstreamsvc", "coredns" ]
          securityContext:
            privileged: true
          ports:
            - containerPort: 53
              name: dns
              protocol: UDP
            - containerPort: 53
              name: dns-tcp
              protocol: TCP
            - containerPort: 9253
              name: metrics
              protocol: TCP
          livenessProbe:
            httpGet:
              host: 169.254.25.10
              path: /health
              port: 9254
              scheme: HTTP
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 10
          readinessProbe:
            httpGet:
              host: 169.254.25.10
              path: /health
              port: 9254
              scheme: HTTP
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 10
          volumeMounts:
            - name: config-volume
              mountPath: /etc/coredns
            - name: xtables-lock
              mountPath: /run/xtables.lock
      volumes:
        - name: config-volume
          configMap:
            name: nodelocaldns
            items:
              - key: Corefile
                path: Corefile
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 20%
    type: RollingUpdate
`)))
)

// GenerateNodelocaldnsService renders the nodelocaldns manifests
// (ConfigMap, ServiceAccount, DaemonSet) for this cluster.
func GenerateNodelocaldnsService(mgr *manager.Manager) (string, error) {
	return util.Render(nodelocaldnsServiceTempl, util.Data{
		// Upstream for cluster-internal zones: the coredns service IP.
		"ForwardTarget": mgr.Cluster.ClusterIP(),
		// DNS zone served by the node-local cache.
		// NOTE(review): ClusterName is used as the DNS domain here — confirm
		// it holds the cluster DNS suffix (e.g. cluster.local).
		"DnsDomain":         mgr.Cluster.Kubernetes.ClusterName,
		"NodelocaldnsImage": images.GetImage(mgr, "k8s-dns-node-cache").ImageName(),
	})
}

View File

@ -12,7 +12,7 @@ import (
)
func DeployNetworkPlugin(mgr *manager.Manager) error {
mgr.Logger.Infoln("Generate etcd certs")
mgr.Logger.Infoln("Deploy network plugin")
return mgr.RunTaskOnMasterNodes(deployNetworkPlugin, true)
}