mirror of https://github.com/kubesphere/kubekey.git

commit f67956d147 (parent e9eb8f085c): add local volume
@@ -1,7 +1,7 @@
 package cmd

 import (
-	"github.com/pixiake/kubekey/pkg/config"
+	"github.com/pixiake/kubekey/pkg/reset"
 	"github.com/pixiake/kubekey/pkg/util"
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
@@ -25,5 +25,5 @@ func NewCmdResetCluster() *cobra.Command {
 }

 func resetCluster(clusterCfgFile string, logger *log.Logger) {
-	config.ParseClusterCfg(clusterCfgFile, logger)
+	reset.ResetCluster(clusterCfgFile, logger)
 }

@@ -95,6 +95,7 @@ type LBKubeApiserverCfg struct {
 type RegistryConfig struct {
 	RegistryMirrors    []string `yaml:"registryMirrors" json:"registryMirrors,omitempty"`
 	InsecureRegistries []string `yaml:"insecureRegistries" json:"insecureRegistries,omitempty"`
+	PrivateRegistry    string   `yaml:"privateRegistry" json:"privateRegistry,omitempty"`
 }

 type ExternalEtcd struct {
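For reference, a minimal sketch (not part of the commit) of how a registry section like the one above unmarshals from cluster-config YAML via the struct's yaml tags; the struct is copied locally so the snippet stands alone, and the registry host is a made-up placeholder:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Local copy of RegistryConfig so the sketch is self-contained.
type RegistryConfig struct {
	RegistryMirrors    []string `yaml:"registryMirrors"`
	InsecureRegistries []string `yaml:"insecureRegistries"`
	PrivateRegistry    string   `yaml:"privateRegistry"`
}

func main() {
	// "registry.local" is a hypothetical private registry host.
	data := []byte(`
registryMirrors: ["https://registry.docker-cn.com"]
insecureRegistries: ["registry.local"]
privateRegistry: registry.local
`)
	var cfg RegistryConfig
	if err := yaml.Unmarshal(data, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg)
}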
@@ -0,0 +1,39 @@
package v1alpha1

type LocalVolume struct {
	IsDefaultClass bool
}

type NfsClient struct {
	IsDefaultClass     bool
	NfsServer          string
	NfsPath            string
	NfsVrs3Enabled     bool
	NfsArchiveOnDelete bool
}

type GlusterFS struct {
	IsDefaultClass  bool
	RestAuthEnabled bool
	RestUrl         string
	ClusterID       string
	RestUser        string
	SecretName      string
	GidMin          int
	GidMax          int
	VolumeType      string
	JwtAdminKey     string
}

type CephRBD struct {
	IsDefaultClass bool
	Monitors       []string
	AdminID        string
	AdminSecret    string
	UserID         string
	UserSecret     string
	Pool           string
	FsType         string
	ImageFormat    string
	ImageFeatures  string
}
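A small illustration of how a flag such as LocalVolume.IsDefaultClass could feed the storageclass.beta.kubernetes.io/is-default-class annotation that the OpenEBS template later in this commit hard-codes to "true"; defaultClassAnnotation is a hypothetical helper, not something this commit defines:

package main

import (
	"fmt"
	"strconv"
)

// Trimmed copy of the LocalVolume type above.
type LocalVolume struct {
	IsDefaultClass bool
}

// defaultClassAnnotation renders the string value expected by the
// storageclass.beta.kubernetes.io/is-default-class annotation.
func defaultClassAnnotation(lv LocalVolume) string {
	return strconv.FormatBool(lv.IsDefaultClass)
}

func main() {
	lv := LocalVolume{IsDefaultClass: true}
	fmt.Printf("storageclass.beta.kubernetes.io/is-default-class: %q\n",
		defaultClassAnnotation(lv))
}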
@@ -128,6 +128,7 @@ func GenerateEtcdService(mgr *manager.Manager) error {
 }

 func generateEtcdService(mgr *manager.Manager, node *kubekeyapi.HostCfg, conn ssh.Connection) error {
+	PreDownloadEtcdImages(mgr, node)
 	etcdService, err := tmpl.GenerateEtcdService(mgr, mgr.Runner.Index)
 	if err != nil {
 		return err
@@ -187,17 +188,18 @@ func generateEtcdService(mgr *manager.Manager, node *kubekeyapi.HostCfg, conn ssh.Connection) error {
 			}
 			time.Sleep(time.Second * 5)
 		}
-	} else {
-		checkMemberCmd := fmt.Sprintf("export ETCDCTL_API=2;export ETCDCTL_CERT_FILE='/etc/ssl/etcd/ssl/admin-%s.pem';export ETCDCTL_KEY_FILE='/etc/ssl/etcd/ssl/admin-%s-key.pem';export ETCDCTL_CA_FILE='/etc/ssl/etcd/ssl/ca.pem';%s/etcdctl --no-sync --endpoints=%s member list | grep -q %s", node.HostName, node.HostName, etcdBinDir, strings.Join(addrList, ","), fmt.Sprintf("https://%s:2379", node.InternalAddress))
-		_, err := mgr.Runner.RunCmd(checkMemberCmd)
-		if err != nil {
-			joinMemberCmd := fmt.Sprintf("export ETCDCTL_API=2;export ETCDCTL_CERT_FILE='/etc/ssl/etcd/ssl/admin-%s.pem';export ETCDCTL_KEY_FILE='/etc/ssl/etcd/ssl/admin-%s-key.pem';export ETCDCTL_CA_FILE='/etc/ssl/etcd/ssl/ca.pem';%s/etcdctl --endpoints=%s member add %s %s", node.HostName, node.HostName, etcdBinDir, strings.Join(addrList, ","), fmt.Sprintf("etcd%d", mgr.Runner.Index+1), fmt.Sprintf("https://%s:2380", node.InternalAddress))
-			_, err := mgr.Runner.RunCmd(joinMemberCmd)
-			if err != nil {
-				fmt.Println("failed to add etcd member")
-			}
-		}
 	}
+	//} else {
+	//	checkMemberCmd := fmt.Sprintf("export ETCDCTL_API=2;export ETCDCTL_CERT_FILE='/etc/ssl/etcd/ssl/admin-%s.pem';export ETCDCTL_KEY_FILE='/etc/ssl/etcd/ssl/admin-%s-key.pem';export ETCDCTL_CA_FILE='/etc/ssl/etcd/ssl/ca.pem';%s/etcdctl --no-sync --endpoints=%s member list | grep -q %s", node.HostName, node.HostName, etcdBinDir, strings.Join(addrList, ","), fmt.Sprintf("https://%s:2379", node.InternalAddress))
+	//	_, err := mgr.Runner.RunCmd(checkMemberCmd)
+	//	if err != nil {
+	//		joinMemberCmd := fmt.Sprintf("export ETCDCTL_API=2;export ETCDCTL_CERT_FILE='/etc/ssl/etcd/ssl/admin-%s.pem';export ETCDCTL_KEY_FILE='/etc/ssl/etcd/ssl/admin-%s-key.pem';export ETCDCTL_CA_FILE='/etc/ssl/etcd/ssl/ca.pem';%s/etcdctl --endpoints=%s member add %s %s", node.HostName, node.HostName, etcdBinDir, strings.Join(addrList, ","), fmt.Sprintf("etcd%d", mgr.Runner.Index+1), fmt.Sprintf("https://%s:2380", node.InternalAddress))
+	//		_, err := mgr.Runner.RunCmd(joinMemberCmd)
+	//		if err != nil {
+	//			fmt.Println("failed to add etcd member")
+	//		}
+	//	}
+	//}

 	for i := 20; i > 0; i-- {
 		_, err := mgr.Runner.RunCmd(checkHealthCmd)
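The hunk above checks for an existing etcd member, falls back to member add, and then polls the cluster until it reports healthy. A self-contained sketch of that check-then-poll pattern, with runCmd standing in for mgr.Runner.RunCmd and the etcdctl invocation reduced to a placeholder (the real commands export the ETCDCTL_* TLS variables first):

package main

import (
	"fmt"
	"os/exec"
	"time"
)

// runCmd is a stand-in for mgr.Runner.RunCmd: run a shell command
// and return its combined output.
func runCmd(cmd string) (string, error) {
	out, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
	return string(out), err
}

func main() {
	// Hypothetical health command; the real one carries TLS env vars.
	checkHealthCmd := "etcdctl cluster-health"
	for i := 20; i > 0; i-- {
		if _, err := runCmd(checkHealthCmd); err == nil {
			fmt.Println("etcd cluster is healthy")
			return
		}
		// Same back-off interval as the source: retry every 5 seconds.
		time.Sleep(time.Second * 5)
	}
	fmt.Println("etcd cluster did not become healthy")
}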
@@ -0,0 +1,19 @@
package etcd

import (
	"fmt"
	kubekeyapi "github.com/pixiake/kubekey/pkg/apis/kubekey/v1alpha1"
	"github.com/pixiake/kubekey/pkg/images"
	"github.com/pixiake/kubekey/pkg/util/manager"
)

func PreDownloadEtcdImages(mgr *manager.Manager, node *kubekeyapi.HostCfg) {
	imagesList := []images.Image{
		{Prefix: images.GetImagePrefix(mgr.Cluster.Registry.PrivateRegistry, kubekeyapi.DefaultKubeImageRepo), Repo: "etcd", Tag: kubekeyapi.DefaultEtcdVersion},
	}

	for _, image := range imagesList {
		fmt.Printf("[%s] Download image: %s\n", node.HostName, image.NewImage())
		mgr.Runner.RunCmd(fmt.Sprintf("sudo -E /bin/sh -c \"docker pull %s\"", image.NewImage()))
	}
}
@@ -0,0 +1,21 @@
package kubernetes

import (
	"fmt"
	kubekeyapi "github.com/pixiake/kubekey/pkg/apis/kubekey/v1alpha1"
	"github.com/pixiake/kubekey/pkg/images"
	"github.com/pixiake/kubekey/pkg/util/manager"
)

func PreDownloadNodeImages(mgr *manager.Manager, node *kubekeyapi.HostCfg) {
	imagesList := []images.Image{
		{Prefix: images.GetImagePrefix(mgr.Cluster.Registry.PrivateRegistry, kubekeyapi.DefaultKubeImageRepo), Repo: "pause", Tag: "3.1"},
		{Prefix: images.GetImagePrefix(mgr.Cluster.Registry.PrivateRegistry, "coredns"), Repo: "coredns", Tag: "1.6.0"},
		{Prefix: images.GetImagePrefix(mgr.Cluster.Registry.PrivateRegistry, "node"), Repo: "coredns", Tag: "1.6.0"},
	}

	for _, image := range imagesList {
		fmt.Printf("[%s] Download image: %s\n", node.HostName, image.NewImage())
		mgr.Runner.RunCmd(fmt.Sprintf("sudo -E /bin/sh -c \"docker pull %s\"", image.NewImage()))
	}
}
@@ -1,11 +1,15 @@
 package config

 import (
+	"fmt"
 	kubekeyapi "github.com/pixiake/kubekey/pkg/apis/kubekey/v1alpha1"
+	"github.com/pixiake/kubekey/pkg/util"
 	"github.com/pkg/errors"
 	log "github.com/sirupsen/logrus"
 	"gopkg.in/yaml.v2"
 	"io/ioutil"
+	"os/exec"
+	"os/user"
 	"path/filepath"
 	"strconv"
 )
@@ -14,20 +18,24 @@ func ParseClusterCfg(clusterCfgPath string, logger *log.Logger) (*kubekeyapi.K2Cluster, error) {
 	clusterCfg := kubekeyapi.K2Cluster{}

 	if len(clusterCfgPath) == 0 {
-		return nil, errors.New("cluster configuration path not provided")
-	}
-	fp, err := filepath.Abs(clusterCfgPath)
-	if err != nil {
-		return nil, errors.Wrap(err, "failed to lookup current directory")
-	}
-	content, err := ioutil.ReadFile(fp)
-	if err != nil {
-		return nil, errors.Wrap(err, "unable to read the given cluster configuration file")
-	}
-
-	if err := yaml.Unmarshal(content, &clusterCfg); err != nil {
-		return nil, errors.Wrap(err, "unable to convert credentials file to yaml")
-	}
+		user, _ := user.Current()
+		if user.Name != "root" {
+			return nil, errors.New(fmt.Sprintf("Current user is %s, Please use root !", user.Name))
+		}
+		clusterCfg = AllinoneHost(user)
+	} else {
+		fp, err := filepath.Abs(clusterCfgPath)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to lookup current directory")
+		}
+		content, err := ioutil.ReadFile(fp)
+		if err != nil {
+			return nil, errors.Wrap(err, "unable to read the given cluster configuration file")
+		}
+
+		if err := yaml.Unmarshal(content, &clusterCfg); err != nil {
+			return nil, errors.Wrap(err, "unable to convert file to yaml")
+		}
+	}

 	defaultK2Cluster := SetDefaultK2Cluster(&clusterCfg)
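The parse path in this hunk boils down to resolving the file path, reading the file, and unmarshalling the YAML. A stripped-down sketch of that flow, with a placeholder struct and file name standing in for K2Cluster and a real cluster config:

package main

import (
	"fmt"
	"io/ioutil"
	"path/filepath"

	"gopkg.in/yaml.v2"
)

// Stand-in for the K2Cluster spec; only enough fields to show the flow.
type cluster struct {
	Spec struct {
		Hosts []map[string]string `yaml:"hosts"`
	} `yaml:"spec"`
}

func parse(path string) (*cluster, error) {
	// Resolve, read, unmarshal: the same three steps as ParseClusterCfg.
	fp, err := filepath.Abs(path)
	if err != nil {
		return nil, err
	}
	content, err := ioutil.ReadFile(fp)
	if err != nil {
		return nil, err
	}
	c := &cluster{}
	if err := yaml.Unmarshal(content, c); err != nil {
		return nil, err
	}
	return c, nil
}

func main() {
	c, err := parse("config-sample.yaml") // hypothetical file name
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("parsed %d hosts\n", len(c.Spec.Hosts))
}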
@@ -178,3 +186,25 @@ func SetDefaultClusterCfg(cfg *kubekeyapi.K2ClusterSpec) kubekeyapi.KubeCluster {

 	return defaultClusterCfg
 }
+
+func AllinoneHost(user *user.User) kubekeyapi.K2Cluster {
+	allinoneCfg := kubekeyapi.K2Cluster{}
+	if err := exec.Command("/bin/sh", "-c", "if [ ! -f \"$HOME/.ssh/id_rsa\" ]; then ssh-keygen -t rsa -P \"\" -f $HOME/.ssh/id_rsa && ls $HOME/.ssh;fi;").Run(); err != nil {
+		log.Fatalf("Failed to generate public key: %v", err)
+	}
+	if out, err := exec.Command("/bin/sh", "-c", "echo \"$(cat $HOME/.ssh/id_rsa.pub)\" >> $HOME/.ssh/authorized_keys").CombinedOutput(); err != nil {
+		log.Fatalf("Failed to copy public key to authorized_keys: %v\n%s", err, string(out))
+	}
+
+	allinoneCfg.Spec.Hosts = append(allinoneCfg.Spec.Hosts, kubekeyapi.HostCfg{
+		HostName:        "ks-allinone",
+		SSHAddress:      "",
+		InternalAddress: util.LocalIP(),
+		Port:            "",
+		User:            user.Name,
+		Password:        "",
+		SSHKeyPath:      fmt.Sprintf("%s/.ssh/id_rsa", user.HomeDir),
+		Role:            []string{"master", "worker", "etcd"},
+	})
+	return allinoneCfg
+}
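AllinoneHost bootstraps passwordless SSH to localhost before registering the single host. A condensed sketch of just that key bootstrap, using only the stdlib calls the source uses; note that, like the source, it appends the public key on every run, so a production version would guard against duplicate authorized_keys entries:

package main

import (
	"fmt"
	"os/exec"
)

// ensureSSHKey creates an RSA key pair only if one does not exist yet,
// then authorizes it for root-to-self SSH, mirroring AllinoneHost above.
func ensureSSHKey() error {
	script := `if [ ! -f "$HOME/.ssh/id_rsa" ]; then ssh-keygen -t rsa -P "" -f $HOME/.ssh/id_rsa; fi
cat $HOME/.ssh/id_rsa.pub >> $HOME/.ssh/authorized_keys`
	out, err := exec.Command("/bin/sh", "-c", script).CombinedOutput()
	if err != nil {
		return fmt.Errorf("ssh bootstrap failed: %v\n%s", err, out)
	}
	return nil
}

func main() {
	if err := ensureSSHKey(); err != nil {
		fmt.Println(err)
	}
}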
@@ -0,0 +1,33 @@
package images

import (
	"fmt"
)

type Image struct {
	Prefix string
	Repo   string
	Tag    string
}

func (image *Image) NewImage() string {
	return fmt.Sprintf("%s%s:%s", image.Prefix, image.Repo, image.Tag)
}

func GetImagePrefix(privateRegistry, ns string) string {
	var prefix string
	if privateRegistry == "" {
		if ns == "" {
			prefix = ""
		} else {
			prefix = fmt.Sprintf("%s/", ns)
		}
	} else {
		if ns == "" {
			prefix = fmt.Sprintf("%s/library/", privateRegistry)
		} else {
			prefix = fmt.Sprintf("%s/%s/", privateRegistry, ns)
		}
	}
	return prefix
}
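GetImagePrefix has four branches, one per combination of private registry and namespace. A quick sketch that exercises all of them; the function body is copied from the hunk above so the snippet runs on its own, and the registry host is a made-up placeholder:

package main

import "fmt"

// Local copy of GetImagePrefix from the hunk above.
func GetImagePrefix(privateRegistry, ns string) string {
	var prefix string
	if privateRegistry == "" {
		if ns == "" {
			prefix = ""
		} else {
			prefix = fmt.Sprintf("%s/", ns)
		}
	} else {
		if ns == "" {
			prefix = fmt.Sprintf("%s/library/", privateRegistry)
		} else {
			prefix = fmt.Sprintf("%s/%s/", privateRegistry, ns)
		}
	}
	return prefix
}

func main() {
	// "registry.local" is a hypothetical private registry host.
	for _, c := range [][2]string{
		{"", ""},
		{"", "kubesphere"},
		{"registry.local", ""},
		{"registry.local", "kubesphere"},
	} {
		fmt.Printf("registry=%q ns=%q -> %q\n", c[0], c[1], GetImagePrefix(c[0], c[1]))
	}
}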
@@ -0,0 +1,344 @@
package local

import (
	"github.com/lithammer/dedent"
	"github.com/pixiake/kubekey/pkg/util"
	"github.com/pixiake/kubekey/pkg/util/manager"
	"text/template"
)

var OpenebsTempl = template.Must(template.New("openebs").Parse(
	dedent.Dedent(`---
# Sample storage classes for OpenEBS Local PV
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local
  annotations:
    storageclass.kubesphere.io/supported_access_modes: '["ReadWriteOnce"]'
    storageclass.beta.kubernetes.io/is-default-class: "true"
    openebs.io/cas-type: local
    cas.openebs.io/config: |
      - name: StorageType
        value: "hostpath"
      - name: BasePath
        value: "/var/openebs/local/"
provisioner: openebs.io/local
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
---
# Create Maya Service Account
apiVersion: v1
kind: ServiceAccount
metadata:
  name: openebs-maya-operator
  namespace: kube-system
---
# Define Role that allows operations on K8s pods/deployments
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: openebs-maya-operator
rules:
- apiGroups: ["*"]
  resources: ["nodes", "nodes/proxy"]
  verbs: ["*"]
- apiGroups: ["*"]
  resources: ["namespaces", "services", "pods", "deployments", "events", "endpoints", "configmaps", "jobs"]
  verbs: ["*"]
- apiGroups: ["*"]
  resources: ["storageclasses", "persistentvolumeclaims", "persistentvolumes"]
  verbs: ["*"]
- apiGroups: ["volumesnapshot.external-storage.k8s.io"]
  resources: ["volumesnapshots", "volumesnapshotdatas"]
  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: ["apiextensions.k8s.io"]
  resources: ["customresourcedefinitions"]
  verbs: ["get", "list", "create", "update", "delete", "patch"]
- apiGroups: ["*"]
  resources: ["disks", "blockdevices", "blockdeviceclaims"]
  verbs: ["*"]
- apiGroups: ["*"]
  resources: ["cstorpoolclusters", "storagepoolclaims", "storagepoolclaims/finalizers", "storagepools"]
  verbs: ["*"]
- apiGroups: ["*"]
  resources: ["castemplates", "runtasks"]
  verbs: ["*"]
- apiGroups: ["*"]
  resources: ["cstorpools", "cstorpools/finalizers", "cstorvolumereplicas", "cstorvolumes", "cstorvolumeclaims"]
  verbs: ["*"]
- apiGroups: ["*"]
  resources: ["cstorbackups", "cstorrestores", "cstorcompletedbackups"]
  verbs: ["*"]
- nonResourceURLs: ["/metrics"]
  verbs: ["get"]
- apiGroups: ["*"]
  resources: ["upgradetasks"]
  verbs: ["*"]
---
# Bind the Service Account with the Role Privileges.
# TODO: Check if default account also needs to be there
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: openebs-maya-operator
subjects:
- kind: ServiceAccount
  name: openebs-maya-operator
  namespace: kube-system
- kind: User
  name: system:serviceaccount:default:default
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: openebs-maya-operator
  apiGroup: rbac.authorization.k8s.io
---
# This is the node-disk-manager related config.
# It can be used to customize the disks probes and filters
apiVersion: v1
kind: ConfigMap
metadata:
  name: openebs-ndm-config
  namespace: kube-system
  labels:
    openebs.io/component-name: ndm-config
data:
  # udev-probe is default or primary probe which should be enabled to run ndm
  # filterconfigs contains configs of filters, in the form of include
  # and exclude comma separated strings
  node-disk-manager.config: |
    probeconfigs:
      - key: udev-probe
        name: udev probe
        state: true
      - key: seachest-probe
        name: seachest probe
        state: false
      - key: smart-probe
        name: smart probe
        state: true
    filterconfigs:
      - key: os-disk-exclude-filter
        name: os disk exclude filter
        state: true
        exclude: "/,/etc/hosts,/boot"
      - key: vendor-filter
        name: vendor filter
        state: true
        include: ""
        exclude: "CLOUDBYT,OpenEBS"
      - key: path-filter
        name: path filter
        state: true
        include: ""
        exclude: "loop,/dev/fd0,/dev/sr0,/dev/ram,/dev/dm-,/dev/md"
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: openebs-ndm
  namespace: kube-system
  labels:
    name: openebs-ndm
    openebs.io/component-name: ndm
    openebs.io/version: 1.1.0
spec:
  selector:
    matchLabels:
      name: openebs-ndm
      openebs.io/component-name: ndm
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        name: openebs-ndm
        openebs.io/component-name: ndm
        openebs.io/version: 1.1.0
    spec:
      # By default the node-disk-manager will be run on all kubernetes nodes
      # If you would like to limit this to only some nodes, say the nodes
      # that have storage attached, you could label those node and use
      # nodeSelector.
      #
      # e.g. label the storage nodes with - "openebs.io/nodegroup"="storage-node"
      # kubectl label node <node-name> "openebs.io/nodegroup"="storage-node"
      #nodeSelector:
      #  "openebs.io/nodegroup": "storage-node"
      serviceAccountName: openebs-maya-operator
      hostNetwork: true
      containers:
      - name: node-disk-manager
        image: kubesphere/node-disk-manager-amd64:v0.4.1
        imagePullPolicy: IfNotPresent
        securityContext:
          privileged: true
        volumeMounts:
        - name: config
          mountPath: /host/node-disk-manager.config
          subPath: node-disk-manager.config
          readOnly: true
        - name: udev
          mountPath: /run/udev
        - name: procmount
          mountPath: /host/proc
          readOnly: true
        - name: sparsepath
          mountPath: /var/openebs/sparse
        env:
        # namespace in which NDM is installed will be passed to NDM Daemonset
        # as environment variable
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        # pass hostname as env variable using downward API to the NDM container
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        # specify the directory where the sparse files need to be created.
        # if not specified, then sparse files will not be created.
        - name: SPARSE_FILE_DIR
          value: "/var/openebs/sparse"
        # Size(bytes) of the sparse file to be created.
        - name: SPARSE_FILE_SIZE
          value: "10737418240"
        # Specify the number of sparse files to be created
        - name: SPARSE_FILE_COUNT
          value: "1"
        livenessProbe:
          exec:
            command:
            - pgrep
            - ".*ndm"
          initialDelaySeconds: 30
          periodSeconds: 60
      volumes:
      - name: config
        configMap:
          name: openebs-ndm-config
      - name: udev
        hostPath:
          path: /run/udev
          type: Directory
      # mount /proc (to access mount file of process 1 of host) inside container
      # to read mount-point of disks and partitions
      - name: procmount
        hostPath:
          path: /proc
          type: Directory
      - name: sparsepath
        hostPath:
          path: /var/openebs/sparse
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: openebs-ndm-operator
  namespace: kube-system
  labels:
    name: openebs-ndm-operator
    openebs.io/component-name: ndm-operator
    openebs.io/version: 1.1.0
spec:
  selector:
    matchLabels:
      name: openebs-ndm-operator
      openebs.io/component-name: ndm-operator
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        name: openebs-ndm-operator
        openebs.io/component-name: ndm-operator
        openebs.io/version: 1.1.0
    spec:
      serviceAccountName: openebs-maya-operator
      containers:
      - name: node-disk-operator
        image: kubesphere/node-disk-operator-amd64:v0.4.1
        imagePullPolicy: IfNotPresent
        readinessProbe:
          exec:
            command:
            - stat
            - /tmp/operator-sdk-ready
          initialDelaySeconds: 4
          periodSeconds: 10
          failureThreshold: 1
        env:
        - name: WATCH_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: OPERATOR_NAME
          value: "node-disk-operator"
        - name: CLEANUP_JOB_IMAGE
          value: quay.azk8s.cn/openebs/linux-utils:3.9
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: openebs-localpv-provisioner
  namespace: kube-system
  labels:
    name: openebs-localpv-provisioner
    openebs.io/component-name: openebs-localpv-provisioner
    openebs.io/version: 1.1.0
spec:
  selector:
    matchLabels:
      name: openebs-localpv-provisioner
      openebs.io/component-name: openebs-localpv-provisioner
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        name: openebs-localpv-provisioner
        openebs.io/component-name: openebs-localpv-provisioner
        openebs.io/version: 1.1.0
    spec:
      serviceAccountName: openebs-maya-operator
      containers:
      - name: openebs-localpv-provisioner
        imagePullPolicy: IfNotPresent
        image: kubesphere/provisioner-localpv:1.1.0
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: OPENEBS_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: OPENEBS_IO_ENABLE_ANALYTICS
          value: "true"
        - name: OPENEBS_IO_HELPER_IMAGE
          value: quay.azk8s.cn/openebs/openebs-tools:3.8
        - name: OPENEBS_IO_INSTALLER_TYPE
          value: "openebs-operator-lite"
        livenessProbe:
          exec:
            command:
            - pgrep
            - ".*localpv"
          initialDelaySeconds: 30
          periodSeconds: 60
`)))

func GenerateOpenebsManifests(mgr *manager.Manager) (string, error) {
	return util.Render(OpenebsTempl, util.Data{
		"ClusterIP": mgr.Cluster.ClusterIP(),
	})
}
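GenerateOpenebsManifests presumably just executes the static template into a string via util.Render (the template has no placeholders, so the ClusterIP entry goes unused). A self-contained sketch of that rendering step with a trimmed template; the render helper here mirrors what util.Render is assumed to do:

package main

import (
	"bytes"
	"fmt"
	"text/template"

	"github.com/lithammer/dedent"
)

// Trimmed stand-in for OpenebsTempl above.
var scTempl = template.Must(template.New("openebs").Parse(
	dedent.Dedent(`---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local
provisioner: openebs.io/local
`)))

// render executes the template into a buffer and returns the string,
// which is what a helper like util.Render presumably does.
func render(tmpl *template.Template, data interface{}) (string, error) {
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, data); err != nil {
		return "", err
	}
	return buf.String(), nil
}

func main() {
	out, err := render(scTempl, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
}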
@@ -0,0 +1,327 @@
---
# Sample storage classes for OpenEBS Local PV
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local
  annotations:
    storageclass.kubesphere.io/supported_access_modes: '["ReadWriteOnce"]'
    storageclass.beta.kubernetes.io/is-default-class: "true"
    openebs.io/cas-type: local
    cas.openebs.io/config: |
      - name: StorageType
        value: "hostpath"
      - name: BasePath
        value: "/var/openebs/local/"
provisioner: openebs.io/local
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
---
# Create Maya Service Account
apiVersion: v1
kind: ServiceAccount
metadata:
  name: openebs-maya-operator
  namespace: kube-system
---
# Define Role that allows operations on K8s pods/deployments
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: openebs-maya-operator
rules:
- apiGroups: ["*"]
  resources: ["nodes", "nodes/proxy"]
  verbs: ["*"]
- apiGroups: ["*"]
  resources: ["namespaces", "services", "pods", "deployments", "events", "endpoints", "configmaps", "jobs"]
  verbs: ["*"]
- apiGroups: ["*"]
  resources: ["storageclasses", "persistentvolumeclaims", "persistentvolumes"]
  verbs: ["*"]
- apiGroups: ["volumesnapshot.external-storage.k8s.io"]
  resources: ["volumesnapshots", "volumesnapshotdatas"]
  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: ["apiextensions.k8s.io"]
  resources: ["customresourcedefinitions"]
  verbs: ["get", "list", "create", "update", "delete", "patch"]
- apiGroups: ["*"]
  resources: ["disks", "blockdevices", "blockdeviceclaims"]
  verbs: ["*"]
- apiGroups: ["*"]
  resources: ["cstorpoolclusters", "storagepoolclaims", "storagepoolclaims/finalizers", "storagepools"]
  verbs: ["*"]
- apiGroups: ["*"]
  resources: ["castemplates", "runtasks"]
  verbs: ["*"]
- apiGroups: ["*"]
  resources: ["cstorpools", "cstorpools/finalizers", "cstorvolumereplicas", "cstorvolumes", "cstorvolumeclaims"]
  verbs: ["*"]
- apiGroups: ["*"]
  resources: ["cstorbackups", "cstorrestores", "cstorcompletedbackups"]
  verbs: ["*"]
- nonResourceURLs: ["/metrics"]
  verbs: ["get"]
- apiGroups: ["*"]
  resources: ["upgradetasks"]
  verbs: ["*"]
---
# Bind the Service Account with the Role Privileges.
# TODO: Check if default account also needs to be there
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: openebs-maya-operator
subjects:
- kind: ServiceAccount
  name: openebs-maya-operator
  namespace: kube-system
- kind: User
  name: system:serviceaccount:default:default
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: openebs-maya-operator
  apiGroup: rbac.authorization.k8s.io
---
# This is the node-disk-manager related config.
# It can be used to customize the disks probes and filters
apiVersion: v1
kind: ConfigMap
metadata:
  name: openebs-ndm-config
  namespace: kube-system
  labels:
    openebs.io/component-name: ndm-config
data:
  # udev-probe is default or primary probe which should be enabled to run ndm
  # filterconfigs contains configs of filters, in the form of include
  # and exclude comma separated strings
  node-disk-manager.config: |
    probeconfigs:
      - key: udev-probe
        name: udev probe
        state: true
      - key: seachest-probe
        name: seachest probe
        state: false
      - key: smart-probe
        name: smart probe
        state: true
    filterconfigs:
      - key: os-disk-exclude-filter
        name: os disk exclude filter
        state: true
        exclude: "/,/etc/hosts,/boot"
      - key: vendor-filter
        name: vendor filter
        state: true
        include: ""
        exclude: "CLOUDBYT,OpenEBS"
      - key: path-filter
        name: path filter
        state: true
        include: ""
        exclude: "loop,/dev/fd0,/dev/sr0,/dev/ram,/dev/dm-,/dev/md"
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: openebs-ndm
  namespace: kube-system
  labels:
    name: openebs-ndm
    openebs.io/component-name: ndm
    openebs.io/version: 1.1.0
spec:
  selector:
    matchLabels:
      name: openebs-ndm
      openebs.io/component-name: ndm
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        name: openebs-ndm
        openebs.io/component-name: ndm
        openebs.io/version: 1.1.0
    spec:
      # By default the node-disk-manager will be run on all kubernetes nodes
      # If you would like to limit this to only some nodes, say the nodes
      # that have storage attached, you could label those node and use
      # nodeSelector.
      #
      # e.g. label the storage nodes with - "openebs.io/nodegroup"="storage-node"
      # kubectl label node <node-name> "openebs.io/nodegroup"="storage-node"
      #nodeSelector:
      #  "openebs.io/nodegroup": "storage-node"
      serviceAccountName: openebs-maya-operator
      hostNetwork: true
      containers:
      - name: node-disk-manager
        image: kubesphere/node-disk-manager-amd64:v0.4.1
        imagePullPolicy: IfNotPresent
        securityContext:
          privileged: true
        volumeMounts:
        - name: config
          mountPath: /host/node-disk-manager.config
          subPath: node-disk-manager.config
          readOnly: true
        - name: udev
          mountPath: /run/udev
        - name: procmount
          mountPath: /host/proc
          readOnly: true
        - name: sparsepath
          mountPath: /var/openebs/sparse
        env:
        # namespace in which NDM is installed will be passed to NDM Daemonset
        # as environment variable
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        # pass hostname as env variable using downward API to the NDM container
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        # specify the directory where the sparse files need to be created.
        # if not specified, then sparse files will not be created.
        - name: SPARSE_FILE_DIR
          value: "/var/openebs/sparse"
        # Size(bytes) of the sparse file to be created.
        - name: SPARSE_FILE_SIZE
          value: "10737418240"
        # Specify the number of sparse files to be created
        - name: SPARSE_FILE_COUNT
          value: "1"
        livenessProbe:
          exec:
            command:
            - pgrep
            - ".*ndm"
          initialDelaySeconds: 30
          periodSeconds: 60
      volumes:
      - name: config
        configMap:
          name: openebs-ndm-config
      - name: udev
        hostPath:
          path: /run/udev
          type: Directory
      # mount /proc (to access mount file of process 1 of host) inside container
      # to read mount-point of disks and partitions
      - name: procmount
        hostPath:
          path: /proc
          type: Directory
      - name: sparsepath
        hostPath:
          path: /var/openebs/sparse
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: openebs-ndm-operator
  namespace: kube-system
  labels:
    name: openebs-ndm-operator
    openebs.io/component-name: ndm-operator
    openebs.io/version: 1.1.0
spec:
  selector:
    matchLabels:
      name: openebs-ndm-operator
      openebs.io/component-name: ndm-operator
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        name: openebs-ndm-operator
        openebs.io/component-name: ndm-operator
        openebs.io/version: 1.1.0
    spec:
      serviceAccountName: openebs-maya-operator
      containers:
      - name: node-disk-operator
        image: kubesphere/node-disk-operator-amd64:v0.4.1
        imagePullPolicy: IfNotPresent
        readinessProbe:
          exec:
            command:
            - stat
            - /tmp/operator-sdk-ready
          initialDelaySeconds: 4
          periodSeconds: 10
          failureThreshold: 1
        env:
        - name: WATCH_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: OPERATOR_NAME
          value: "node-disk-operator"
        - name: CLEANUP_JOB_IMAGE
          value: kubesphere/linux-utils:3.9
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: openebs-localpv-provisioner
  namespace: kube-system
  labels:
    name: openebs-localpv-provisioner
    openebs.io/component-name: openebs-localpv-provisioner
    openebs.io/version: 1.1.0
spec:
  selector:
    matchLabels:
      name: openebs-localpv-provisioner
      openebs.io/component-name: openebs-localpv-provisioner
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        name: openebs-localpv-provisioner
        openebs.io/component-name: openebs-localpv-provisioner
        openebs.io/version: 1.1.0
    spec:
      serviceAccountName: openebs-maya-operator
      containers:
      - name: openebs-localpv-provisioner
        imagePullPolicy: IfNotPresent
        image: kubesphere/provisioner-localpv:1.1.0
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: OPENEBS_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: OPENEBS_IO_ENABLE_ANALYTICS
          value: "true"
        - name: OPENEBS_IO_HELPER_IMAGE
          value: kubesphere/openebs-tools:3.8
        - name: OPENEBS_IO_INSTALLER_TYPE
          value: "openebs-operator-lite"
        livenessProbe:
          exec:
            command:
            - pgrep
            - ".*localpv"
          initialDelaySeconds: 30
          periodSeconds: 60
@@ -12,7 +12,7 @@ import (
 	log "github.com/sirupsen/logrus"
 )

-func CreateCluster(clusterCfgFile string, logger *log.Logger, addons string, pkg string) error {
+func ResetCluster(clusterCfgFile string, logger *log.Logger) error {
 	cfg, err := config.ParseClusterCfg(clusterCfgFile, logger)
 	if err != nil {
 		return errors.Wrap(err, "failed to download cluster config")
@@ -41,13 +41,13 @@ func ExecTasks(mgr *manager.Manager) error {
 }

 func ResetKubeCluster(mgr *manager.Manager) error {
-	mgr.Logger.Infoln("Reset cluster")
+	mgr.Logger.Infoln("Reset kube cluster")

 	return mgr.RunTaskOnK8sNodes(resetKubeCluster, true)
 }

 func resetKubeCluster(mgr *manager.Manager, node *kubekeyapi.HostCfg, conn ssh.Connection) error {
-	_, err := mgr.Runner.RunCmd("sudo -E /bin/sh -c \"/user/local/bin/kubeadm reset -f\"")
+	_, err := mgr.Runner.RunCmd("sudo -E /bin/sh -c \"/usr/local/bin/kubeadm reset -f\"")
 	if err != nil {
 		return errors.Wrap(errors.WithStack(err), "failed to reset kube cluster")
 	}
@@ -57,9 +57,9 @@ func resetKubeCluster(mgr *manager.Manager, node *kubekeyapi.HostCfg, conn ssh.Connection) error {
 var etcdFiles = []string{"/usr/local/bin/etcd", "/etc/ssl/etcd/ssl", "/var/lib/etcd", "/etc/etcd.env", "/etc/systemd/system/etcd.service"}

 func ResetEtcdCluster(mgr *manager.Manager) error {
-	mgr.Logger.Infoln("Reset cluster")
+	mgr.Logger.Infoln("Clean etcd cluster")

-	return mgr.RunTaskOnEtcdNodes(resetKubeCluster, false)
+	return mgr.RunTaskOnEtcdNodes(resetEtcdCluster, false)
 }

 func resetEtcdCluster(mgr *manager.Manager, node *kubekeyapi.HostCfg, conn ssh.Connection) error {
@@ -127,7 +127,6 @@ func NewConnection(cfg SSHCfg) (Connection, error) {
		if parseErr != nil {
			return nil, errors.Wrap(parseErr, "the given SSH key could not be parsed (note that password-protected keys are not supported)")
		}

		authMethods = append(authMethods, ssh.PublicKeys(signer))
	}
@@ -196,3 +196,26 @@ func intToIP(n int32) net.IP {
 	binary.BigEndian.PutUint32(b, uint32(n))
 	return net.IP(b)
 }
+
+func GetLocalIP() (string, error) {
+	addrs, err := net.InterfaceAddrs()
+	if err != nil {
+		return "", err
+	}
+	for _, addr := range addrs {
+		if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
+			if ipnet.IP.To4() != nil && ipnet.IP.IsGlobalUnicast() {
+				return ipnet.IP.String(), nil
+			}
+		}
+	}
+	return "", errors.New("valid local IP not found !")
+}
+
+func LocalIP() string {
+	localIp, err := GetLocalIP()
+	if err != nil {
+		log.Fatalf("Failed to get Local IP: %v", err)
+	}
+	return localIp
+}
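GetLocalIP simply scans net.InterfaceAddrs for the first non-loopback, global-unicast IPv4 address, which is what the all-in-one host uses as its internal address. A runnable sketch of the same selection logic:

package main

import (
	"errors"
	"fmt"
	"net"
)

// Same selection logic as GetLocalIP above: the first non-loopback,
// global-unicast IPv4 address reported by the host's interfaces.
func getLocalIP() (string, error) {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return "", err
	}
	for _, addr := range addrs {
		if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
			if ipnet.IP.To4() != nil && ipnet.IP.IsGlobalUnicast() {
				return ipnet.IP.String(), nil
			}
		}
	}
	return "", errors.New("valid local IP not found")
}

func main() {
	ip, err := getLocalIP()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("local IP:", ip)
}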