upgrade kube-ovn to 1.10.5

This commit is contained in:
zhouqiu0103 2022-08-16 14:23:15 +08:00
parent 072fed09d6
commit 659b940e32
8 changed files with 3599 additions and 2937 deletions

View File

@@ -50,7 +50,7 @@ const (
DefaultFlannelVersion = "v0.12.0"
DefaultCniVersion = "v0.9.1"
DefaultCiliumVersion = "v1.11.6"
DefaultKubeovnVersion = "v1.5.0"
DefaultKubeovnVersion = "v1.10.5"
DefalutMultusVersion = "v3.8"
DefaultHelmVersion = "v3.6.3"
DefaultDockerComposeVersion = "v2.2.2"
@@ -76,12 +76,19 @@ const (
DefaultEtcdBackupPeriod = 30
DefaultKeepBackNumber = 5
DefaultEtcdBackupScriptDir = "/usr/local/bin/kube-scripts"
DefaultPodGateway = "10.233.64.1"
DefaultJoinCIDR = "100.64.0.0/16"
DefaultNetworkType = "geneve"
DefaultTunnelType = "geneve"
DefaultPodNicType = "veth-pair"
DefaultModules = "kube_ovn_fastpath.ko"
DefaultRPMs = "openvswitch-kmod"
DefaultVlanID = "100"
DefaultOvnLabel = "node-role.kubernetes.io/master"
DefaultOvnLabel = "node-role.kubernetes.io/control-plane"
DefaultDPDKVersion = "19.11"
DefaultDNSAddress = "114.114.114.114"
DefaultDpdkTunnelIface = "br-phy"
DefaultCNIConfigPriority = "01"
Docker = "docker"
Conatinerd = "containerd"
@@ -225,23 +232,44 @@ func SetDefaultNetworkCfg(cfg *ClusterSpec) NetworkConfig {
cfg.Network.Flannel.BackendMode = DefaultBackendMode
}
// kube-ovn default config
if cfg.Network.Kubeovn.KubeOvnController.PodGateway == "" {
cfg.Network.Kubeovn.KubeOvnController.PodGateway = DefaultPodGateway
}
if cfg.Network.Kubeovn.JoinCIDR == "" {
cfg.Network.Kubeovn.JoinCIDR = DefaultJoinCIDR
}
if cfg.Network.Kubeovn.Label == "" {
cfg.Network.Kubeovn.Label = DefaultOvnLabel
}
if cfg.Network.Kubeovn.VlanID == "" {
cfg.Network.Kubeovn.VlanID = DefaultVlanID
if cfg.Network.Kubeovn.KubeOvnController.VlanID == "" {
cfg.Network.Kubeovn.KubeOvnController.VlanID = DefaultVlanID
}
if cfg.Network.Kubeovn.NetworkType == "" {
cfg.Network.Kubeovn.NetworkType = DefaultNetworkType
if cfg.Network.Kubeovn.KubeOvnController.NetworkType == "" {
cfg.Network.Kubeovn.KubeOvnController.NetworkType = DefaultNetworkType
}
if cfg.Network.Kubeovn.PingerExternalAddress == "" {
cfg.Network.Kubeovn.PingerExternalAddress = DefaultDNSAddress
if cfg.Network.Kubeovn.TunnelType == "" {
cfg.Network.Kubeovn.TunnelType = DefaultTunnelType
}
if cfg.Network.Kubeovn.DpdkVersion == "" {
cfg.Network.Kubeovn.DpdkVersion = DefaultDPDKVersion
if cfg.Network.Kubeovn.KubeOvnController.PodNicType == "" {
cfg.Network.Kubeovn.KubeOvnController.PodNicType = DefaultPodNicType
}
if cfg.Network.Kubeovn.KubeOvnCni.Modules == "" {
cfg.Network.Kubeovn.KubeOvnCni.Modules = DefaultModules
}
if cfg.Network.Kubeovn.KubeOvnCni.RPMs == "" {
cfg.Network.Kubeovn.KubeOvnCni.RPMs = DefaultRPMs
}
if cfg.Network.Kubeovn.KubeOvnPinger.PingerExternalAddress == "" {
cfg.Network.Kubeovn.KubeOvnPinger.PingerExternalAddress = DefaultDNSAddress
}
if cfg.Network.Kubeovn.Dpdk.DpdkVersion == "" {
cfg.Network.Kubeovn.Dpdk.DpdkVersion = DefaultDPDKVersion
}
if cfg.Network.Kubeovn.Dpdk.DpdkTunnelIface == "" {
cfg.Network.Kubeovn.Dpdk.DpdkTunnelIface = DefaultDpdkTunnelIface
}
if cfg.Network.Kubeovn.KubeOvnCni.CNIConfigPriority == "" {
cfg.Network.Kubeovn.KubeOvnCni.CNIConfigPriority = DefaultCNIConfigPriority
}
defaultNetworkCfg := cfg.Network
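Note: the new defaulting above is a plain fill-if-empty pass over the nested kube-ovn fields. A minimal, self-contained Go sketch of the same pattern, with simplified stand-in types and only a subset of the fields (illustrative only, not the actual KubeKey code):

package main

import "fmt"

// Simplified stand-ins for the nested kube-ovn config types referenced above.
type kubeOvnController struct{ PodGateway, NetworkType, PodNicType, VlanID string }
type kubeOvnCni struct{ Modules, RPMs, CNIConfigPriority string }
type kubeovnCfg struct {
	JoinCIDR, Label, TunnelType string
	KubeOvnController           kubeOvnController
	KubeOvnCni                  kubeOvnCni
}

// setKubeovnDefaults applies the fill-if-empty pattern with the defaults introduced in this commit.
func setKubeovnDefaults(c *kubeovnCfg) {
	def := func(field *string, v string) {
		if *field == "" {
			*field = v
		}
	}
	def(&c.KubeOvnController.PodGateway, "10.233.64.1")
	def(&c.JoinCIDR, "100.64.0.0/16")
	def(&c.Label, "node-role.kubernetes.io/control-plane")
	def(&c.KubeOvnController.NetworkType, "geneve")
	def(&c.TunnelType, "geneve")
	def(&c.KubeOvnController.PodNicType, "veth-pair")
	def(&c.KubeOvnController.VlanID, "100")
	def(&c.KubeOvnCni.Modules, "kube_ovn_fastpath.ko")
	def(&c.KubeOvnCni.RPMs, "openvswitch-kmod")
	def(&c.KubeOvnCni.CNIConfigPriority, "01")
}

func main() {
	cfg := kubeovnCfg{TunnelType: "vxlan"} // user-set values are kept
	setKubeovnDefaults(&cfg)
	fmt.Printf("%+v\n", cfg)
}

Running it shows TunnelType staying at the user-set vxlan while every empty field picks up its default.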

View File

@@ -38,21 +38,91 @@ type FlannelCfg struct {
}
type KubeovnCfg struct {
JoinCIDR string `yaml:"joinCIDR" json:"joinCIDR,omitempty"`
NetworkType string `yaml:"networkType" json:"networkType,omitempty"`
Label string `yaml:"label" json:"label,omitempty"`
Iface string `yaml:"iface" json:"iface,omitempty"`
VlanInterfaceName string `yaml:"vlanInterfaceName" json:"vlanInterfaceName,omitempty"`
VlanID string `yaml:"vlanID" json:"vlanID,omitempty"`
DpdkMode bool `yaml:"dpdkMode" json:"dpdkMode,omitempty"`
EnableSSL bool `yaml:"enableSSL" json:"enableSSL,omitempty"`
EnableMirror bool `yaml:"enableMirror" json:"enableMirror,omitempty"`
HwOffload bool `yaml:"hwOffload" json:"hwOffload,omitempty"`
DpdkVersion string `yaml:"dpdkVersion" json:"dpdkVersion,omitempty"`
EnableSSL bool `yaml:"enableSSL" json:"enableSSL,omitempty"`
JoinCIDR string `yaml:"joinCIDR" json:"joinCIDR,omitempty"`
Label string `yaml:"label" json:"label,omitempty"`
TunnelType string `yaml:"tunnelType" json:"tunnelType,omitempty"`
SvcYamlIpfamilypolicy string `yaml:"svcYamlIpfamilypolicy" json:"svcYamlIpfamilypolicy,omitempty"`
Dpdk Dpdk `yaml:"dpdk" json:"dpdk,omitempty"`
OvsOvn OvsOvn `yaml:"ovs-ovn" json:"ovs-ovn,omitempty"`
KubeOvnController KubeOvnController `yaml:"kube-ovn-controller" json:"kube-ovn-controller,omitempty"`
KubeOvnCni KubeOvnCni `yaml:"kube-ovn-cni" json:"kube-ovn-cni,omitempty"`
KubeOvnPinger KubeOvnPinger `yaml:"kube-ovn-pinger" json:"kube-ovn-pinger,omitempty"`
}
type Dpdk struct {
DpdkMode bool `yaml:"dpdkMode" json:"dpdkMode,omitempty"`
DpdkTunnelIface string `yaml:"dpdkTunnelIface" json:"dpdkTunnelIface,omitempty"`
DpdkVersion string `yaml:"dpdkVersion" json:"dpdkVersion,omitempty"`
}
type OvsOvn struct {
HwOffload bool `yaml:"hwOffload" json:"hwOffload,omitempty"`
}
type KubeOvnController struct {
PodGateway string `yaml:"podGateway" json:"podGateway,omitempty"`
CheckGateway *bool `yaml:"checkGateway" json:"checkGateway,omitempty"`
LogicalGateway bool `yaml:"logicalGateway" json:"logicalGateway,omitempty"`
ExcludeIps string `yaml:"excludeIps" json:"excludeIps,omitempty"`
NetworkType string `yaml:"networkType" json:"networkType,omitempty"`
VlanInterfaceName string `yaml:"vlanInterfaceName" json:"vlanInterfaceName,omitempty"`
VlanID string `yaml:"vlanID" json:"vlanID,omitempty"`
PodNicType string `yaml:"podNicType" json:"podNicType,omitempty"`
EnableLB *bool `yaml:"enableLB" json:"enableLB,omitempty"`
EnableNP *bool `yaml:"enableNP" json:"enableNP,omitempty"`
EnableEipSnat *bool `yaml:"enableEipSnat" json:"enableEipSnat,omitempty"`
EnableExternalVPC *bool `yaml:"enableExternalVPC" json:"enableExternalVPC,omitempty"`
}
type KubeOvnCni struct {
EnableMirror bool `yaml:"enableMirror" json:"enableMirror,omitempty"`
Iface string `yaml:"iface" json:"iface,omitempty"`
CNIConfigPriority string `yaml:"CNIConfigPriority" json:"CNIConfigPriority,omitempty"`
Modules string `yaml:"modules" json:"modules,omitempty"`
RPMs string `yaml:"RPMs" json:"RPMs,omitempty"`
}
type KubeOvnPinger struct {
PingerExternalAddress string `yaml:"pingerExternalAddress" json:"pingerExternalAddress,omitempty"`
PingerExternalDomain string `yaml:"pingerExternalDomain" json:"pingerExternalDomain,omitempty"`
}
func (k *KubeovnCfg) KubeovnCheckGateway() bool {
if k.KubeOvnController.CheckGateway == nil {
return true
}
return *k.KubeOvnController.CheckGateway
}
func (k *KubeovnCfg) KubeovnEnableLB() bool {
if k.KubeOvnController.EnableLB == nil {
return true
}
return *k.KubeOvnController.EnableLB
}
func (k *KubeovnCfg) KubeovnEnableNP() bool {
if k.KubeOvnController.EnableNP == nil {
return true
}
return *k.KubeOvnController.EnableNP
}
func (k *KubeovnCfg) KubeovnEnableEipSnat() bool {
if k.KubeOvnController.EnableEipSnat == nil {
return true
}
return *k.KubeOvnController.EnableEipSnat
}
func (k *KubeovnCfg) KubeovnEnableExternalVPC() bool {
if k.KubeOvnController.EnableExternalVPC == nil {
return true
}
return *k.KubeOvnController.EnableExternalVPC
}
type MultusCNI struct {
Enabled *bool `yaml:"enabled" json:"enabled,omitempty"`
}
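Note: the flat KubeovnCfg is now split into per-component blocks (ovs-ovn, kube-ovn-controller, kube-ovn-cni, kube-ovn-pinger), and the *bool getters above make flags that are omitted from the YAML default to true. A hedged sketch of how the nested layout decodes and how the nil-pointer default behaves; the mirror types, sample values, and the use of gopkg.in/yaml.v3 are assumptions for illustration, not the KubeKey implementation:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// Minimal mirror of the new nested layout; yaml tags copied from the structs above.
type kubeOvnController struct {
	PodGateway string `yaml:"podGateway"`
	EnableLB   *bool  `yaml:"enableLB"`
}

type kubeovnCfg struct {
	JoinCIDR          string            `yaml:"joinCIDR"`
	TunnelType        string            `yaml:"tunnelType"`
	KubeOvnController kubeOvnController `yaml:"kube-ovn-controller"`
}

// enableLB mimics KubeovnEnableLB: an unset *bool falls back to true.
func (k kubeovnCfg) enableLB() bool {
	if k.KubeOvnController.EnableLB == nil {
		return true
	}
	return *k.KubeOvnController.EnableLB
}

func main() {
	// Hypothetical kubeovn snippet in the new nested form; values are examples only.
	sample := `
joinCIDR: 100.64.0.0/16
tunnelType: geneve
kube-ovn-controller:
  podGateway: 10.233.64.1
`
	var cfg kubeovnCfg
	if err := yaml.Unmarshal([]byte(sample), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.KubeOvnController.PodGateway, cfg.enableLB())
}

Decoding the sample prints "10.233.64.1 true", showing that an omitted enableLB still resolves to true.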

View File

@@ -259,27 +259,12 @@ func deployKubeOVN(d *DeployNetworkPluginModule) []task.Interface {
Parallel: true,
}
generateKubeOVNOld := &task.RemoteTask{
Name: "GenerateKubeOVN",
Desc: "Generate kube-ovn",
Hosts: d.Runtime.GetHostsByRole(common.Master),
Prepare: &prepare.PrepareCollection{
new(common.OnlyFirstMaster),
new(OldK8sVersion),
},
Action: new(GenerateKubeOVNOld),
Parallel: true,
}
generateKubeOVNNew := &task.RemoteTask{
Name: "GenerateKubeOVN",
Desc: "Generate kube-ovn",
Hosts: d.Runtime.GetHostsByRole(common.Master),
Prepare: &prepare.PrepareCollection{
new(common.OnlyFirstMaster),
&OldK8sVersion{Not: true},
},
Action: new(GenerateKubeOVNNew),
generateKubeOVN := &task.RemoteTask{
Name: "GenerateKubeOVN",
Desc: "Generate kube-ovn",
Hosts: d.Runtime.GetHostsByRole(common.Master),
Prepare: new(common.OnlyFirstMaster),
Action: new(GenerateKubeOVN),
Parallel: true,
}
@@ -312,24 +297,13 @@ func deployKubeOVN(d *DeployNetworkPluginModule) []task.Interface {
Parallel: true,
}
if K8sVersionAtLeast(d.KubeConf.Cluster.Kubernetes.Version, "v1.16.0") {
return []task.Interface{
label,
ssl,
generateKubeOVNNew,
deploy,
kubectlKo,
chmod,
}
} else {
return []task.Interface{
label,
ssl,
generateKubeOVNOld,
deploy,
kubectlKo,
chmod,
}
return []task.Interface{
label,
ssl,
generateKubeOVN,
deploy,
kubectlKo,
chmod,
}
}

View File

@@ -19,6 +19,10 @@ package network
import (
"embed"
"fmt"
"io"
"os"
"path/filepath"
"github.com/kubesphere/kubekey/apis/kubekey/v1alpha2"
"github.com/kubesphere/kubekey/pkg/common"
"github.com/kubesphere/kubekey/pkg/core/action"
@@ -27,9 +31,6 @@ import (
"github.com/kubesphere/kubekey/pkg/images"
"github.com/kubesphere/kubekey/pkg/plugins/network/templates"
"github.com/pkg/errors"
"io"
"os"
"path/filepath"
)
//go:embed cilium-1.11.6.tgz
@@ -128,11 +129,6 @@ type LabelNode struct {
}
func (l *LabelNode) Execute(runtime connector.Runtime) error {
if _, err := runtime.GetRunner().SudoCmd(
"/usr/local/bin/kubectl label no -lbeta.kubernetes.io/os=linux kubernetes.io/os=linux --overwrite",
true); err != nil {
return errors.Wrap(errors.WithStack(err), "override node label failed")
}
if _, err := runtime.GetRunner().SudoCmd(
fmt.Sprintf("/usr/local/bin/kubectl label no -l%s kube-ovn/role=master --overwrite",
l.KubeConf.Cluster.Network.Kubeovn.Label),
@@ -182,11 +178,11 @@ func (g *GenerateSSL) Execute(runtime connector.Runtime) error {
return nil
}
type GenerateKubeOVNOld struct {
type GenerateKubeOVN struct {
common.KubeAction
}
func (g *GenerateKubeOVNOld) Execute(runtime connector.Runtime) error {
func (g *GenerateKubeOVN) Execute(runtime connector.Runtime) error {
address, err := runtime.GetRunner().Cmd(
"/usr/local/bin/kubectl get no -lkube-ovn/role=master --no-headers -o wide | awk '{print $6}' | tr \\\\n ','",
true)
@@ -206,82 +202,42 @@ func (g *GenerateKubeOVNOld) Execute(runtime connector.Runtime) error {
}
templateAction := action.Template{
Template: templates.KubeOVNOld,
Dst: filepath.Join(common.KubeConfigDir, templates.KubeOVNOld.Name()),
Template: templates.KubeOVN,
Dst: filepath.Join(common.KubeConfigDir, templates.KubeOVN.Name()),
Data: util.Data{
"Address": address,
"Count": count,
"KubeovnImage": images.GetImage(runtime, g.KubeConf, "kubeovn").ImageName(),
"PodCIDR": g.KubeConf.Cluster.Network.KubePodsCIDR,
"SvcCIDR": g.KubeConf.Cluster.Network.KubeServiceCIDR,
"JoinCIDR": g.KubeConf.Cluster.Network.Kubeovn.JoinCIDR,
"PingExternalAddress": g.KubeConf.Cluster.Network.Kubeovn.PingerExternalAddress,
"PingExternalDNS": g.KubeConf.Cluster.Network.Kubeovn.PingerExternalDomain,
"NetworkType": g.KubeConf.Cluster.Network.Kubeovn.NetworkType,
"VlanID": g.KubeConf.Cluster.Network.Kubeovn.VlanID,
"VlanInterfaceName": g.KubeConf.Cluster.Network.Kubeovn.VlanInterfaceName,
"Iface": g.KubeConf.Cluster.Network.Kubeovn.Iface,
"DpdkMode": g.KubeConf.Cluster.Network.Kubeovn.DpdkMode,
"DpdkVersion": g.KubeConf.Cluster.Network.Kubeovn.DpdkVersion,
"OvnVersion": v1alpha2.DefaultKubeovnVersion,
"EnableSSL": g.KubeConf.Cluster.Network.Kubeovn.EnableSSL,
"EnableMirror": g.KubeConf.Cluster.Network.Kubeovn.EnableMirror,
"HwOffload": g.KubeConf.Cluster.Network.Kubeovn.HwOffload,
},
}
templateAction.Init(nil, nil)
if err := templateAction.Execute(runtime); err != nil {
return err
}
return nil
}
type GenerateKubeOVNNew struct {
common.KubeAction
}
func (g *GenerateKubeOVNNew) Execute(runtime connector.Runtime) error {
address, err := runtime.GetRunner().Cmd(
"/usr/local/bin/kubectl get no -lkube-ovn/role=master --no-headers -o wide | awk '{print $6}' | tr \\\\n ','",
true)
if err != nil {
return errors.Wrap(errors.WithStack(err), "get kube-ovn label node address failed")
}
count, err := runtime.GetRunner().Cmd(
fmt.Sprintf("/usr/local/bin/kubectl get no -l%s --no-headers -o wide | wc -l | sed 's/ //g'",
g.KubeConf.Cluster.Network.Kubeovn.Label), true)
if err != nil {
return errors.Wrap(errors.WithStack(err), "count kube-ovn label nodes num failed")
}
if count == "0" {
return fmt.Errorf("no node with label: %s", g.KubeConf.Cluster.Network.Kubeovn.Label)
}
templateAction := action.Template{
Template: templates.KubeOVNNew,
Dst: filepath.Join(common.KubeConfigDir, templates.KubeOVNNew.Name()),
Data: util.Data{
"Address": address,
"Count": count,
"KubeovnImage": images.GetImage(runtime, g.KubeConf, "kubeovn").ImageName(),
"PodCIDR": g.KubeConf.Cluster.Network.KubePodsCIDR,
"SvcCIDR": g.KubeConf.Cluster.Network.KubeServiceCIDR,
"JoinCIDR": g.KubeConf.Cluster.Network.Kubeovn.JoinCIDR,
"PingExternalAddress": g.KubeConf.Cluster.Network.Kubeovn.PingerExternalAddress,
"PingExternalDNS": g.KubeConf.Cluster.Network.Kubeovn.PingerExternalDomain,
"NetworkType": g.KubeConf.Cluster.Network.Kubeovn.NetworkType,
"VlanID": g.KubeConf.Cluster.Network.Kubeovn.VlanID,
"VlanInterfaceName": g.KubeConf.Cluster.Network.Kubeovn.VlanInterfaceName,
"Iface": g.KubeConf.Cluster.Network.Kubeovn.Iface,
"DpdkMode": g.KubeConf.Cluster.Network.Kubeovn.DpdkMode,
"DpdkVersion": g.KubeConf.Cluster.Network.Kubeovn.DpdkVersion,
"OvnVersion": v1alpha2.DefaultKubeovnVersion,
"EnableSSL": g.KubeConf.Cluster.Network.Kubeovn.EnableSSL,
"EnableMirror": g.KubeConf.Cluster.Network.Kubeovn.EnableMirror,
"HwOffload": g.KubeConf.Cluster.Network.Kubeovn.HwOffload,
"Address": address,
"Count": count,
"KubeovnImage": images.GetImage(runtime, g.KubeConf, "kubeovn").ImageName(),
"PodCIDR": g.KubeConf.Cluster.Network.KubePodsCIDR,
"SvcCIDR": g.KubeConf.Cluster.Network.KubeServiceCIDR,
"JoinCIDR": g.KubeConf.Cluster.Network.Kubeovn.JoinCIDR,
"PodGateway": g.KubeConf.Cluster.Network.Kubeovn.KubeOvnController.PodGateway,
"CheckGateway": g.KubeConf.Cluster.Network.Kubeovn.KubeovnCheckGateway(),
"LogicalGateway": g.KubeConf.Cluster.Network.Kubeovn.KubeOvnController.LogicalGateway,
"PingExternalAddress": g.KubeConf.Cluster.Network.Kubeovn.KubeOvnPinger.PingerExternalAddress,
"PingExternalDNS": g.KubeConf.Cluster.Network.Kubeovn.KubeOvnPinger.PingerExternalDomain,
"NetworkType": g.KubeConf.Cluster.Network.Kubeovn.KubeOvnController.NetworkType,
"TunnelType": g.KubeConf.Cluster.Network.Kubeovn.TunnelType,
"ExcludeIps": g.KubeConf.Cluster.Network.Kubeovn.KubeOvnController.ExcludeIps,
"PodNicType": g.KubeConf.Cluster.Network.Kubeovn.KubeOvnController.PodNicType,
"VlanID": g.KubeConf.Cluster.Network.Kubeovn.KubeOvnController.VlanID,
"VlanInterfaceName": g.KubeConf.Cluster.Network.Kubeovn.KubeOvnController.VlanInterfaceName,
"Iface": g.KubeConf.Cluster.Network.Kubeovn.KubeOvnCni.Iface,
"DpdkMode": g.KubeConf.Cluster.Network.Kubeovn.Dpdk.DpdkMode,
"DpdkVersion": g.KubeConf.Cluster.Network.Kubeovn.Dpdk.DpdkVersion,
"OvnVersion": v1alpha2.DefaultKubeovnVersion,
"EnableSSL": g.KubeConf.Cluster.Network.Kubeovn.EnableSSL,
"EnableMirror": g.KubeConf.Cluster.Network.Kubeovn.KubeOvnCni.EnableMirror,
"EnableLB": g.KubeConf.Cluster.Network.Kubeovn.KubeovnEnableLB(),
"EnableNP": g.KubeConf.Cluster.Network.Kubeovn.KubeovnEnableNP(),
"EnableEipSnat": g.KubeConf.Cluster.Network.Kubeovn.KubeovnEnableEipSnat(),
"EnableExternalVPC": g.KubeConf.Cluster.Network.Kubeovn.KubeovnEnableExternalVPC(),
"HwOffload": g.KubeConf.Cluster.Network.Kubeovn.OvsOvn.HwOffload,
"SvcYamlIpfamilypolicy": g.KubeConf.Cluster.Network.Kubeovn.SvcYamlIpfamilypolicy,
"DpdkTunnelIface": g.KubeConf.Cluster.Network.Kubeovn.Dpdk.DpdkTunnelIface,
"CNIConfigPriority": g.KubeConf.Cluster.Network.Kubeovn.KubeOvnCni.CNIConfigPriority,
"Modules": g.KubeConf.Cluster.Network.Kubeovn.KubeOvnCni.Modules,
"RPMs": g.KubeConf.Cluster.Network.Kubeovn.KubeOvnCni.RPMs,
},
}
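Note: the consolidated GenerateKubeOVN action now feeds a single util.Data map into the manifest template. A small sketch of how such keys are consumed by text/template; the key names match the Data literal above, while the rendered lines and values are purely illustrative (the real kube-ovn manifest template is in the suppressed templates diff below):

package main

import (
	"os"
	"text/template"
)

func main() {
	// Render a few of the Data keys the same way a manifest template would reference them.
	tmpl := template.Must(template.New("kubeovn").Parse(
		"podCIDR:     {{ .PodCIDR }}\n" +
			"svcCIDR:     {{ .SvcCIDR }}\n" +
			"joinCIDR:    {{ .JoinCIDR }}\n" +
			"networkType: {{ .NetworkType }}\n" +
			"tunnelType:  {{ .TunnelType }}\n" +
			"enableLB:    {{ .EnableLB }}\n"))

	data := map[string]interface{}{ // same key names as in the Data literal above; values are examples
		"PodCIDR":     "10.233.64.0/18",
		"SvcCIDR":     "10.233.0.0/18",
		"JoinCIDR":    "100.64.0.0/16",
		"NetworkType": "geneve",
		"TunnelType":  "geneve",
		"EnableLB":    true,
	}
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}

Keys in the data map that a template never references are simply ignored, which is why the full Data map above can safely carry more fields than any one template uses.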

View File

@@ -17,100 +17,324 @@
package templates
import (
"github.com/lithammer/dedent"
"text/template"
"github.com/lithammer/dedent"
)
var KubectlKo = template.Must(template.New("kubectl-ko").Parse(
dedent.Dedent(`#!/bin/bash
set -euo pipefail
KUBE_OVN_NS=kube-system
WITHOUT_KUBE_PROXY=false
OVN_NB_POD=
OVN_SB_POD=
KUBE_OVN_VERSION=
REGISTRY="kubeovn"
showHelp(){
echo "kubectl ko {subcommand} [option...]"
echo "Available Subcommands:"
echo " [nb|sb] [status|kick|backup|dbstatus|restore] ovn-db operations show cluster status, kick stale server, backup database, get db consistency status or restore ovn nb db when met 'inconsistent data' error"
echo " nbctl [ovn-nbctl options ...] invoke ovn-nbctl"
echo " sbctl [ovn-sbctl options ...] invoke ovn-sbctl"
echo " vsctl {nodeName} [ovs-vsctl options ...] invoke ovs-vsctl on selected node"
echo " vsctl {nodeName} [ovs-vsctl options ...] invoke ovs-vsctl on the specified node"
echo " ofctl {nodeName} [ovs-ofctl options ...] invoke ovs-ofctl on the specified node"
echo " dpctl {nodeName} [ovs-dpctl options ...] invoke ovs-dpctl on the specified node"
echo " appctl {nodeName} [ovs-appctl options ...] invoke ovs-appctl on the specified node"
echo " tcpdump {namespace/podname} [tcpdump options ...] capture pod traffic"
echo " trace {namespace/podname} {target ip address} {icmp|tcp|udp} [target tcp or udp port] trace ovn microflow of specific packet"
echo " diagnose {all|node} [nodename] diagnose connectivity of all nodes or a specific node"
echo " tuning {install-fastpath|local-install-fastpath|remove-fastpath|install-stt|local-install-stt|remove-stt} {centos7|centos8}} [kernel-devel-version] deploy kernel optimisation components to the system"
echo " reload restart all kube-ovn components"
echo " env-check check the environment configuration"
}
# usage: ipv4_to_hex 192.168.0.1
ipv4_to_hex(){
printf "%02x" ${1//./ }
}
# convert hex to dec (portable version)
hex2dec(){
for i in $(echo "$@"); do
printf "%d\n" "$(( 0x$i ))"
done
}
# https://github.com/chmduquesne/wg-ip
# usage: expand_ipv6 2001::1
expand_ipv6(){
local ip=$1
# prepend 0 if we start with :
echo $ip | grep -qs "^:" && ip="0${ip}"
# expand ::
if echo $ip | grep -qs "::"; then
local colons=$(echo $ip | sed 's/[^:]//g')
local missing=$(echo ":::::::::" | sed "s/$colons//")
local expanded=$(echo $missing | sed 's/:/:0/g')
ip=$(echo $ip | sed "s/::/$expanded/")
fi
local blocks=$(echo $ip | grep -o "[0-9a-f]\+")
set $blocks
printf "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n" \
$(hex2dec $@)
}
# convert an IPv6 address to bytes
ipv6_bytes(){
for x in $(expand_ipv6 $1 | tr ':' ' '); do
printf "%d %d " $((0x$x >> 8 & 0xff)) $((0x$x & 0xff))
done
echo
}
# usage: ipIsInCidr 192.168.0.1 192.168.0.0/24
# return: 0 for true, 1 for false
ipIsInCidr(){
local ip=$1
local cidr=$2
if [[ $ip =~ .*:.* ]]; then
# IPv6
cidr=${cidr#*,}
local network=${cidr%/*}
local prefix=${cidr#*/}
local ip_bytes=($(ipv6_bytes $ip))
local network_bytes=($(ipv6_bytes $network))
for ((i=0; i<${#ip_bytes[*]}; i++)); do
if [ ${ip_bytes[$i]} -eq ${network_bytes[$i]} ]; then
continue
fi
if [ $((($i+1)*8)) -le $prefix ]; then
return 1
fi
if [ $(($i*8)) -ge $prefix ]; then
return 0
fi
local bits=$(($prefix-$i*8))
local mask=$((0xff<<$bits & 0xff))
# TODO: check whether the IP is network/broadcast address
if [ $((${ip_bytes[$i]} & $mask)) -ne ${network_bytes[$i]} ]; then
return 1
fi
done
return 0
fi
# IPv4
cidr=${cidr%,*}
local network=${cidr%/*}
local prefix=${cidr#*/}
local ip_hex=$(ipv4_to_hex $ip)
local ip_dec=$((0x$ip_hex))
local network_hex=$(ipv4_to_hex $network)
local network_dec=$((0x$network_hex))
local broadcast_dec=$(($network_dec + 2**(32-$prefix) - 1))
# TODO: check whether the IP is network/broadcast address
if [ $ip_dec -gt $network_dec -a $ip_dec -lt $broadcast_dec ]; then
return 0
fi
return 1
}
tcpdump(){
namespacedPod="$1"; shift
namespace=$(echo "$namespacedPod" | cut -d "/" -f1)
podName=$(echo "$namespacedPod" | cut -d "/" -f2)
if [ "$podName" = "$namespacedPod" ]; then
nodeName=$(kubectl get pod "$podName" -o jsonpath={.spec.nodeName})
mac=$(kubectl get pod "$podName" -o jsonpath={.metadata.annotations.ovn\\.kubernetes\\.io/mac_address})
hostNetwork=$(kubectl get pod "$podName" -o jsonpath={.spec.hostNetwork})
else
nodeName=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.spec.nodeName})
hostNetwork=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.spec.hostNetwork})
namespace="default"
fi
nodeName=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.spec.nodeName})
hostNetwork=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.spec.hostNetwork})
if [ -z "$nodeName" ]; then
echo "Pod $namespacedPod not exists on any node"
exit 1
fi
ovnCni=$(kubectl get pod -n $KUBE_OVN_NS -o wide| grep kube-ovn-cni| grep " $nodeName " | awk '{print $1}')
if [ -z "$ovnCni" ]; then
echo "kube-ovn-cni not exist on node $nodeName"
exit 1
fi
if [ "$hostNetwork" = "true" ]; then
set -x
kubectl exec -it "$ovnCni" -n $KUBE_OVN_NS -- tcpdump -nn "$@"
kubectl exec "$ovnCni" -n $KUBE_OVN_NS -- tcpdump -nn "$@"
else
nicName=$(kubectl exec -it "$ovnCni" -n $KUBE_OVN_NS -- ovs-vsctl --data=bare --no-heading --columns=name find interface external-ids:iface-id="$podName"."$namespace" | tr -d '\r')
nicName=$(kubectl exec "$ovnCni" -n $KUBE_OVN_NS -- ovs-vsctl --data=bare --no-heading --columns=name find interface external-ids:iface-id="$podName"."$namespace" | tr -d '\r')
if [ -z "$nicName" ]; then
echo "nic doesn't exist on node $nodeName"
exit 1
fi
podNicType=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.metadata.annotations.ovn\\.kubernetes\\.io/pod_nic_type})
podNetNs=$(kubectl exec "$ovnCni" -n $KUBE_OVN_NS -- ovs-vsctl --data=bare --no-heading get interface "$nicName" external-ids:pod_netns | tr -d '\r' | sed -e 's/^"//' -e 's/"$//')
set -x
kubectl exec -it "$ovnCni" -n $KUBE_OVN_NS -- tcpdump -nn -i "$nicName" "$@"
if [ "$podNicType" = "internal-port" ]; then
kubectl exec "$ovnCni" -n $KUBE_OVN_NS -- nsenter --net="$podNetNs" tcpdump -nn -i "$nicName" "$@"
else
kubectl exec "$ovnCni" -n $KUBE_OVN_NS -- nsenter --net="$podNetNs" tcpdump -nn -i eth0 "$@"
fi
fi
}
trace(){
namespacedPod="$1"
namespace=$(echo "$1" | cut -d "/" -f1)
podName=$(echo "$1" | cut -d "/" -f2)
if [ "$podName" = "$1" ]; then
echo "namespace is required"
exit 1
fi
podIP=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.metadata.annotations.ovn\\.kubernetes\\.io/ip_address})
mac=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.metadata.annotations.ovn\\.kubernetes\\.io/mac_address})
ls=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.metadata.annotations.ovn\\.kubernetes\\.io/logical_switch})
hostNetwork=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.spec.hostNetwork})
nodeName=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.spec.nodeName})
if [ "$hostNetwork" = "true" ]; then
echo "Can not trace host network pod"
exit 1
fi
if [ -z "$ls" ]; then
echo "pod address not ready"
exit 1
fi
gwMac=$(kubectl exec -it $OVN_NB_POD -n $KUBE_OVN_NS -- ovn-nbctl --data=bare --no-heading --columns=mac find logical_router_port name=ovn-cluster-"$ls" | tr -d '\r')
if [ -z "$gwMac" ]; then
echo "get gw mac failed"
exit 1
namespace=$(echo "$namespacedPod" | cut -d "/" -f1)
podName=$(echo "$namespacedPod" | cut -d "/" -f2)
if [ "$podName" = "$namespacedPod" ]; then
namespace="default"
fi
dst="$2"
if [ -z "$dst" ]; then
echo "need a target ip address"
exit 1
fi
hostNetwork=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.spec.hostNetwork})
if [ "$hostNetwork" = "true" ]; then
echo "Can not trace host network pod"
exit 1
fi
af="4"
nw="nw"
proto=""
if [[ "$dst" =~ .*:.* ]]; then
af="6"
nw="ipv6"
proto="6"
fi
podIPs=($(kubectl get pod "$podName" -n "$namespace" -o jsonpath="{.status.podIPs[*].ip}"))
if [ ${#podIPs[@]} -eq 0 ]; then
podIPs=($(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.metadata.annotations.ovn\\.kubernetes\\.io/ip_address} | sed 's/,/ /g'))
if [ ${#podIPs[@]} -eq 0 ]; then
echo "pod address not ready"
exit 1
fi
fi
podIP=""
for ip in ${podIPs[@]}; do
if [ "$af" = "4" ]; then
if [[ ! "$ip" =~ .*:.* ]]; then
podIP=$ip
break
fi
elif [[ "$ip" =~ .*:.* ]]; then
podIP=$ip
break
fi
done
if [ -z "$podIP" ]; then
echo "Pod $namespacedPod has no IPv$af address"
exit 1
fi
ls=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.metadata.annotations.ovn\\.kubernetes\\.io/logical_switch})
if [ -z "$ls" ]; then
echo "pod address not ready"
exit 1
fi
local cidr=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.metadata.annotations.ovn\\.kubernetes\\.io/cidr})
mac=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.metadata.annotations.ovn\\.kubernetes\\.io/mac_address})
nodeName=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.spec.nodeName})
dstMac=""
if ipIsInCidr $dst $cidr; then
set +o pipefail
if [ $af -eq 4 ]; then
dstMac=$(kubectl exec $OVN_NB_POD -n $KUBE_OVN_NS -c ovn-central -- ovn-nbctl --data=bare --no-heading --columns=addresses list logical_switch_port | grep -w "$(echo $dst | tr . '\.')" | awk '{print $1}')
else
dstMac=$(kubectl exec $OVN_NB_POD -n $KUBE_OVN_NS -c ovn-central -- ovn-nbctl --data=bare --no-heading --columns=addresses list logical_switch_port | grep -i " $dst\$" | awk '{print $1}')
fi
set -o pipefail
fi
if [ -z "$dstMac" ]; then
vlan=$(kubectl get subnet "$ls" -o jsonpath={.spec.vlan})
logicalGateway=$(kubectl get subnet "$ls" -o jsonpath={.spec.logicalGateway})
if [ ! -z "$vlan" -a "$logicalGateway" != "true" ]; then
gateway=$(kubectl get subnet "$ls" -o jsonpath={.spec.gateway})
if [[ "$gateway" =~ .*,.* ]]; then
if [ "$af" = "4" ]; then
gateway=${gateway%%,*}
else
gateway=${gateway##*,}
fi
fi
ovnCni=$(kubectl get pod -n $KUBE_OVN_NS -o wide | grep -w kube-ovn-cni | grep " $nodeName " | awk '{print $1}')
if [ -z "$ovnCni" ]; then
echo "No kube-ovn-cni Pod running on node $nodeName"
exit 1
fi
nicName=$(kubectl exec "$ovnCni" -n $KUBE_OVN_NS -- ovs-vsctl --data=bare --no-heading --columns=name find interface external-ids:iface-id="$podName"."$namespace" | tr -d '\r')
if [ -z "$nicName" ]; then
echo "nic doesn't exist on node $nodeName"
exit 1
fi
podNicType=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.metadata.annotations.ovn\\.kubernetes\\.io/pod_nic_type})
podNetNs=$(kubectl exec "$ovnCni" -n $KUBE_OVN_NS -- ovs-vsctl --data=bare --no-heading get interface "$nicName" external-ids:pod_netns | tr -d '\r' | sed -e 's/^"//' -e 's/"$//')
if [ "$podNicType" != "internal-port" ]; then
nicName="eth0"
fi
if [[ "$gateway" =~ .*:.* ]]; then
cmd="ndisc6 -q $gateway $nicName"
output=$(kubectl exec "$ovnCni" -n $KUBE_OVN_NS -- nsenter --net="$podNetNs" ndisc6 -q "$gateway" "$nicName")
else
cmd="arping -c3 -C1 -i1 -I $nicName $gateway"
output=$(kubectl exec "$ovnCni" -n $KUBE_OVN_NS -- nsenter --net="$podNetNs" arping -c3 -C1 -i1 -I "$nicName" "$gateway")
fi
if [ $? -ne 0 ]; then
echo "failed to run '$cmd' in Pod's netns"
exit 1
fi
dstMac=$(echo "$output" | grep -o -E '([[:xdigit:]]{1,2}:){5}[[:xdigit:]]{1,2}')
else
lr=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.metadata.annotations.ovn\\.kubernetes\\.io/logical_router})
if [ -z "$lr" ]; then
lr=$(kubectl get subnet "$ls" -o jsonpath={.spec.vpc})
fi
dstMac=$(kubectl exec $OVN_NB_POD -n $KUBE_OVN_NS -c ovn-central -- ovn-nbctl --data=bare --no-heading --columns=mac find logical_router_port name="$lr"-"$ls" | tr -d '\r')
fi
fi
if [ -z "$dstMac" ]; then
echo "failed to get destination mac"
exit 1
fi
type="$3"
case $type in
icmp)
set -x
kubectl exec "$OVN_SB_POD" -n $KUBE_OVN_NS -- ovn-trace --ct=new "$ls" "inport == \"$podName.$namespace\" && ip.ttl == 64 && icmp && eth.src == $mac && ip4.src == $podIP && eth.dst == $gwMac && ip4.dst == $dst"
kubectl exec "$OVN_SB_POD" -n $KUBE_OVN_NS -c ovn-central -- ovn-trace --ct=new "$ls" "inport == \"$podName.$namespace\" && ip.ttl == 64 && icmp && eth.src == $mac && ip$af.src == $podIP && eth.dst == $dstMac && ip$af.dst == $dst"
;;
tcp|udp)
set -x
kubectl exec "$OVN_SB_POD" -n $KUBE_OVN_NS -- ovn-trace --ct=new "$ls" "inport == \"$podName.$namespace\" && ip.ttl == 64 && eth.src == $mac && ip4.src == $podIP && eth.dst == $gwMac && ip4.dst == $dst && $type.src == 10000 && $type.dst == $4"
kubectl exec "$OVN_SB_POD" -n $KUBE_OVN_NS -c ovn-central -- ovn-trace --ct=new "$ls" "inport == \"$podName.$namespace\" && ip.ttl == 64 && eth.src == $mac && ip$af.src == $podIP && eth.dst == $dstMac && ip$af.dst == $dst && $type.src == 10000 && $type.dst == $4"
;;
*)
echo "type $type not supported"
@@ -118,25 +342,28 @@ trace(){
exit 1
;;
esac
set +x
echo "--------"
echo "Start OVS Tracing"
echo ""
echo ""
ovsPod=$(kubectl get pod -n $KUBE_OVN_NS -o wide | grep " $nodeName " | grep ovs-ovn | awk '{print $1}')
if [ -z "$ovsPod" ]; then
echo "ovs pod doesn't exist on node $nodeName"
exit 1
echo "ovs pod doesn't exist on node $nodeName"
exit 1
fi
inPort=$(kubectl exec "$ovsPod" -n $KUBE_OVN_NS -- ovs-vsctl --format=csv --data=bare --no-heading --columns=ofport find interface external_id:iface-id="$podName"."$namespace")
case $type in
case $type in
icmp)
set -x
kubectl exec "$ovsPod" -n $KUBE_OVN_NS -- ovs-appctl ofproto/trace br-int in_port="$inPort",icmp,nw_src="$podIP",nw_dst="$dst",dl_src="$mac",dl_dst="$gwMac"
kubectl exec "$ovsPod" -n $KUBE_OVN_NS -- ovs-appctl ofproto/trace br-int "in_port=$inPort,icmp$proto,nw_ttl=64,${nw}_src=$podIP,${nw}_dst=$dst,dl_src=$mac,dl_dst=$dstMac"
;;
tcp|udp)
set -x
kubectl exec "$ovsPod" -n $KUBE_OVN_NS -- ovs-appctl ofproto/trace br-int in_port="$inPort","$type",nw_src="$podIP",nw_dst="$dst",dl_src="$mac",dl_dst="$gwMac","$type"_src=1000,"$type"_dst="$4"
kubectl exec "$ovsPod" -n $KUBE_OVN_NS -- ovs-appctl ofproto/trace br-int "in_port=$inPort,$type$proto,nw_ttl=64,${nw}_src=$podIP,${nw}_dst=$dst,dl_src=$mac,dl_dst=$dstMac,${type}_src=1000,${type}_dst=$4"
;;
*)
echo "type $type not supported"
@@ -145,30 +372,75 @@ trace(){
;;
esac
}
vsctl(){
xxctl(){
subcommand="$1"; shift
nodeName="$1"; shift
kubectl get no "$nodeName" > /dev/null
ovsPod=$(kubectl get pod -n $KUBE_OVN_NS -o wide | grep " $nodeName " | grep ovs-ovn | awk '{print $1}')
if [ -z "$ovsPod" ]; then
echo "ovs pod doesn't exist on node $nodeName"
exit 1
echo "ovs pod doesn't exist on node $nodeName"
exit 1
fi
kubectl exec "$ovsPod" -n $KUBE_OVN_NS -- ovs-vsctl "$@"
kubectl exec "$ovsPod" -n $KUBE_OVN_NS -- ovs-$subcommand "$@"
}
checkLeader(){
component="$1"; shift
count=$(kubectl get ep ovn-$component -n $KUBE_OVN_NS -o yaml | grep ip | wc -l)
if [ $count -eq 0 ]; then
echo "no ovn-$component exists !!"
exit 1
fi
if [ $count -gt 1 ]; then
echo "ovn-$component has more than one leader !!"
exit 1
fi
echo "ovn-$component leader check ok"
}
diagnose(){
kubectl get crd vpcs.kubeovn.io
kubectl get crd vpc-nat-gateways.kubeovn.io
kubectl get crd subnets.kubeovn.io
kubectl get crd ips.kubeovn.io
kubectl get svc kube-dns -n kube-system
kubectl get crd vlans.kubeovn.io
kubectl get crd provider-networks.kubeovn.io
set +eu
if ! kubectl get svc kube-dns -n kube-system ; then
echo "Warning: kube-dns doesn't exist, maybe there is coredns service."
fi
set -eu
kubectl get svc kubernetes -n default
kubectl get sa -n kube-system ovn
kubectl get clusterrole system:ovn
kubectl get clusterrolebinding ovn
kubectl get no -o wide
kubectl ko nbctl show
kubectl ko nbctl lr-policy-list ovn-cluster
kubectl ko nbctl lr-route-list ovn-cluster
kubectl ko nbctl ls-lb-list ovn-default
kubectl ko nbctl list address_set
kubectl ko nbctl list acl
kubectl ko sbctl show
checkDaemonSet kube-proxy
if [ "${WITHOUT_KUBE_PROXY}" = "false" ]; then
checkKubeProxy
fi
checkDeployment ovn-central
checkDeployment kube-ovn-controller
checkDaemonSet kube-ovn-cni
checkDaemonSet ovs-ovn
checkDeployment coredns
checkLeader nb
checkLeader sb
checkLeader northd
type="$1"
case $type in
all)
@@ -177,15 +449,22 @@ diagnose(){
kubectl logs -n $KUBE_OVN_NS -l app=kube-ovn-controller --tail=100 | grep E$(date +%m%d)
set -e
echo ""
pingers=$(kubectl get pod -n $KUBE_OVN_NS | grep kube-ovn-pinger | awk '{print $1}')
pingers=$(kubectl -n $KUBE_OVN_NS get po --no-headers -o custom-columns=NAME:.metadata.name -l app=kube-ovn-pinger)
for pinger in $pingers
do
nodeName=$(kubectl get pod "$pinger" -n "$KUBE_OVN_NS" -o jsonpath={.spec.nodeName})
echo "### start to diagnose node $nodeName"
echo "#### ovn-controller log:"
kubectl exec -n $KUBE_OVN_NS -it "$pinger" -- tail /var/log/ovn/ovn-controller.log
kubectl exec -n $KUBE_OVN_NS "$pinger" -- tail /var/log/ovn/ovn-controller.log
echo ""
kubectl exec -n $KUBE_OVN_NS -it "$pinger" -- /kube-ovn/kube-ovn-pinger --mode=job
echo "#### ovs-vswitchd log:"
kubectl exec -n $KUBE_OVN_NS "$pinger" -- tail /var/log/openvswitch/ovs-vswitchd.log
echo ""
echo "#### ovs-vsctl show results:"
kubectl exec -n $KUBE_OVN_NS "$pinger" -- ovs-vsctl show
echo ""
echo "#### pinger diagnose results:"
kubectl exec -n $KUBE_OVN_NS "$pinger" -- /kube-ovn/kube-ovn-pinger --mode=job
echo "### finish diagnose node $nodeName"
echo ""
done
@@ -193,13 +472,20 @@ diagnose(){
node)
nodeName="$2"
kubectl get no "$nodeName" > /dev/null
pinger=$(kubectl get pod -n $KUBE_OVN_NS -o wide | grep kube-ovn-pinger | grep " $nodeName " | awk '{print $1}')
echo "### start to diagnose node nodeName"
pinger=$(kubectl -n $KUBE_OVN_NS get po -l app=kube-ovn-pinger -o 'jsonpath={.items[?(@.spec.nodeName=="'$nodeName'")].metadata.name}')
if [ ! -n "$pinger" ]; then
echo "Error: No kube-ovn-pinger running on node $nodeName"
exit 1
fi
echo "### start to diagnose node $nodeName"
echo "#### ovn-controller log:"
kubectl exec -n $KUBE_OVN_NS -it "$pinger" -- tail /var/log/ovn/ovn-controller.log
kubectl exec -n $KUBE_OVN_NS "$pinger" -- tail /var/log/ovn/ovn-controller.log
echo ""
kubectl exec -n $KUBE_OVN_NS -it "$pinger" -- /kube-ovn/kube-ovn-pinger --mode=job
echo "### finish diagnose node nodeName"
echo "#### ovs-vswitchd log:"
kubectl exec -n $KUBE_OVN_NS "$pinger" -- tail /var/log/openvswitch/ovs-vswitchd.log
echo ""
kubectl exec -n $KUBE_OVN_NS "$pinger" -- /kube-ovn/kube-ovn-pinger --mode=job
echo "### finish diagnose node $nodeName"
echo ""
;;
*)
@@ -208,6 +494,7 @@ diagnose(){
;;
esac
}
getOvnCentralPod(){
NB_POD=$(kubectl get pod -n $KUBE_OVN_NS -l ovn-nb-leader=true | grep ovn-central | head -n 1 | awk '{print $1}')
if [ -z "$NB_POD" ]; then
@@ -221,7 +508,14 @@ getOvnCentralPod(){
exit 1
fi
OVN_SB_POD=$SB_POD
VERSION=$(kubectl -n kube-system get pods -l ovn-sb-leader=true -o yaml | grep "image: $REGISTRY/kube-ovn:" | head -n 1 | awk -F ':' '{print $3}')
if [ -z "$VERSION" ]; then
echo "kubeovn version not exists"
exit 1
fi
KUBE_OVN_VERSION=$VERSION
}
checkDaemonSet(){
name="$1"
currentScheduled=$(kubectl get ds -n $KUBE_OVN_NS "$name" -o jsonpath={.status.currentNumberScheduled})
@@ -235,6 +529,7 @@ checkDaemonSet(){
exit 1
fi
}
checkDeployment(){
name="$1"
ready=$(kubectl get deployment -n $KUBE_OVN_NS "$name" -o jsonpath={.status.readyReplicas})
@@ -248,22 +543,322 @@ checkDeployment(){
exit 1
fi
}
checkKubeProxy(){
if kubectl get ds -n kube-system --no-headers -o custom-columns=NAME:.metadata.name | grep -qw ^kube-proxy; then
checkDaemonSet kube-proxy
else
nodeIps=$(kubectl get node -o wide | grep -v "INTERNAL-IP" | awk '{print $6}')
for node in $nodeIps
do
healthResult=$(curl -g -6 -sL -w %{http_code} http://[$node]:10256/healthz -o /dev/null | grep -v 200 || true)
if [ -n "$healthResult" ]; then
echo "$node kube-proxy's health check failed"
exit 1
fi
done
fi
echo "kube-proxy ready"
}
dbtool(){
suffix=$(date +%m%d%H%M%s)
component="$1"; shift
action="$1"; shift
case $component in
nb)
case $action in
status)
kubectl exec "$OVN_NB_POD" -n $KUBE_OVN_NS -c ovn-central -- ovs-appctl -t /var/run/ovn/ovnnb_db.ctl cluster/status OVN_Northbound
kubectl exec "$OVN_NB_POD" -n $KUBE_OVN_NS -c ovn-central -- ovs-appctl -t /var/run/ovn/ovnnb_db.ctl ovsdb-server/get-db-storage-status OVN_Northbound
;;
kick)
kubectl exec "$OVN_NB_POD" -n $KUBE_OVN_NS -c ovn-central -- ovs-appctl -t /var/run/ovn/ovnnb_db.ctl cluster/kick OVN_Northbound "$1"
;;
backup)
kubectl exec "$OVN_NB_POD" -n $KUBE_OVN_NS -c ovn-central -- ovsdb-tool cluster-to-standalone /etc/ovn/ovnnb_db.$suffix.backup /etc/ovn/ovnnb_db.db
kubectl cp $KUBE_OVN_NS/$OVN_NB_POD:/etc/ovn/ovnnb_db.$suffix.backup $(pwd)/ovnnb_db.$suffix.backup
kubectl exec "$OVN_NB_POD" -n $KUBE_OVN_NS -c ovn-central -- rm -f /etc/ovn/ovnnb_db.$suffix.backup
echo "backup ovn-$component db to $(pwd)/ovnnb_db.$suffix.backup"
;;
dbstatus)
kubectl exec "$OVN_NB_POD" -n $KUBE_OVN_NS -c ovn-central -- ovn-appctl -t /var/run/ovn/ovnnb_db.ctl ovsdb-server/get-db-storage-status OVN_Northbound
;;
restore)
# set ovn-central replicas to 0
replicas=$(kubectl get deployment -n $KUBE_OVN_NS ovn-central -o jsonpath={.spec.replicas})
kubectl scale deployment -n $KUBE_OVN_NS ovn-central --replicas=0
echo "ovn-central original replicas is $replicas"
# backup ovn-nb db
declare nodeIpArray
declare podNameArray
declare nodeIps
if [[ $(kubectl get deployment -n kube-system ovn-central -o jsonpath='{.spec.template.spec.containers[0].env[1]}') =~ "NODE_IPS" ]]; then
nodeIpVals=$(kubectl get deployment -n kube-system ovn-central -o jsonpath='{.spec.template.spec.containers[0].env[1].value}')
nodeIps=(${nodeIpVals//,/ })
else
nodeIps=$(kubectl get node -lkube-ovn/role=master -o wide | grep -v "INTERNAL-IP" | awk '{print $6}')
fi
firstIP=${nodeIps[0]}
podNames=$(kubectl get pod -n $KUBE_OVN_NS | grep ovs-ovn | awk '{print $1}')
echo "first nodeIP is $firstIP"
i=0
for nodeIp in ${nodeIps[@]}
do
for pod in $podNames
do
hostip=$(kubectl get pod -n $KUBE_OVN_NS $pod -o jsonpath={.status.hostIP})
if [ $nodeIp = $hostip ]; then
nodeIpArray[$i]=$nodeIp
podNameArray[$i]=$pod
i=$(expr $i + 1)
echo "ovs-ovn pod on node $nodeIp is $pod"
break
fi
done
done
echo "backup nb db file"
kubectl exec -it -n $KUBE_OVN_NS ${podNameArray[0]} -- ovsdb-tool cluster-to-standalone /etc/ovn/ovnnb_db_standalone.db /etc/ovn/ovnnb_db.db
# mv all db files
for pod in ${podNameArray[@]}
do
kubectl exec -it -n $KUBE_OVN_NS $pod -- mv /etc/ovn/ovnnb_db.db /tmp
kubectl exec -it -n $KUBE_OVN_NS $pod -- mv /etc/ovn/ovnsb_db.db /tmp
done
# restore db and replicas
echo "restore nb db file, operate in pod ${podNameArray[0]}"
kubectl exec -it -n $KUBE_OVN_NS ${podNameArray[0]} -- mv /etc/ovn/ovnnb_db_standalone.db /etc/ovn/ovnnb_db.db
kubectl scale deployment -n $KUBE_OVN_NS ovn-central --replicas=$replicas
echo "finish restore nb db file and ovn-central replicas"
echo "recreate ovs-ovn pods"
kubectl delete pod -n $KUBE_OVN_NS -l app=ovs
;;
*)
echo "unknown action $action"
esac
;;
sb)
case $action in
status)
kubectl exec "$OVN_SB_POD" -n $KUBE_OVN_NS -c ovn-central -- ovs-appctl -t /var/run/ovn/ovnsb_db.ctl cluster/status OVN_Southbound
kubectl exec "$OVN_SB_POD" -n $KUBE_OVN_NS -c ovn-central -- ovs-appctl -t /var/run/ovn/ovnsb_db.ctl ovsdb-server/get-db-storage-status OVN_Southbound
;;
kick)
kubectl exec "$OVN_SB_POD" -n $KUBE_OVN_NS -c ovn-central -- ovs-appctl -t /var/run/ovn/ovnsb_db.ctl cluster/kick OVN_Southbound "$1"
;;
backup)
kubectl exec "$OVN_SB_POD" -n $KUBE_OVN_NS -c ovn-central -- ovsdb-tool cluster-to-standalone /etc/ovn/ovnsb_db.$suffix.backup /etc/ovn/ovnsb_db.db
kubectl cp $KUBE_OVN_NS/$OVN_SB_POD:/etc/ovn/ovnsb_db.$suffix.backup $(pwd)/ovnsb_db.$suffix.backup
kubectl exec "$OVN_SB_POD" -n $KUBE_OVN_NS -c ovn-central -- rm -f /etc/ovn/ovnsb_db.$suffix.backup
echo "backup ovn-$component db to $(pwd)/ovnsb_db.$suffix.backup"
;;
dbstatus)
kubectl exec "$OVN_NB_POD" -n $KUBE_OVN_NS -c ovn-central -- ovn-appctl -t /var/run/ovn/ovnsb_db.ctl ovsdb-server/get-db-storage-status OVN_Southbound
;;
restore)
echo "restore cmd is only used for nb db"
;;
*)
echo "unknown action $action"
esac
;;
*)
echo "unknown subcommand $component"
esac
}
tuning(){
action="$1"; shift
sys="$1"; shift
case $action in
install-fastpath)
case $sys in
centos7)
docker run -it --privileged -v /lib/modules:/lib/modules -v /usr/src:/usr/src -v /tmp/:/tmp/ $REGISTRY/centos7-compile:"$KUBE_OVN_VERSION" bash -c "./module.sh centos install"
while [ ! -f /tmp/kube_ovn_fastpath.ko ];
do
sleep 1
done
for i in $(kubectl -n kube-system get pods | grep ovn-cni | awk '{print $1}');
do
kubectl cp /tmp/kube_ovn_fastpath.ko kube-system/"$i":/tmp/
done
;;
centos8)
docker run -it --privileged -v /lib/modules:/lib/modules -v /usr/src:/usr/src -v /tmp/:/tmp/ $REGISTRY/centos8-compile:"$KUBE_OVN_VERSION" bash -c "./module.sh centos install"
while [ ! -f /tmp/kube_ovn_fastpath.ko ];
do
sleep 1
done
for i in $(kubectl -n kube-system get pods | grep ovn-cni | awk '{print $1}');
do
kubectl cp /tmp/kube_ovn_fastpath.ko kube-system/"$i":/tmp/
done
;;
*)
echo "unknown system $sys"
esac
;;
local-install-fastpath)
case $sys in
centos7)
# shellcheck disable=SC2145
docker run -it --privileged -v /lib/modules:/lib/modules -v /usr/src:/usr/src -v /tmp:/tmp $REGISTRY/centos7-compile:"$KUBE_OVN_VERSION" bash -c "./module.sh centos local-install $@"
for i in $(kubectl -n kube-system get pods | grep ovn-cni | awk '{print $1}');
do
kubectl cp /tmp/kube_ovn_fastpath.ko kube-system/"$i":/tmp/
done
;;
centos8)
# shellcheck disable=SC2145
docker run -it --privileged -v /lib/modules:/lib/modules -v /usr/src:/usr/src -v /tmp:/tmp $REGISTRY/centos8-compile:"$KUBE_OVN_VERSION" bash -c "./module.sh centos local-install $@"
for i in $(kubectl -n kube-system get pods | grep ovn-cni | awk '{print $1}');
do
kubectl cp /tmp/kube_ovn_fastpath.ko kube-system/"$i":/tmp/
done
;;
*)
echo "unknown system $sys"
esac
;;
remove-fastpath)
case $sys in
centos)
for i in $(kubectl -n kube-system get pods | grep ovn-cni | awk '{print $1}');
do
kubectl -n kube-system exec "$i" -- rm -f /tmp/kube_ovn_fastpath.ko
done
;;
*)
echo "unknown system $sys"
esac
;;
install-stt)
case $sys in
centos7)
# shellcheck disable=SC2145
docker run -it --privileged -v /lib/modules:/lib/modules -v /usr/src:/usr/src -v /tmp:/tmp $REGISTRY/centos7-compile:"$KUBE_OVN_VERSION" bash -c "./module.sh stt install"
for i in $(kubectl -n kube-system get pods | grep ovn-cni | awk '{print $1}');
do
for k in /tmp/*.rpm; do
kubectl cp "$k" kube-system/"$i":/tmp/
done
done
;;
centos8)
# shellcheck disable=SC2145
docker run -it --privileged -v /lib/modules:/lib/modules -v /usr/src:/usr/src -v /tmp:/tmp $REGISTRY/centos8-compile:"$KUBE_OVN_VERSION" bash -c "./module.sh stt install"
for i in $(kubectl -n kube-system get pods | grep ovn-cni | awk '{print $1}');
do
for k in /tmp/*.rpm; do
kubectl cp "$k" kube-system/"$i":/tmp/
done
done
;;
*)
echo "unknown system $sys"
esac
;;
local-install-stt)
case $sys in
centos7)
# shellcheck disable=SC2145
docker run -it --privileged -v /lib/modules:/lib/modules -v /usr/src:/usr/src -v /tmp:/tmp $REGISTRY/centos7-compile:"$KUBE_OVN_VERSION" bash -c "./module.sh stt local-install $@"
for i in $(kubectl -n kube-system get pods | grep ovn-cni | awk '{print $1}');
do
for k in /tmp/*.rpm; do
kubectl cp "$k" kube-system/"$i":/tmp/
done
done
;;
centos8)
# shellcheck disable=SC2145
docker run -it --privileged -v /lib/modules:/lib/modules -v /usr/src:/usr/src -v /tmp:/tmp $REGISTRY/centos8-compile:"$KUBE_OVN_VERSION" bash -c "./module.sh stt local-install $@"
for i in $(kubectl -n kube-system get pods | grep ovn-cni | awk '{print $1}');
do
for k in /tmp/*.rpm; do
kubectl cp "$k" kube-system/"$i":/tmp/
done
done
;;
*)
echo "unknown system $sys"
esac
;;
remove-stt)
case $sys in
centos)
for i in $(kubectl -n kube-system get pods | grep ovn-cni | awk '{print $1}');
do
kubectl -n kube-system exec "$i" -- rm -f /tmp/openvswitch-kmod*.rpm
done
;;
*)
echo "unknown system $sys"
esac
;;
*)
echo "unknown action $action"
esac
}
reload(){
kubectl delete pod -n kube-system -l app=ovn-central
kubectl rollout status deployment/ovn-central -n kube-system
kubectl delete pod -n kube-system -l app=ovs
kubectl delete pod -n kube-system -l app=kube-ovn-controller
kubectl rollout status deployment/kube-ovn-controller -n kube-system
kubectl delete pod -n kube-system -l app=kube-ovn-cni
kubectl rollout status daemonset/kube-ovn-cni -n kube-system
kubectl delete pod -n kube-system -l app=kube-ovn-pinger
kubectl rollout status daemonset/kube-ovn-pinger -n kube-system
kubectl delete pod -n kube-system -l app=kube-ovn-monitor
kubectl rollout status deployment/kube-ovn-monitor -n kube-system
}
env-check(){
set +e
KUBE_OVN_NS=kube-system
podNames=$(kubectl get pod --no-headers -n $KUBE_OVN_NS | grep kube-ovn-cni | awk '{print $1}')
for pod in $podNames
do
nodeName=$(kubectl get pod $pod -n $KUBE_OVN_NS -o jsonpath={.spec.nodeName})
echo "************************************************"
echo "Start environment check for Node $nodeName"
echo "************************************************"
kubectl exec -it -n $KUBE_OVN_NS $pod -c cni-server -- bash /kube-ovn/env-check.sh
done
}
if [ $# -lt 1 ]; then
showHelp
exit 0
else
subcommand="$1"; shift
fi
getOvnCentralPod
case $subcommand in
nbctl)
kubectl exec "$OVN_NB_POD" -n $KUBE_OVN_NS -- ovn-nbctl "$@"
kubectl exec "$OVN_NB_POD" -n $KUBE_OVN_NS -c ovn-central -- ovn-nbctl "$@"
;;
sbctl)
kubectl exec "$OVN_SB_POD" -n $KUBE_OVN_NS -- ovn-sbctl "$@"
kubectl exec "$OVN_SB_POD" -n $KUBE_OVN_NS -c ovn-central -- ovn-sbctl "$@"
;;
vsctl)
vsctl "$@"
vsctl|ofctl|dpctl|appctl)
xxctl "$subcommand" "$@"
;;
nb|sb)
dbtool "$subcommand" "$@"
;;
tcpdump)
tcpdump "$@"
@@ -274,8 +869,17 @@ case $subcommand in
diagnose)
diagnose "$@"
;;
reload)
reload
;;
tuning)
tuning "$@"
;;
env-check)
env-check
;;
*)
showHelp
showHelp
;;
esac
`)))
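Note: the shell helper ipIsInCidr in the kubectl-ko script above implements CIDR membership with manual hex and byte arithmetic so it can run inside the plugin without extra tooling. For reference, the same check expressed in Go with the standard net/netip package (an illustrative sketch, not part of the plugin or this commit):

package main

import (
	"fmt"
	"net/netip"
)

// ipIsInCIDR reports whether ip falls inside cidr, the question the shell helper
// ipIsInCidr answers byte by byte for both IPv4 and IPv6.
func ipIsInCIDR(ip, cidr string) (bool, error) {
	addr, err := netip.ParseAddr(ip)
	if err != nil {
		return false, err
	}
	prefix, err := netip.ParsePrefix(cidr)
	if err != nil {
		return false, err
	}
	return prefix.Contains(addr), nil
}

func main() {
	ok, _ := ipIsInCIDR("192.168.0.10", "192.168.0.0/24")
	fmt.Println(ok) // true
	ok, _ = ipIsInCIDR("2001:db8::1", "2001:db8::/64")
	fmt.Println(ok) // true
}

Both calls print true; a family mismatch between address and prefix simply yields false.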

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large