Merge pull request #1892 from pixiake/master

feat: Support for enabling kube-apiserver auditing
This commit is contained in:
KubeSphere CI Bot 2023-06-29 14:41:44 +08:00 committed by GitHub
commit 4d32a970fd
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
11 changed files with 340 additions and 57 deletions

View File

@ -127,7 +127,10 @@ func (cfg *ClusterSpec) GenerateCertSANs() []string {
extraCertSANs := make([]string, 0)
extraCertSANs = append(extraCertSANs, cfg.ControlPlaneEndpoint.Domain)
extraCertSANs = append(extraCertSANs, cfg.ControlPlaneEndpoint.Address)
if cfg.ControlPlaneEndpoint.Address != "" {
extraCertSANs = append(extraCertSANs, cfg.ControlPlaneEndpoint.Address)
}
for _, host := range cfg.Hosts {
extraCertSANs = append(extraCertSANs, host.Name)

View File

@ -184,16 +184,11 @@ func SetDefaultHostsCfg(cfg *ClusterSpec) []HostCfg {
}
func SetDefaultLBCfg(cfg *ClusterSpec, masterGroup []*KubeHost) ControlPlaneEndpoint {
//The detection is not an HA environment, and the address at LB does not need input
if len(masterGroup) == 1 && cfg.ControlPlaneEndpoint.Address != "" {
fmt.Println("When the environment is not HA, the LB address does not need to be entered, so delete the corresponding value.")
os.Exit(0)
}
//Check whether LB should be configured
if len(masterGroup) >= 3 && !cfg.ControlPlaneEndpoint.IsInternalLBEnabled() && cfg.ControlPlaneEndpoint.Address == "" {
fmt.Println("When the environment has at least three masters, You must set the value of the LB address or enable the internal loadbalancer.")
os.Exit(0)
if len(masterGroup) >= 2 && !cfg.ControlPlaneEndpoint.IsInternalLBEnabled() && cfg.ControlPlaneEndpoint.Address == "" {
fmt.Println()
fmt.Println("Warning: When there are at least two nodes in the control-plane, you should set the value of the LB address or enable the internal loadbalancer, if the 'ControlPlaneEndpoint.Domain' cannot be resolved in your dns server.")
fmt.Println()
}
// Check whether LB address and the internal LB are both enabled
@ -202,7 +197,7 @@ func SetDefaultLBCfg(cfg *ClusterSpec, masterGroup []*KubeHost) ControlPlaneEndp
os.Exit(0)
}
if cfg.ControlPlaneEndpoint.Address == "" || cfg.ControlPlaneEndpoint.Address == "127.0.0.1" {
if cfg.ControlPlaneEndpoint.Address == "127.0.0.1" {
cfg.ControlPlaneEndpoint.Address = masterGroup[0].InternalAddress
}
if cfg.ControlPlaneEndpoint.Domain == "" {

View File

@ -46,6 +46,7 @@ type Kubernetes struct {
FeatureGates map[string]bool `yaml:"featureGates" json:"featureGates,omitempty"`
KubeletConfiguration runtime.RawExtension `yaml:"kubeletConfiguration" json:"kubeletConfiguration,omitempty"`
KubeProxyConfiguration runtime.RawExtension `yaml:"kubeProxyConfiguration" json:"kubeProxyConfiguration,omitempty"`
Audit Audit `yaml:"audit" json:"audit,omitempty"`
}
// Kata contains the configuration for the kata in cluster
@ -58,6 +59,11 @@ type NodeFeatureDiscovery struct {
Enabled *bool `yaml:"enabled" json:"enabled,omitempty"`
}
// Audit contains the configuration for the kube-apiserver audit in cluster
type Audit struct {
	// Enabled toggles kube-apiserver auditing. A nil pointer means the option
	// was not set in the cluster spec and is treated as disabled.
	Enabled *bool `yaml:"enabled" json:"enabled,omitempty"`
}
// EnableNodelocaldns is used to determine whether to deploy nodelocaldns.
func (k *Kubernetes) EnableNodelocaldns() bool {
if k.Nodelocaldns == nil {
@ -82,9 +88,18 @@ func (k *Kubernetes) EnableNodeFeatureDiscovery() bool {
return *k.NodeFeatureDiscovery.Enabled
}
// EnableAutoRenewCerts is used to determine whether to enable AutoRenewCerts.
// A nil pointer means the option was never set and is treated as disabled.
func (k *Kubernetes) EnableAutoRenewCerts() bool {
	if k.AutoRenewCerts != nil {
		return *k.AutoRenewCerts
	}
	return false
}
// EnableAudit is used to determine whether to enable kube-apiserver audit.
// A nil Audit.Enabled pointer means the option was never set and auditing
// stays disabled.
func (k *Kubernetes) EnableAudit() bool {
	if k.Audit.Enabled == nil {
		return false
	}
	// Fix: the original dereferenced k.AutoRenewCerts here (copy-paste from
	// EnableAutoRenewCerts), which returned the wrong flag and could panic
	// with a nil-pointer dereference when AutoRenewCerts was unset.
	return *k.Audit.Enabled
}

View File

@ -18,6 +18,7 @@ package templates
import (
"fmt"
"github.com/kubesphere/kubekey/v3/cmd/kk/pkg/bootstrap/registry"
"text/template"
"github.com/lithammer/dedent"
@ -63,41 +64,41 @@ echo 'net.bridge.bridge-nf-call-arptables = 1' >> /etc/sysctl.conf
echo 'net.bridge.bridge-nf-call-ip6tables = 1' >> /etc/sysctl.conf
echo 'net.bridge.bridge-nf-call-iptables = 1' >> /etc/sysctl.conf
echo 'net.ipv4.ip_local_reserved_ports = 30000-32767' >> /etc/sysctl.conf
echo 'vm.max_map_count = 262144' >> /etc/sysctl.conf
echo 'vm.swappiness = 0' >> /etc/sysctl.conf
echo 'fs.inotify.max_user_instances = 524288' >> /etc/sysctl.conf
echo 'kernel.pid_max = 65535' >> /etc/sysctl.conf
echo 'net.ipv4.tcp_tw_recycle = 0' >> /etc/sysctl.conf
echo 'net.ipv4.tcp_tw_reuse = 0' >> /etc/sysctl.conf
echo 'net.ipv4.conf.all.rp_filter = 1' >> /etc/sysctl.conf
echo 'net.ipv4.conf.default.rp_filter = 1' >> /etc/sysctl.conf
echo 'vm.overcommit_memory = 1' >> /etc/sysctl.conf
echo 'fs.inotify.max_user_watches = 524288' >> /etc/sysctl.conf
echo 'fs.pipe-max-size = 4194304' >> /etc/sysctl.conf
echo 'net.core.netdev_max_backlog = 65535' >> /etc/sysctl.conf
echo 'net.core.rmem_max = 33554432' >> /etc/sysctl.conf
echo 'net.core.wmem_max = 33554432' >> /etc/sysctl.conf
echo 'net.core.somaxconn = 32768' >> /etc/sysctl.conf
echo 'net.ipv4.tcp_max_syn_backlog = 1048576' >> /etc/sysctl.conf
echo 'net.ipv4.neigh.default.gc_thresh1 = 512' >> /etc/sysctl.conf
echo 'net.ipv4.neigh.default.gc_thresh2 = 2048' >> /etc/sysctl.conf
echo 'net.ipv4.neigh.default.gc_thresh3 = 4096' >> /etc/sysctl.conf
echo 'net.core.somaxconn = 32768' >> /etc/sysctl.conf
echo 'net.ipv4.conf.eth0.arp_accept = 1' >> /etc/sysctl.conf
echo 'fs.aio-max-nr = 262144' >> /etc/sysctl.conf
echo 'net.ipv4.tcp_retries2 = 15' >> /etc/sysctl.conf
echo 'net.ipv4.tcp_max_tw_buckets = 1048576' >> /etc/sysctl.conf
echo 'net.ipv4.tcp_max_orphans = 65535' >> /etc/sysctl.conf
echo 'net.ipv4.udp_rmem_min = 131072' >> /etc/sysctl.conf
echo 'net.ipv4.udp_wmem_min = 131072' >> /etc/sysctl.conf
echo 'net.ipv4.conf.all.rp_filter = 1' >> /etc/sysctl.conf
echo 'net.ipv4.conf.default.rp_filter = 1' >> /etc/sysctl.conf
echo 'net.ipv4.conf.all.arp_accept = 1' >> /etc/sysctl.conf
echo 'net.ipv4.conf.default.arp_accept = 1' >> /etc/sysctl.conf
echo 'net.ipv4.conf.all.arp_ignore = 1' >> /etc/sysctl.conf
echo 'net.ipv4.conf.default.arp_ignore = 1' >> /etc/sysctl.conf
echo 'vm.max_map_count = 262144' >> /etc/sysctl.conf
echo 'vm.swappiness = 0' >> /etc/sysctl.conf
echo 'vm.overcommit_memory = 1' >> /etc/sysctl.conf
echo 'fs.inotify.max_user_instances = 524288' >> /etc/sysctl.conf
echo 'fs.inotify.max_user_watches = 524288' >> /etc/sysctl.conf
echo 'fs.pipe-max-size = 4194304' >> /etc/sysctl.conf
echo 'fs.aio-max-nr = 262144' >> /etc/sysctl.conf
echo 'kernel.pid_max = 65535' >> /etc/sysctl.conf
echo 'kernel.watchdog_thresh = 5' >> /etc/sysctl.conf
echo 'kernel.hung_task_timeout_secs = 5' >> /etc/sysctl.conf
#See https://help.aliyun.com/document_detail/118806.html#uicontrol-e50-ddj-w0y
sed -r -i "s@#{0,}?net.ipv4.tcp_tw_recycle ?= ?(0|1|2)@net.ipv4.tcp_tw_recycle = 0@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.tcp_tw_reuse ?= ?(0|1)@net.ipv4.tcp_tw_reuse = 0@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.conf.all.rp_filter ?= ?(0|1|2)@net.ipv4.conf.all.rp_filter = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.conf.default.rp_filter ?= ?(0|1|2)@net.ipv4.conf.default.rp_filter = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.ip_forward ?= ?(0|1)@net.ipv4.ip_forward = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.bridge.bridge-nf-call-arptables ?= ?(0|1)@net.bridge.bridge-nf-call-arptables = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.bridge.bridge-nf-call-ip6tables ?= ?(0|1)@net.bridge.bridge-nf-call-ip6tables = 1@g" /etc/sysctl.conf
@ -127,6 +128,12 @@ sed -r -i "s@#{0,}?net.ipv4.udp_rmem_min ?= ?([0-9]{1,})@net.ipv4.udp_rmem_min
sed -r -i "s@#{0,}?net.ipv4.udp_wmem_min ?= ?([0-9]{1,})@net.ipv4.udp_wmem_min = 131072@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.conf.all.arp_ignore ?= ??(0|1|2)@net.ipv4.conf.all.arp_ignore = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.conf.default.arp_ignore ?= ??(0|1|2)@net.ipv4.conf.default.arp_ignore = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?kernel.watchdog_thresh ?= ?([0-9]{1,})@kernel.watchdog_thresh = 5@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?kernel.hung_task_timeout_secs ?= ?([0-9]{1,})@kernel.hung_task_timeout_secs = 5@g" /etc/sysctl.conf
tmpfile="$$.tmp"
awk ' !x[$0]++{print > "'$tmpfile'"}' /etc/sysctl.conf
mv $tmpfile /etc/sysctl.conf
# ulimit
echo "* soft nofile 1048576" >> /etc/security/limits.conf
@ -137,21 +144,15 @@ echo "* soft memlock unlimited" >> /etc/security/limits.conf
echo "* hard memlock unlimited" >> /etc/security/limits.conf
sed -r -i "s@#{0,}?\* soft nofile ?([0-9]{1,})@\* soft nofile 1048576@g" /etc/security/limits.conf
sed -r -i "s@#{0,}?\* hard nofile ?([0-9]{1,})@\* soft nofile 1048576@g" /etc/security/limits.conf
sed -r -i "s@#{0,}?\* soft nproc ?([0-9]{1,})@\* soft nofile 65536@g" /etc/security/limits.conf
sed -r -i "s@#{0,}?\* hard nproc ?([0-9]{1,})@\* soft nofile 65536@g" /etc/security/limits.conf
sed -r -i "s@#{0,}?\* hard nofile ?([0-9]{1,})@\* hard nofile 1048576@g" /etc/security/limits.conf
sed -r -i "s@#{0,}?\* soft nproc ?([0-9]{1,})@\* soft nproc 65536@g" /etc/security/limits.conf
sed -r -i "s@#{0,}?\* hard nproc ?([0-9]{1,})@\* hard nproc 65536@g" /etc/security/limits.conf
sed -r -i "s@#{0,}?\* soft memlock ?([0-9]{1,}([TGKM]B){0,1}|unlimited)@\* soft memlock unlimited@g" /etc/security/limits.conf
sed -r -i "s@#{0,}?\* hard memlock ?([0-9]{1,}([TGKM]B){0,1}|unlimited)@\* hard memlock unlimited@g" /etc/security/limits.conf
# kernel
echo never > /sys/kernel/mm/transparent_hugepage/enabled
echo never > /sys/kernel/mm/transparent_hugepage/defrag
echo 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' >> /etc/rc.local
echo 'echo never > /sys/kernel/mm/transparent_hugepage/defrag' >> /etc/rc.local
tmpfile="$$.tmp"
awk ' !x[$0]++{print > "'$tmpfile'"}' /etc/sysctl.conf
mv $tmpfile /etc/sysctl.conf
awk ' !x[$0]++{print > "'$tmpfile'"}' /etc/security/limits.conf
mv $tmpfile /etc/security/limits.conf
systemctl stop firewalld 1>/dev/null 2>/dev/null
systemctl disable firewalld 1>/dev/null 2>/dev/null
@ -203,6 +204,7 @@ cat >>/etc/hosts<<EOF
# kubekey hosts END
EOF
sync
echo 3 > /proc/sys/vm/drop_caches
# Make sure the iptables utility doesn't use the nftables backend.
@ -211,9 +213,6 @@ update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy >/dev/null 2>&1 |
update-alternatives --set arptables /usr/sbin/arptables-legacy >/dev/null 2>&1 || true
update-alternatives --set ebtables /usr/sbin/ebtables-legacy >/dev/null 2>&1 || true
ulimit -u 65535
ulimit -n 65535
`)))
func GenerateHosts(runtime connector.ModuleRuntime, kubeConf *common.KubeConf) []string {
@ -222,8 +221,6 @@ func GenerateHosts(runtime connector.ModuleRuntime, kubeConf *common.KubeConf) [
if kubeConf.Cluster.ControlPlaneEndpoint.Address != "" {
lbHost = fmt.Sprintf("%s %s", kubeConf.Cluster.ControlPlaneEndpoint.Address, kubeConf.Cluster.ControlPlaneEndpoint.Domain)
} else {
lbHost = fmt.Sprintf("%s %s", runtime.GetHostsByRole(common.Master)[0].GetInternalAddress(), kubeConf.Cluster.ControlPlaneEndpoint.Domain)
}
for _, host := range runtime.GetAllHosts() {
@ -237,7 +234,12 @@ func GenerateHosts(runtime connector.ModuleRuntime, kubeConf *common.KubeConf) [
}
if len(runtime.GetHostsByRole(common.Registry)) > 0 {
hostsList = append(hostsList, fmt.Sprintf("%s %s", runtime.GetHostsByRole(common.Registry)[0].GetInternalAddress(), kubeConf.Cluster.Registry.PrivateRegistry))
if kubeConf.Cluster.Registry.PrivateRegistry != "" {
hostsList = append(hostsList, fmt.Sprintf("%s %s", runtime.GetHostsByRole(common.Registry)[0].GetInternalAddress(), kubeConf.Cluster.Registry.PrivateRegistry))
} else {
hostsList = append(hostsList, fmt.Sprintf("%s %s", runtime.GetHostsByRole(common.Registry)[0].GetInternalAddress(), registry.RegistryCertificateBaseName))
}
}
hostsList = append(hostsList, lbHost)

View File

@ -128,3 +128,11 @@ func (e *EnableKubeProxy) PreCheck(_ connector.Runtime) (bool, error) {
}
return false, nil
}
// EnableAudit is a task prepare that gates audit-related tasks: a task guarded
// by it runs only when kube-apiserver auditing is enabled in the cluster spec.
type EnableAudit struct {
	KubePrepare
}

// PreCheck reports whether auditing is enabled; the connector runtime
// argument is unused.
func (e *EnableAudit) PreCheck(_ connector.Runtime) (bool, error) {
	return e.KubeConf.Cluster.Kubernetes.EnableAudit(), nil
}

View File

@ -113,6 +113,7 @@ func (d *DefaultLoader) Load() (*kubekeyapiv1alpha2.Cluster, error) {
Worker: {hostname},
Registry: {hostname},
}
allInOne.Spec.ControlPlaneEndpoint.Address = "127.0.0.1"
if ver := normalizedBuildVersion(d.KubernetesVersion); ver != "" {
s := strings.Split(ver, "-")
if len(s) > 1 {

View File

@ -149,6 +149,40 @@ func (i *InitKubernetesModule) Init() {
Parallel: true,
}
generateAuditPolicy := &task.RemoteTask{
Name: "GenerateAduitPolicy",
Desc: "Generate audit policy",
Hosts: i.Runtime.GetHostsByRole(common.Master),
Prepare: &prepare.PrepareCollection{
new(common.EnableAudit),
new(common.OnlyFirstMaster),
&ClusterIsExist{Not: true},
},
Action: &action.Template{
Template: templates.AuditPolicy,
Dst: filepath.Join("/etc/kubernetes/audit", templates.AuditPolicy.Name()),
},
Parallel: true,
Retry: 2,
}
generateAuditWebhook := &task.RemoteTask{
Name: "GenerateAduitWebhook",
Desc: "Generate audit webhook",
Hosts: i.Runtime.GetHostsByRole(common.Master),
Prepare: &prepare.PrepareCollection{
new(common.EnableAudit),
new(common.OnlyFirstMaster),
&ClusterIsExist{Not: true},
},
Action: &action.Template{
Template: templates.AuditWebhook,
Dst: filepath.Join("/etc/kubernetes/audit", templates.AuditWebhook.Name()),
},
Parallel: true,
Retry: 2,
}
kubeadmInit := &task.RemoteTask{
Name: "KubeadmInit",
Desc: "Init cluster using kubeadm",
@ -190,6 +224,8 @@ func (i *InitKubernetesModule) Init() {
i.Tasks = []task.Interface{
generateKubeadmConfig,
generateAuditPolicy,
generateAuditWebhook,
kubeadmInit,
copyKubeConfig,
removeMasterTaint,
@ -220,6 +256,38 @@ func (j *JoinNodesModule) Init() {
Parallel: true,
}
generateAuditPolicy := &task.RemoteTask{
Name: "GenerateAduitPolicy",
Desc: "Generate audit policy",
Hosts: j.Runtime.GetHostsByRole(common.Master),
Prepare: &prepare.PrepareCollection{
new(common.EnableAudit),
&NodeInCluster{Not: true},
},
Action: &action.Template{
Template: templates.AuditPolicy,
Dst: filepath.Join("/etc/kubernetes/audit", templates.AuditPolicy.Name()),
},
Parallel: true,
Retry: 2,
}
generateAuditWebhook := &task.RemoteTask{
Name: "GenerateAduitWebhook",
Desc: "Generate audit webhook",
Hosts: j.Runtime.GetHostsByRole(common.Master),
Prepare: &prepare.PrepareCollection{
new(common.EnableAudit),
&NodeInCluster{Not: true},
},
Action: &action.Template{
Template: templates.AuditWebhook,
Dst: filepath.Join("/etc/kubernetes/audit", templates.AuditWebhook.Name()),
},
Parallel: true,
Retry: 2,
}
joinMasterNode := &task.RemoteTask{
Name: "JoinControlPlaneNode",
Desc: "Join control-plane node",
@ -281,6 +349,8 @@ func (j *JoinNodesModule) Init() {
j.Tasks = []task.Interface{
generateKubeadmConfig,
generateAuditPolicy,
generateAuditWebhook,
joinMasterNode,
joinWorkerNode,
copyKubeConfig,

View File

@ -252,7 +252,7 @@ func (g *GenerateKubeadmConfig) Execute(runtime connector.Runtime) error {
}
}
_, ApiServerArgs := util.GetArgs(v1beta2.GetApiServerArgs(g.WithSecurityEnhancement), g.KubeConf.Cluster.Kubernetes.ApiServerArgs)
_, ApiServerArgs := util.GetArgs(v1beta2.GetApiServerArgs(g.WithSecurityEnhancement, g.KubeConf.Cluster.Kubernetes.EnableAudit()), g.KubeConf.Cluster.Kubernetes.ApiServerArgs)
_, ControllerManagerArgs := util.GetArgs(v1beta2.GetControllermanagerArgs(g.KubeConf.Cluster.Kubernetes.Version, g.WithSecurityEnhancement), g.KubeConf.Cluster.Kubernetes.ControllerManagerArgs)
_, SchedulerArgs := util.GetArgs(v1beta2.GetSchedulerArgs(g.WithSecurityEnhancement), g.KubeConf.Cluster.Kubernetes.SchedulerArgs)
@ -300,6 +300,7 @@ func (g *GenerateKubeadmConfig) Execute(runtime connector.Runtime) error {
"NodeCidrMaskSize": g.KubeConf.Cluster.Kubernetes.NodeCidrMaskSize,
"CriSock": g.KubeConf.Cluster.Kubernetes.ContainerRuntimeEndpoint,
"ApiServerArgs": v1beta2.UpdateFeatureGatesConfiguration(ApiServerArgs, g.KubeConf),
"EnableAudit": g.KubeConf.Cluster.Kubernetes.EnableAudit(),
"ControllerManagerArgs": v1beta2.UpdateFeatureGatesConfiguration(ControllerManagerArgs, g.KubeConf),
"SchedulerArgs": v1beta2.UpdateFeatureGatesConfiguration(SchedulerArgs, g.KubeConf),
"KubeletConfiguration": v1beta2.GetKubeletConfiguration(runtime, g.KubeConf, g.KubeConf.Cluster.Kubernetes.ContainerRuntimeEndpoint, g.WithSecurityEnhancement),
@ -923,7 +924,7 @@ func (s *SaveKubeConfig) Execute(runtime connector.Runtime) error {
clusterPublicAddress := s.KubeConf.Cluster.ControlPlaneEndpoint.Address
master1 := runtime.GetHostsByRole(common.Master)[0]
if clusterPublicAddress == master1.GetInternalAddress() {
if clusterPublicAddress == master1.GetInternalAddress() || clusterPublicAddress == "" {
clusterPublicAddress = master1.GetAddress()
}

View File

@ -0,0 +1,168 @@
/*
Copyright 2021 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package templates
import (
"github.com/lithammer/dedent"
"text/template"
)
// AuditPolicy defines the template of kube-apiserver audit-policy.
// The policy drops known high-volume/low-risk requests (kube-proxy watches,
// kubelet node gets, controller endpoint updates, health/version URLs,
// events), logs sensitive resources (secrets, configmaps, service account
// tokens, tokenreviews) at Metadata level only, and records other known API
// groups at Request/RequestResponse level. It is rendered to
// /etc/kubernetes/audit/audit-policy.yaml by the audit generation tasks.
var AuditPolicy = template.Must(template.New("audit-policy.yaml").Parse(
dedent.Dedent(`apiVersion: audit.k8s.io/v1
kind: Policy
rules:
# The following requests were manually identified as high-volume and low-risk,
# so drop them.
- level: None
users: ["system:kube-proxy"]
verbs: ["watch"]
resources:
- group: "" # core
resources: ["endpoints", "services", "services/status"]
- level: None
users: ["system:unsecured"]
namespaces: ["kube-system"]
verbs: ["get"]
resources:
- group: "" # core
resources: ["configmaps"]
- level: None
users: ["kubelet"] # legacy kubelet identity
verbs: ["get"]
resources:
- group: "" # core
resources: ["nodes", "nodes/status"]
- level: None
userGroups: ["system:nodes"]
verbs: ["get"]
resources:
- group: "" # core
resources: ["nodes", "nodes/status"]
- level: None
users:
- system:kube-controller-manager
- system:kube-scheduler
- system:serviceaccount:kube-system:endpoint-controller
verbs: ["get", "update"]
namespaces: ["kube-system"]
resources:
- group: "" # core
resources: ["endpoints"]
- level: None
users: ["system:apiserver"]
verbs: ["get"]
resources:
- group: "" # core
resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
# Don't log HPA fetching metrics.
- level: None
users:
- system:kube-controller-manager
verbs: ["get", "list"]
resources:
- group: "metrics.k8s.io"
# Don't log these read-only URLs.
- level: None
nonResourceURLs:
- /healthz*
- /version
- /swagger*
# Don't log events requests.
- level: None
resources:
- group: "" # core
resources: ["events"]
# Secrets, ConfigMaps, TokenRequest and TokenReviews can contain sensitive & binary data,
# so only log at the Metadata level.
- level: Metadata
resources:
- group: "" # core
resources: ["secrets", "configmaps", "serviceaccounts/token"]
- group: authentication.k8s.io
resources: ["tokenreviews"]
omitStages:
- "RequestReceived"
# Get responses can be large; skip them.
- level: Request
verbs: ["get", "list", "watch"]
resources:
- group: "" # core
- group: "admissionregistration.k8s.io"
- group: "apiextensions.k8s.io"
- group: "apiregistration.k8s.io"
- group: "apps"
- group: "authentication.k8s.io"
- group: "authorization.k8s.io"
- group: "autoscaling"
- group: "batch"
- group: "certificates.k8s.io"
- group: "extensions"
- group: "metrics.k8s.io"
- group: "networking.k8s.io"
- group: "policy"
- group: "rbac.authorization.k8s.io"
- group: "settings.k8s.io"
- group: "storage.k8s.io"
omitStages:
- "RequestReceived"
# Default level for known APIs
- level: RequestResponse
resources:
- group: "" # core
- group: "admissionregistration.k8s.io"
- group: "apiextensions.k8s.io"
- group: "apiregistration.k8s.io"
- group: "apps"
- group: "authentication.k8s.io"
- group: "authorization.k8s.io"
- group: "autoscaling"
- group: "batch"
- group: "certificates.k8s.io"
- group: "extensions"
- group: "metrics.k8s.io"
- group: "networking.k8s.io"
- group: "policy"
- group: "rbac.authorization.k8s.io"
- group: "settings.k8s.io"
- group: "storage.k8s.io"
omitStages:
- "RequestReceived"
# Default level for all other requests.
- level: Metadata
omitStages:
- "RequestReceived"
`)))
// AuditWebhook defines the template of kube-apiserver audit-webhook.
// NOTE(review): the server address contains the literal placeholder
// SHOULD_BE_REPLACED — it must be substituted with the real auditing
// webhook endpoint before use (presumably by a later task or the operator;
// confirm against the deployment flow). TLS verification is deliberately
// skipped (insecure-skip-tls-verify: true).
var AuditWebhook = template.Must(template.New("audit-webhook.yaml").Parse(
dedent.Dedent(`apiVersion: v1
kind: Config
clusters:
- name: kube-auditing
cluster:
server: https://SHOULD_BE_REPLACED:6443/audit/webhook/event
insecure-skip-tls-verify: true
contexts:
- context:
cluster: kube-auditing
user: ""
name: default-context
current-context: default-context
preferences: {}
users: []
`)))

View File

@ -86,6 +86,13 @@ apiServer:
{{- range .CertSANs }}
- "{{ . }}"
{{- end }}
{{- if .EnableAudit }}
extraVolumes:
- name: k8s-audit
hostPath: /etc/kubernetes/audit
mountPath: /etc/kubernetes/audit
pathType: DirectoryOrCreate
{{- end }}
controllerManager:
extraArgs:
node-cidr-mask-size: "{{ .NodeCidrMaskSize }}"
@ -165,17 +172,11 @@ var (
}
ApiServerArgs = map[string]string{
"bind-address": "0.0.0.0",
"audit-log-maxage": "30",
"audit-log-maxbackup": "10",
"audit-log-maxsize": "100",
"bind-address": "0.0.0.0",
}
ApiServerSecurityArgs = map[string]string{
"bind-address": "0.0.0.0",
"audit-log-maxage": "30",
"audit-log-maxbackup": "10",
"audit-log-maxsize": "100",
"authorization-mode": "Node,RBAC",
"bind-address": "0.0.0.0",
"authorization-mode": "Node,RBAC",
// --enable-admission-plugins=EventRateLimit must have a configuration file
"enable-admission-plugins": "AlwaysPullImages,ServiceAccount,NamespaceLifecycle,NodeRestriction,LimitRanger,ResourceQuota,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,PodNodeSelector,PodSecurity",
// "audit-log-path": "/var/log/apiserver/audit.log", // need audit policy
@ -185,6 +186,13 @@ var (
"tls-min-version": "VersionTLS12",
"tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
}
auditArgs = map[string]string{
"audit-log-format": "json",
"audit-log-maxbackup": "2",
"audit-log-maxsize": "200",
"audit-policy-file": "/etc/kubernetes/audit/audit-policy.yaml",
"audit-webhook-config-file": "/etc/kubernetes/audit/audit-webhook.yaml",
}
ControllermanagerArgs = map[string]string{
"bind-address": "0.0.0.0",
"cluster-signing-duration": "87600h",
@ -205,10 +213,22 @@ var (
}
)
func GetApiServerArgs(securityEnhancement bool) map[string]string {
func GetApiServerArgs(securityEnhancement bool, enableAudit bool) map[string]string {
if securityEnhancement {
if enableAudit {
for k, v := range auditArgs {
ApiServerSecurityArgs[k] = v
}
}
return ApiServerSecurityArgs
}
if enableAudit {
for k, v := range auditArgs {
ApiServerArgs[k] = v
}
}
return ApiServerArgs
}

View File

@ -44,7 +44,7 @@ K3S_VERSION=${K3S_VERSION}
CONTAINERD_VERSION=${CONTAINERD_VERSION}
RUNC_VERSION=${RUNC_VERSION}
COMPOSE_VERSION=${COMPOSE_VERSION}
CALICO_VERSION=${COMPOSE_VERSION}
CALICO_VERSION=${CALICO_VERSION}
# qsctl
QSCTL_ACCESS_KEY_ID=${QSCTL_ACCESS_KEY_ID}
@ -176,7 +176,7 @@ if [ $CALICO_VERSION ]; then
https://github.com/projectcalico/calico/releases/download/$CALICO_VERSION/calicoctl-linux-$arch
qsctl cp binaries/calicoctl/$CALICO_VERSION/$arch/calicoctl-linux-$arch \
qs://containernetworking/plugins/releases/download/$CNI_VERSION/calicoctl-linux-$arch \
qs://kubernetes-release/projectcalico/calico/releases/download/$CALICO_VERSION/calicoctl-linux-$arch \
-c qsctl-config.yaml
done