Kubernetes Secure Install


In this post I will show you how to install a Kubernetes cluster in a secure way with kubeadm, CRI-O and Cilium.

yum install -y epel-release
yum install -y nano wget

SELinux and Firewall Config

systemctl enable firewalld
systemctl start firewalld

# Check selinux enabled
sestatus

# Result
SELinux status:                 enabled
SELinuxfs mount:                /sys/fs/selinux
SELinux root directory:         /etc/selinux
Loaded policy name:             targeted
Current mode:                   enforcing
Mode from config file:          enforcing
Policy MLS status:              enabled
Policy deny_unknown status:     allowed
Memory protection checking:     actual (secure)
Max kernel policy version:      33
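
If SELinux is not already in enforcing mode, switch it before continuing (a minimal sketch; the config path is the stock RHEL/CentOS one):

# switch the running system to enforcing
sudo setenforce 1
# make the setting persistent across reboots
sudo sed -i 's/^SELINUX=.*/SELINUX=enforcing/' /etc/selinux/config
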
sudo firewall-cmd --add-port=9345/tcp --permanent
sudo firewall-cmd --add-port=6443/tcp --permanent
sudo firewall-cmd --add-port=10250/tcp --permanent
sudo firewall-cmd --add-port=2379/tcp --permanent
sudo firewall-cmd --add-port=2380/tcp --permanent
sudo firewall-cmd --add-port=30000-32767/tcp --permanent
# Used for the Monitoring
sudo firewall-cmd --add-port=9796/tcp --permanent
sudo firewall-cmd --add-port=19090/tcp --permanent
sudo firewall-cmd --add-port=6942/tcp --permanent
sudo firewall-cmd --add-port=9091/tcp --permanent
### CNI specific ports Cilium
# 4244/TCP is required when the Hubble Relay is enabled and therefore needs to connect to all agents to collect the flows
sudo firewall-cmd --add-port=4244/tcp --permanent
# Cilium healthcheck related permits:
sudo firewall-cmd --add-port=4240/tcp --permanent
sudo firewall-cmd --remove-icmp-block=echo-request --permanent
sudo firewall-cmd --remove-icmp-block=echo-reply --permanent
# Since we are using Cilium with GENEVE as overlay, we need the following port too:
sudo firewall-cmd --add-port=6081/udp --permanent
### Ingress Controller specific ports
sudo firewall-cmd --add-port=80/tcp --permanent
sudo firewall-cmd --add-port=443/tcp --permanent
### To get DNS resolution working, simply enable Masquerading.
sudo firewall-cmd --zone=public  --add-masquerade --permanent

### Finally apply all the firewall changes
sudo firewall-cmd --reload

Verification:

sudo firewall-cmd --list-all
public (active)
  target: default
  icmp-block-inversion: no
  interfaces: eno1
  sources: 
  services: cockpit dhcpv6-client ssh wireguard
  ports: 9345/tcp 6443/tcp 10250/tcp 2379/tcp 2380/tcp 30000-32767/tcp 4240/tcp 6081/udp 80/tcp 443/tcp 4244/tcp 9796/tcp 19090/tcp 6942/tcp 9091/tcp
  protocols: 
  masquerade: yes
  forward-ports: 
  source-ports: 
  icmp-blocks: 
  rich rules: 
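
You can also query individual rules instead of scanning the full listing:

sudo firewall-cmd --query-port=6443/tcp
sudo firewall-cmd --query-port=10250/tcp
sudo firewall-cmd --query-masquerade
# each command prints "yes" when the rule is active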

Linux Configurations

Enable cgroupV2

sudo dnf install -y grubby
sudo grubby \
  --update-kernel=ALL \
  --args="systemd.unified_cgroup_hierarchy=1"

cat << EOF >> /etc/systemd/system.conf
DefaultCPUAccounting=yes
DefaultIOAccounting=yes
DefaultIPAccounting=yes
DefaultBlockIOAccounting=yes
EOF

# reboot to apply the new kernel arguments
init 6

# after the reboot, check for type cgroup2
$ mount -l|grep cgroup
cgroup2 on /sys/fs/cgroup type cgroup2 (rw,nosuid,nodev,noexec,relatime,seclabel,nsdelegate)

# check for cpu controller
$ cat /sys/fs/cgroup/cgroup.subtree_control
cpu io memory pids
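
If the cpu controller is missing from cgroup.subtree_control, it can be delegated manually (a sketch; on a systemd-managed host this is normally already set up):

# delegate the cpu controller to the child cgroups of the root cgroup
echo "+cpu" | sudo tee /sys/fs/cgroup/cgroup.subtree_control

Load the kernel modules CRI-O needs: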
cat <<EOF | sudo tee /etc/modules-load.d/crio.conf
overlay
br_netfilter
EOF

sudo modprobe overlay
sudo modprobe br_netfilter
cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables  = 1
net.ipv4.ip_forward                 = 1
#
# protectKernelDefaults
#
kernel.keys.root_maxbytes           = 25000000
kernel.keys.root_maxkeys            = 1000000
kernel.panic                        = 10
kernel.panic_on_oops                = 1
vm.overcommit_memory                = 1
vm.panic_on_oom                     = 0
EOF

sysctl --system
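
Quickly verify that the new values are live:

sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward kernel.panic_on_oops
# each should report the value configured above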

Ensure the eBPF filesystem is mounted (which should already be the case on RHEL 8.3):

mount | grep /sys/fs/bpf
# if present should output, e.g. "none on /sys/fs/bpf type bpf"...

If that is not the case, mount it with the following commands:

sudo mount bpffs -t bpf /sys/fs/bpf
sudo bash -c 'cat <<EOF >> /etc/fstab
none /sys/fs/bpf bpf rw,relatime 0 0
EOF'

etcd

useradd -r -c "etcd user" -s /sbin/nologin -M etcd

# Generate a 32-byte, base64-encoded key for the encryption config
head -c 32 /dev/urandom | base64

nano /etc/kubernetes/etcd-encription.yaml
---
apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
  - resources:
    - secrets
    providers:
    # the first provider in the list is used to encrypt new objects,
    # so the aesgcm provider must come before identity
    - aesgcm:
        keys:
        - name: key1
          secret: <BASE 64 ENCODED SECRET>
    # identity is kept last as a fallback for reading data that is not yet encrypted
    - identity: {}
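
The base64 string produced by the head command above goes into the <BASE 64 ENCODED SECRET> placeholder. A sketch of generating the key, substituting it and locking the file down (ENC_KEY is just a temporary shell variable):

ENC_KEY=$(head -c 32 /dev/urandom | base64)
sudo sed -i "s|<BASE 64 ENCODED SECRET>|${ENC_KEY}|" /etc/kubernetes/etcd-encription.yaml
# the file contains the encryption key, so keep it root-only
sudo chmod 600 /etc/kubernetes/etcd-encription.yaml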

CRI-O

VERSION=1.28

# CRI-O 1.28 packages are published on pkgs.k8s.io
cat <<EOF > /etc/yum.repos.d/cri-o.repo
[cri-o]
name=CRI-O
baseurl=https://pkgs.k8s.io/addons:/cri-o:/stable:/v${VERSION}/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/addons:/cri-o:/stable:/v${VERSION}/rpm/repodata/repomd.xml.key
EOF

yum install -y cri-o
# Configure user namespaces in CRI-O
mkdir /etc/crio/crio.conf.d/

sed -i 's|^cgroup_manager|#cgroup_manager|' /etc/crio/crio.conf
sed -i 's|^conmon_cgroup|#conmon_cgroup|' /etc/crio/crio.conf

cat <<EOF > /etc/crio/crio.conf.d/01-crio-base.conf
[crio]
storage_driver = "overlay"
storage_option = ["overlay.mount_program=/usr/bin/fuse-overlayfs"]
[crio.runtime]
cgroup_manager = "cgroupfs"
conmon_cgroup = 'pod'
EOF

cat <<EOF > /etc/crio/crio.conf.d/02-userns-workload.conf
[crio.runtime.workloads.userns]
activation_annotation = "io.kubernetes.cri-o.userns-mode"
allowed_annotations = ["io.kubernetes.cri-o.userns-mode"]
EOF

# Check the CRI-O SELinux config
grep -r "selinux" /etc/crio/
/etc/crio/crio.conf:selinux = true

CRI-O runs the containers as the containers user, so we need to create the /etc/subuid and /etc/subgid entries on every node.

SubUIDs/SubGIDs are ranges of user/group IDs that a user is allowed to use.

echo "containers:200000:268435456" >> /etc/subuid
echo "containers:200000:268435456" >> /etc/subgid

Kubeadm install

# Kubernetes 1.28 packages are published on pkgs.k8s.io
# (the legacy packages.cloud.google.com repositories are deprecated)
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.28/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.28/rpm/repodata/repomd.xml.key
EOF

CRIO_VERSION=$(crio --version | grep "^Version" | awk '{print $2}')
yum install -y kubelet-$CRIO_VERSION kubeadm-$CRIO_VERSION kubectl-$CRIO_VERSION cri-tools iproute-tc

echo "exclude=kubelet, kubectl, kubeadm, cri-o" >> /etc/yum.conf

Kubeadm init config

All of the snippets below belong to the same 010-kubeadm-conf-1-28-2.yaml file; merge the settings shown for each kind (ClusterConfiguration, InitConfiguration, JoinConfiguration, KubeletConfiguration, KubeProxyConfiguration) into a single document per kind, because kubeadm only accepts one document of each kind in the file.

Set the Kubernetes version:

# 010-kubeadm-conf-1-28-2.yaml
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
clusterName: k8s-main
kubernetesVersion: 1.28.2

Configure authentication and enable webhooks:

# 010-kubeadm-conf-1-28-2.yaml
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
  extraArgs:
    authorization-mode: "Node,RBAC"
    enable-admission-plugins: "NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook"

Set the node IP, and the load-balancer IP if you use an external load balancer for a multi-node setup:

# 010-kubeadm-conf-1-28-2.yaml
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
bootstrapTokens:
- token: "c2t0rj.cofbfnwwrb387890"
localAPIEndpoint:
  # local ip and port
  advertiseAddress: 192.168.56.12
  bindPort: 6443
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
# load-balancer ip or node ip and port
controlPlaneEndpoint: "192.168.56.12:6443"

Configure certificates and rotation:

# 010-kubeadm-conf-1-28-2.yaml
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
nodeRegistration:
  kubeletExtraArgs:
    rotate-server-certificates: "true"
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
certificatesDir: /etc/kubernetes/pki
apiServer:
  extraArgs:
    kubelet-certificate-authority: "/etc/kubernetes/pki/ca.crt"
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
enableServer: true
serverTLSBootstrap: true
rotateCertificates: true
featureGates:
    RotateKubeletServerCertificate: true
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: JoinConfiguration
nodeRegistration:
  kubeletExtraArgs:
    rotate-server-certificates: "true"

Configure Kubernetes to use CRI-O:

# 010-kubeadm-conf-1-28-2.yaml
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
nodeRegistration:
  criSocket: "unix:///var/run/crio/crio.sock"
  imagePullPolicy: IfNotPresent
  taints: null
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: JoinConfiguration
nodeRegistration:
  criSocket: "unix:///var/run/crio/crio.sock"
  imagePullPolicy: IfNotPresent
  taints: null

Set the CNI network in the Kubernetes config:

# 010-kubeadm-conf-1-28-2.yaml
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
networking:
  serviceSubnet: "10.96.0.0/12"
  podSubnet: "10.244.0.0/16"
  dnsDomain: "cluster.local"
apiServer:
  extraArgs:
    service-node-port-range: "30000-50000"
    anonymous-auth: "true"

Apply Pod Security Standards at the Cluster Level:

cat <<EOF > /etc/kubernetes/k8s-pss.yaml
apiVersion: apiserver.config.k8s.io/v1
kind: AdmissionConfiguration
plugins:
- name: PodSecurity
  configuration:
    apiVersion: pod-security.admission.config.k8s.io/v1beta1
    kind: PodSecurityConfiguration
    defaults:
      enforce: "restricted"
      enforce-version: "latest"
      audit: "restricted"
      audit-version: "latest"
      warn: "restricted"
      warn-version: "latest"
    exemptions:
      usernames: []
      runtimeClasses: []
      namespaces: [kube-system, cis-operator-system, cilium-spire, ceph-storage-system]
EOF
# Add your own exemption namespaces.
# 010-kubeadm-conf-1-28-2.yaml
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
  extraArgs:
    admission-control-config-file: /etc/kubernetes/k8s-pss.yaml
  extraVolumes:
  - name: "kubernetes-pss"
    hostPath: "/etc/kubernetes/k8s-pss.yaml"
    mountPath: "/etc/kubernetes/k8s-pss.yaml"
    pathType: "File"
    propagation: Bidirectional
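
Once the cluster is running, you can sanity-check that the restricted profile is enforced outside the exempted namespaces, for example by trying to start a privileged pod (namespace and pod name are arbitrary):

kubectl create namespace pss-test
kubectl -n pss-test run privileged-test \
  --image=registry.access.redhat.com/ubi8/ubi-minimal \
  --restart=Never --privileged
# expected: the request is rejected by the PodSecurity admission ("violates PodSecurity restricted:latest")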

Configure etcd folder and encryption:

# 010-kubeadm-conf-1-28-2.yaml
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
etcd:
  local:
    dataDir: /var/lib/etcd
apiServer:
  extraArgs:
    encryption-provider-config: "/etc/kubernetes/etcd-encription.yaml"
  extraVolumes:
  - name: "etc-kubernetes-etcd-enc"
    hostPath: "/etc/kubernetes/etcd-encription.yaml"
    mountPath: "/etc/kubernetes/etcd-encription.yaml"
    readOnly: true
    pathType: "File"
    propagation: HostToContainer

Configure default kernel option protection:

# 010-kubeadm-conf-1-28-2.yaml
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
nodeRegistration:
  kubeletExtraArgs:
    protect-kernel-defaults: "true"
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
protectKernelDefaults: true
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: JoinConfiguration
nodeRegistration:
  kubeletExtraArgs:
    protect-kernel-defaults: "true"

Disable profiling:

# 010-kubeadm-conf-1-28-2.yaml
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
  extraArgs:
    profiling: "false"
scheduler:
  extraArgs:
    profiling: "false"
controllerManager:
  extraArgs:
    profiling: "false"

Enable the default seccomp profile:

# 010-kubeadm-conf-1-28-2.yaml
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
featureGates:
    SeccompDefault: true
seccompDefault: true

Enable swap support:

# 010-kubeadm-conf-1-28-2.yaml
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
failSwapOn: false
featureGates:
    NodeSwap: true
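
failSwapOn: false only stops the kubelet from refusing to start while swap is active; the swap device itself is configured at the OS level, so check it on the node:

swapon --show
free -h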

Enable KubeletInUserNamespace:

# 010-kubeadm-conf-1-28-2.yaml
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: "cgroupfs"
featureGates:
    KubeletInUserNamespace: true
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "iptables"
# or "userspace"
conntrack:
# Skip setting sysctl value "net.netfilter.nf_conntrack_max"
  maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s

Enable audit logging:

# 010-kubeadm-conf-1-28-2.yaml
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
  extraArgs:
    audit-log-maxage: "30"
    audit-log-maxbackup: "10"
    audit-log-maxsize: "100"
    audit-log-path: "/var/log/kube-audit/audit.log"
    audit-policy-file: "/etc/kubernetes/audit-policy.yaml"
  extraVolumes:
  - name: "audit-config"
    hostPath: "/etc/kubernetes/audit-policy.yaml"
    mountPath: "/etc/kubernetes/audit-policy.yaml"
    readOnly: true
    pathType: "File"
  - name: "audit-log"
    hostPath: "/var/log/kube-audit"
    mountPath: "/var/log/kube-audit"
    pathType: "DirectoryOrCreate"

Configure audit-log policy:

cat << EOF > /etc/kubernetes/audit-policy.yaml
apiVersion: audit.k8s.io/v1 # This is required.
kind: Policy
# Don't generate audit events for all requests in RequestReceived stage.
omitStages:
  - "RequestReceived"
rules:
  # Log pod changes at RequestResponse level
  - level: RequestResponse
    resources:
    - group: ""
      # Resource "pods" doesn't match requests to any subresource of pods,
      # which is consistent with the RBAC policy.
      resources: ["pods", "deployments"]

  - level: RequestResponse
    resources:
    - group: "rbac.authorization.k8s.io"
      # Resource "pods" doesn't match requests to any subresource of pods,
      # which is consistent with the RBAC policy.
      resources: ["clusterroles", "clusterrolebindings"]

  # Log "pods/log", "pods/status" at Metadata level
  - level: Metadata
    resources:
    - group: ""
      resources: ["pods/log", "pods/status"]

  # Don't log requests to a configmap called "controller-leader"
  - level: None
    resources:
    - group: ""
      resources: ["configmaps"]
      resourceNames: ["controller-leader"]

  # Don't log watch requests by the "system:kube-proxy" on endpoints or services
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
    - group: "" # core API group
      resources: ["endpoints", "services"]

  # Don't log authenticated requests to certain non-resource URL paths.
  - level: None
    userGroups: ["system:authenticated"]
    nonResourceURLs:
    - "/api*" # Wildcard matching.
    - "/version"

  # Log the request body of configmap changes in kube-system.
  - level: Request
    resources:
    - group: "" # core API group
      resources: ["configmaps"]
    # This rule only applies to resources in the "kube-system" namespace.
    # The empty string "" can be used to select non-namespaced resources.
    namespaces: ["kube-system"]

  # Log configmap changes in all other namespaces at the RequestResponse level.
  - level: RequestResponse
    resources:
    - group: "" # core API group
      resources: ["configmaps"]

  # Log secret changes in all other namespaces at the Metadata level.
  - level: Metadata
    resources:
    - group: "" # core API group
      resources: ["secrets"]

  # Log all other resources in core and extensions at the Request level.
  - level: Request
    resources:
    - group: "" # core API group
    - group: "extensions" # Version of group should NOT be included.

  # A catch-all rule to log all other requests at the Metadata level.
  - level: Metadata
    # Long-running requests like watches that fall under this rule will not
    # generate an audit event in RequestReceived.
    omitStages:
      - "RequestReceived"
EOF
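
Before moving on, you can let kubeadm check the assembled configuration (assuming the merged snippets are saved as 010-kubeadm-conf-1-28-2.yaml on the node; recent kubeadm releases provide a config validate subcommand):

kubeadm config validate --config 010-kubeadm-conf-1-28-2.yaml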

Install Kubernetes

systemctl enable --now crio
crictl info

kubeadm config images pull --config 010-kubeadm-conf-1-28-2.yaml

systemctl enable kubelet.service

kubeadm init --skip-phases=addon/kube-proxy --config 010-kubeadm-conf-1-28-2.yaml

Post Install

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

kubectl get csr
kubectl get csr -oname | xargs kubectl certificate approve
kubectl apply -f 012-k8s-clusterrole.yaml

yum install -y https://harbottle.gitlab.io/harbottle-main/7/x86_64/harbottle-main-release.rpm
yum install -y kubectx

curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
echo 'PATH=$PATH:/usr/local/bin' >> /etc/profile
export PATH=$PATH:/usr/local/bin
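
At this point the control plane should be up; the node will report NotReady until the CNI is installed in the next step:

kubectl get nodes -o wide
kubectl get pods -n kube-system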

Install Cilium as CNI

Generate the Cilium configuration:

cat <<EOF > 031-cilium-helm-values.yaml
# Set kubeProxyReplacement to "strict" in order to prevent CVE-2020-8554 and fully remove kube-proxy.
# See https://cilium.io/blog/2020/12/11/kube-proxy-free-cve-mitigation for more information.
kubeProxyReplacement: "strict"

k8sServiceHost: 192.168.56.12
k8sServicePort: 6443
rollOutCiliumPods: true
priorityClassName: system-cluster-critical

ipv4:
  enabled: true
ipv6:
  enabled: false

bpf:
  masquerade: true

encryption:
  type: wireguard
  enabled: false
  nodeEncryption: false

# L7 policy
loadBalancer:
  l7:
    backend: envoy
envoy:
  enabled: true
  prometheus:
    enabled: true
    serviceMonitor:
      enabled: false

# L2 LoadBalancer service
l2announcements:
  enabled: true

# Api gateway
gatewayAPI:
  enabled: false

# Ingress controller
ingressController:
  enabled: false
  loadbalancerMode: shared

# mTLS
authentication:
  mode: required
  mutual:
    spire:
      enabled: false
      install:
        enabled: false
        server:
          dataStorage:
            enabled: false

endpointStatus:
  enabled: true
  status: policy

dashboards:
  enabled: false
  namespace: "monitoring-system"
  annotations:
    grafana_folder: "cilium"

hubble:
  enabled: true
  metrics:
    enableOpenMetrics: true
    enabled:
    - dns:query;ignoreAAAA
    - drop
    - tcp
    - flow:sourceContext=workload-name|reserved-identity;destinationContext=workload-name|reserved-identity
    - port-distribution
    - icmp
    - kafka:labelsContext=source_namespace,source_workload,destination_namespace,destination_workload,traffic_direction;sourceContext=workload-name|reserved-identity;destinationContext=workload-name|reserved-identity
    - policy:sourceContext=app|workload-name|pod|reserved-identity;destinationContext=app|workload-name|pod|dns|reserved-identity;labelsContext=source_namespace,destination_namespace
    - httpV2:exemplars=true;labelsContext=source_ip,source_namespace,source_workload,destination_ip,destination_namespace,destination_workload,traffic_direction
    serviceMonitor:
      enabled: false
    dashboards:
      enabled: false
      namespace: "monitoring-system"
      annotations:
        grafana_folder: "cilium"

  ui:
    enabled: true
    replicas: 1
    ingress:
      enabled: true
      hosts:
        - hubble.k8s.intra
      annotations:
        kubernetes.io/ingress.class: nginx
        cert-manager.io/cluster-issuer: ca-issuer
      tls:
      - secretName: hubble-ingress-tls
        hosts:
        - hubble.k8s.intra
    tolerations:
      - key: "node-role.kubernetes.io/master"
        operator: "Exists"
        effect: "NoSchedule"
      - key: "node-role.kubernetes.io/control-plane"
        operator: "Exists"
        effect: "NoSchedule"
    backend:
      resources:
        limits:
          cpu: 60m
          memory: 300Mi
        requests:
          cpu: 20m
          memory: 64Mi
    frontend:
      resources:
        limits:
          cpu: 1000m
          memory: 1024M
        requests:
          cpu: 100m
          memory: 64Mi
    proxy:
      resources:
        limits:
          cpu: 1000m
          memory: 1024M
        requests:
          cpu: 100m
          memory: 64Mi

  relay:
    enabled: true
    tolerations:
      - key: "node-role.kubernetes.io/master"
        operator: "Exists"
        effect: "NoSchedule"
      - key: "node-role.kubernetes.io/control-plane"
        operator: "Exists"
        effect: "NoSchedule"
    resources:
      limits:
        cpu: 100m
        memory: 500Mi
    prometheus:
      enabled: true
      serviceMonitor:
        enabled: false

operator:
  replicas: 1
  resources:
    limits:
      cpu: 1000m
      memory: 1Gi
    requests:
      cpu: 100m
      memory: 128Mi
  prometheus:
    enabled: true
    serviceMonitor:
      enabled: false
  dashboards:
    enabled: false
    namespace: "monitoring-system"
    annotations:
      grafana_folder: "cilium"

ipam:
  mode: "cluster-pool"
  operator:
    clusterPoolIPv4PodCIDRList: "10.43.0.0/16"
    clusterPoolIPv4MaskSize: 24
    clusterPoolIPv6PodCIDRList: "fd00::/104"
    clusterPoolIPv6MaskSize: 120

resources:
  limits:
    cpu: 4000m
    memory: 4Gi
  requests:
    cpu: 100m
    memory: 512Mi

prometheus:
  enabled: true
  # Default port value (9090) needs to be changed since the RHEL cockpit also listens on this port.
  port: 19090
  # Configure this serviceMonitor section AFTER the monitoring stack (Prometheus Operator) is installed!
  serviceMonitor:
    enabled: false
EOF

# Allow workloads to be scheduled on the control-plane node (single-node setup)
kubectl taint nodes --all node-role.kubernetes.io/control-plane-

helm repo add cilium https://helm.cilium.io/
helm upgrade --install cilium cilium/cilium \
  --namespace kube-system \
  -f 031-cilium-helm-values.yaml

kubectl get pods -A
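
A quick health check from inside the Cilium agent (the DaemonSet name, container name and label are the defaults used by the Helm chart):

kubectl -n kube-system get pods -l k8s-app=cilium
kubectl -n kube-system exec ds/cilium -c cilium-agent -- cilium status --brief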

Harden Kubernetes

There is an open-source tool, kube-bench, that checks your cluster against the CIS Kubernetes Benchmark best practices. We will use it to verify the results.

# kube-bench
# https://github.com/aquasecurity/kube-bench/releases/
yum install -y https://github.com/aquasecurity/kube-bench/releases/download/v0.6.5/kube-bench_0.6.5_linux_amd64.rpm
useradd -r -c "etcd user" -s /sbin/nologin -M etcd
chown etcd:etcd /var/lib/etcd
chmod 700 /var/lib/etcd

# kube-bench
kube-bench
kube-bench | grep "\[FAIL\]"
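
kube-bench can also be limited to specific benchmark targets, which is useful when re-running it on the other nodes later (a sketch; target names as documented by kube-bench):

# on a control-plane node
kube-bench run --targets master,controlplane,etcd,policies
# on a worker node
kube-bench run --targets node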

Join nodes

First, we need to get the join command from the master:

# on master1
kubeadm token create --print-join-command
kubeadm join 192.168.56.12:6443 --token c2t0rj.cofbfnwwrb387890 \
 --discovery-token-ca-cert-hash sha256:a52f4c16a6ce9ef72e3d6172611d17d9752dfb1c3870cf7c8ad4ce3bcb97547e

If the next node is a worker, we can simply use the command we got. If the next node is a master, we also need to generate a certificate key. You need a separate certificate key for every new master.

# on master1
## generate cert key
kubeadm certs certificate-key
29ab8a6013od73s8d3g4ba3a3b24679693e98acd796356eeb47df098c47f2773

## store cert key in secret
kubeadm init phase upload-certs --upload-certs --certificate-key=29ab8a6013od73s8d3g4ba3a3b24679693e98acd796356eeb47df098c47f2773
# on master2
kubeadm join 192.168.56.12:6443 --token c2t0rj.cofbfnwwrb387890 \
--discovery-token-ca-cert-hash sha256:a52f4c16a6ce9ef72e3d6172611d17d9752dfb1c3870cf7c8ad4ce3bcb97547e \
--control-plane --certificate-key 29ab8a6013od73s8d3g4ba3a3b24679693e98acd796356eeb47df098c47f2773
# on master3
kubeadm join 192.168.56.12:6443 --token c2t0rj.cofbfnwwrb387890 \
--discovery-token-ca-cert-hash sha256:a52f4c16a6ce9ef72e3d6172611d17d9752dfb1c3870cf7c8ad4ce3bcb97547e \
--control-plane --certificate-key 29ab8a6013od73s8d3g4ba3a3b24679693e98acd796356eeb47df098c47f2773

Finally, with every new node we need to approve the certificate signing requests for that node.

kubectl get csr -oname | xargs kubectl certificate approve
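
The one-liner above approves every outstanding CSR. If you prefer to approve only the pending kubelet serving requests, a more selective sketch:

kubectl get csr | awk '/kubernetes.io\/kubelet-serving/ && /Pending/ {print $1}' \
  | xargs -r kubectl certificate approve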

On every new control-plane node, repeat the etcd hardening steps:

useradd -r -c "etcd user" -s /sbin/nologin -M etcd
chown etcd:etcd /var/lib/etcd
chmod 700 /var/lib/etcd