Configure network with the nmstate operator

In this post I will show you how to use the nmstate operator to manage the network configuration of a Kubernetes host.

For this demo I will use a VM with two separate network interfaces. The first one carries the Kubernetes network (flannel); on the second one I will configure a bridge with the nmstate operator.

K3S

curl -sLS https://get.k3sup.dev | sh

mkdir /root/.kube
# IP of the first interface (the Kubernetes network)
IP=192.168.100.10

k3sup install --ip $IP --local \
  --context pik3s \
  --merge \
  --local-path $HOME/.kube/config

k3sup ready --context pik3s
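
Once k3sup reports the cluster as ready, it is worth a quick sanity check that the node registered:

kubectl get nodes -o wide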

kubernetes-nmstate

# kubernetes-nmstate drives NetworkManager, so it must be running on the node
yum install NetworkManager -y
systemctl start NetworkManager

# the nmstate CLI and library are packaged in the nmstate COPR repository
dnf copr enable nmstate/nmstate
dnf install nmstate
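
Before deploying the operator you can sanity-check the host side: nmstatectl, installed with the nmstate package, prints the node's current network state.

nmstatectl show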
kubectl apply -f https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.80.1/nmstate.io_nmstates.yaml
kubectl apply -f https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.80.1/namespace.yaml
kubectl apply -f https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.80.1/service_account.yaml
kubectl apply -f https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.80.1/role.yaml
kubectl apply -f https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.80.1/role_binding.yaml
kubectl apply -f https://github.com/nmstate/kubernetes-nmstate/releases/download/v0.80.1/operator.yaml
Next, create an NMState custom resource; this tells the operator to deploy its components:

cat <<EOF | kubectl create -f -
apiVersion: nmstate.io/v1
kind: NMState
metadata:
  name: nmstate
EOF
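
The handler pods should come up shortly. Assuming the manifests above installed everything into the nmstate namespace (which namespace.yaml creates), you can check with:

kubectl get pods -n nmstate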
Now define a NodeNetworkConfigurationPolicy that builds the bridge br1 on top of the second interface, enp0s9:

cat <<EOF | kubectl create -f -
apiVersion: nmstate.io/v1
kind: NodeNetworkConfigurationPolicy
metadata:
  name: br1-enp0s9
spec:
  desiredState:
    interfaces:
    - name: br1
      type: linux-bridge
      state: up
      ipv4:
        address:
        - ip: 192.168.200.10
          prefix-length: 24
        dhcp: false
        enabled: true
      bridge:
        port:
        - name: enp0s9
EOF
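
kubernetes-nmstate tracks the policy and its per-node enforcement as custom resources (short names nncp and nnce), so you can confirm the bridge was configured with:

kubectl get nncp
kubectl get nnce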

Multus on K3S

wget https://raw.githubusercontent.com/k8snetworkplumbingwg/multus-cni/master/deployments/multus-daemonset.yml

Edit the manifest so Multus uses the K3S-specific kubeconfig path and CNI directories:

nano multus-daemonset.yml
---
...
containers:
  - name: kube-multus
    image: nfvpe/multus:v3.4.1
    command: ["/entrypoint.sh"]
    args:
      - "--multus-conf-file=auto"
      - "--cni-version=0.3.1"
      # Add the following arg
      - "--multus-kubeconfig-file-host=/var/lib/rancher/k3s/agent/etc/cni/net.d/multus.d/multus.kubeconfig"
...
volumes:
  - name: cni
    hostPath:
      path: /var/lib/rancher/k3s/agent/etc/cni/net.d
  - name: cnibin
    hostPath:
      path: /var/lib/rancher/k3s/data/current/bin
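
After saving the changes, apply the DaemonSet and check that the Multus pod comes up in kube-system:

kubectl apply -f multus-daemonset.yml
kubectl -n kube-system get pods | grep multus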
Next, define a NetworkAttachmentDefinition that tells the bridge CNI plugin to attach pods to br1 and hand out addresses from a host-local range:

nano multus-bridge.yaml
---
apiVersion: k8s.cni.cncf.io/v1
kind: NetworkAttachmentDefinition
metadata:
  name: multus-br1
spec:
  config: |
    {
      "cniVersion": "0.3.1",
      "type": "bridge",
      "bridge": "br1",
      "ipam": {
        "type": "host-local",
        "subnet": "192.168.200.0/24",
         "rangeStart": "192.168.200.240",
         "rangeEnd": "192.168.200.250"
      }
    }    
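
Apply the definition and verify that it was registered (net-attach-def is the short name for the CRD):

kubectl apply -f multus-bridge.yaml
kubectl get net-attach-def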

Demo app

For the test, define two pods that each request the multus-br1 network through the k8s.v1.cni.cncf.io/networks annotation:

nano demo-app.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: net-pod
  annotations:
    k8s.v1.cni.cncf.io/networks: multus-br1
spec:
  containers:
  - name: netshoot-pod
    image: nicolaka/netshoot
    command: ["tail"]
    args: ["-f", "/dev/null"]
  terminationGracePeriodSeconds: 0
---
apiVersion: v1
kind: Pod
metadata:
  name: net-pod2
  annotations:
    k8s.v1.cni.cncf.io/networks: multus-br1
spec:
  containers:
  - name: netshoot-pod
    image: nicolaka/netshoot
    command: ["tail"]
    args: ["-f", "/dev/null"]
  terminationGracePeriodSeconds: 0
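
Create both pods and wait for them to become ready; Multus should attach a second interface, net1, from the br1 network to each of them:

kubectl apply -f demo-app.yaml
kubectl wait --for=condition=Ready pod/net-pod pod/net-pod2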
kubectl exec -it net-pod -- ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0@if22: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP group default
    link/ether 56:60:ac:ef:12:be brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet 10.42.0.14/24 brd 10.42.0.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::5460:acff:feef:12be/64 scope link
       valid_lft forever preferred_lft forever
3: net1@if24: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
    link/ether a2:0a:ec:07:64:6b brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet 192.168.200.240/24 brd 192.168.200.255 scope global net1
       valid_lft forever preferred_lft forever
    inet6 fe80::a00a:ecff:fe07:646b/64 scope link
       valid_lft forever preferred_lft forever

kubectl exec -it net-pod2 -- ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0@if23: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP group default
    link/ether 2e:b3:6e:d8:cf:35 brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet 10.42.0.15/24 brd 10.42.0.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::2cb3:6eff:fed8:cf35/64 scope link
       valid_lft forever preferred_lft forever
3: net1@if25: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
    link/ether 52:96:53:1a:58:15 brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet 192.168.200.241/24 brd 192.168.200.255 scope global net1
       valid_lft forever preferred_lft forever
    inet6 fe80::5096:53ff:fe1a:5815/64 scope link
       valid_lft forever preferred_lft forever
# from net-pod, ping net-pod2's ip
kubectl exec -it net-pod -- ping -c 1 -I net1 192.168.200.241
PING 192.168.200.241 (192.168.200.241) from 192.168.200.240 net1: 56(84) bytes of data.
64 bytes from 192.168.200.241: icmp_seq=1 ttl=64 time=0.220 ms

--- 192.168.200.241 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.220/0.220/0.220/0.000 ms

# from net-pod2, ping net-pod's ip
kubectl exec -it net-pod2 -- ping -c 1 -I net1 192.168.200.240
PING 192.168.200.240 (192.168.200.240) from 192.168.200.241 net1: 56(84) bytes of data.
64 bytes from 192.168.200.240: icmp_seq=1 ttl=64 time=0.072 ms

--- 192.168.200.240 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.072/0.072/0.072/0.000 ms

# from net-pod, ping the host's br1 address
kubectl exec -it net-pod -- ping -c 1 -I net1 192.168.200.10
PING 192.168.200.10 (192.168.200.10) from 192.168.200.240 net1: 56(84) bytes of data.
64 bytes from 192.168.200.10: icmp_seq=1 ttl=64 time=0.082 ms

--- 192.168.200.10 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.082/0.082/0.082/0.000 ms