Use Multus to separate network traffic

Page content

In this post I will show you how you can use Multus CNI and Calico to create Kubernetes pods in different networks.

Install default network

The Kubernetes cluster is installed with kubeadm and the --pod-network-cidr=10.244.0.0/16 option.

kubectl apply -f https://docs.projectcalico.org/manifests/tigera-operator.yaml

nano custom-resources.yaml
---
# Calico Installation custom resource, consumed by the Tigera operator
# (applied above). This creates the cluster's default pod network.
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  # Configures Calico networking.
  calicoNetwork:
    # BGP is disabled; inter-node pod traffic is carried over VXLAN instead
    # (see the vxlan.calico interface inspected below).
    bgp: Disabled
    # linuxDataplane: BPF
    # Note: The ipPools section cannot be modified post-install.
    ipPools:
    - blockSize: 26
      # Must match the --pod-network-cidr passed to kubeadm.
      cidr: 10.244.0.0/16
      # VXLAN encapsulation is used only between nodes on different subnets.
      encapsulation: VXLANCrossSubnet
      natOutgoing: Enabled
      nodeSelector: all()

kubectl apply -f custom-resources.yaml

Install Secondary network

ip -d link show vxlan.calico
9: vxlan.calico: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN mode DEFAULT group default
    link/ether 66:2f:69:dc:0c:cc brd ff:ff:ff:ff:ff:ff promiscuity 0 minmtu 68 maxmtu 65535
    vxlan id 4096 local 192.168.200.10 dev enp0s9 srcport 0 0 dstport 4789 nolearning ttl auto ageing 300 udpcsum noudp6zerocsumtx noudp6zerocsumrx addrgenmode eui64 numtxqueues 1 numrxqueues 1 gso_max_size 65536 gso_max_segs 65535

ip a show vxlan.calico
9: vxlan.calico: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default
    link/ether 66:2f:69:dc:0c:cc brd ff:ff:ff:ff:ff:ff
    inet 10.250.58.192/32 scope global vxlan.calico
       valid_lft forever preferred_lft forever
    inet6 fe80::642f:69ff:fedc:ccc/64 scope link
       valid_lft forever preferred_lft forever

Deploy multus

kubectl apply -f https://raw.githubusercontent.com/k8snetworkplumbingwg/multus-cni/master/deployments/multus-daemonset-thick-plugin.yml

kubectl get pods --all-namespaces | grep -i multus
cat /etc/cni/net.d/00-multus.conf | jq

Check the Multus config to verify that Calico was picked up as the default network.

# NetworkAttachmentDefinition for a secondary Calico network, created in the
# current ("default") namespace since no metadata.namespace is set.
# Pods attached to it receive addresses from 172.22.0.0/16.
cat <<EOF | kubectl apply -f -
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
  name: 22-calico
spec: 
  # Delegated CNI config: the Calico plugin handles networking, but IP
  # assignment uses the generic host-local IPAM plugin (per-node allocation
  # from the subnet below) instead of calico-ipam.
  config: '{
    "cniVersion": "0.3.1",
    "type": "calico",
    "log_level": "info",
    "datastore_type": "kubernetes",
    "ipam": {
      "type": "host-local",
      "subnet": "172.22.0.0/16"
    },
    "policy": {
      "type": "k8s"
    },
    "kubernetes": {
      "kubeconfig": "/etc/cni/net.d/calico-kubeconfig"
    }
  }'
EOF

# Second NetworkAttachmentDefinition, this time created in kube-system and
# allocating pod addresses from 172.26.0.0/16.
cat <<EOF | kubectl apply -f -
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
  name: 26-calico
  # NOTE: unlike 22-calico above, this NAD lives in kube-system.
  namespace: kube-system
spec: 
  # Same delegated Calico config as 22-calico, differing only in the
  # host-local IPAM subnet.
  config: '{
    "cniVersion": "0.3.1",
    "type": "calico",
    "log_level": "info",
    "datastore_type": "kubernetes",
    "ipam": {
      "type": "host-local",
      "subnet": "172.26.0.0/16"
    },
    "policy": {
      "type": "k8s"
    },
    "kubernetes": {
      "kubeconfig": "/etc/cni/net.d/calico-kubeconfig"
    }
  }'
EOF
# Two test pods, each overriding the cluster's default network via the Multus
# "v1.multus-cni.io/default-network" annotation (format: <namespace>/<NAD name>).
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: net-pod
  annotations:
    # NOTE(review): 26-calico was created in the kube-system namespace above,
    # yet is referenced here as default/26-calico — the exec output below
    # shows the pod did get a 172.26.x.x address, but confirm how Multus
    # resolves this reference before relying on it.
    v1.multus-cni.io/default-network: default/26-calico
spec:
  containers:
  - name: netshoot-pod
    image: nicolaka/netshoot
    # Keep the container alive so we can kubectl exec into it.
    command: ["tail"]
    args: ["-f", "/dev/null"]
  terminationGracePeriodSeconds: 0
---
apiVersion: v1
kind: Pod
metadata:
  name: net-pod2
  annotations:
    # Attach to the 22-calico network (172.22.0.0/16) in the default namespace.
    v1.multus-cni.io/default-network: default/22-calico
spec:
  containers:
  - name: netshoot-pod
    image: nicolaka/netshoot
    # Keep the container alive so we can kubectl exec into it.
    command: ["tail"]
    args: ["-f", "/dev/null"]
  terminationGracePeriodSeconds: 0
EOF
kubectl exec -it net-pod -- ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
3: eth0@if66: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP group default
    link/ether fa:26:21:4b:3c:94 brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet 172.26.0.4/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::f826:21ff:fe4b:3c94/64 scope link
       valid_lft forever preferred_lft forever

kubectl exec -it net-pod2 -- ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
3: eth0@if67: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP group default
    link/ether fe:16:f1:9d:5e:40 brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet 172.22.0.6/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::fc16:f1ff:fe9d:5e40/64 scope link
       valid_lft forever preferred_lft forever
kubectl exec -it net-pod -- ping -c 1 172.22.0.6
PING 172.22.0.6 (172.22.0.6) 56(84) bytes of data.
64 bytes from 172.22.0.6: icmp_seq=1 ttl=63 time=0.099 ms

--- 172.22.0.6 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.099/0.099/0.099/0.000 ms