Create a bridge on the node (with an IP address):
cluster/cli.sh ssh node01
echo "net.bridge.bridge-nf-call-iptables = 0" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p
sudo ip link add red type bridge
sudo ip link set dev red up
# either give the bridge a local address or enslave a real interface to it as master
sudo ip addr add 172.16.0.20/24 dev red
#sudo ip link set eth0 master red
ip link show red
exit
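Optionally, sanity-check the setup on the node (re-connect with cluster/cli.sh ssh node01; values should match what was configured above):
sysctl net.bridge.bridge-nf-call-iptables   # should report 0
ip -d link show red                         # should show "bridge" and state UP
ip addr show dev red                        # should show 172.16.0.20/24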
Create the device plugin and its configuration:
cluster/kubectl.sh create configmap device-plugin-network-bridge --from-literal=bridges="red"
cluster/kubectl.sh create -f ~/projects/l2-net-demo/bridge-ds.yml
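Check that the device plugin pods are running and that the node now advertises the bridge as an allocatable resource (the exact resource name depends on the plugin; the name shown in the comment below is only an assumption):
cluster/kubectl.sh get pods -o wide
cluster/kubectl.sh describe node node01 | grep -i -A 10 allocatable
# look for an extended resource advertising the "red" bridge,
# e.g. something like bridge.network.kubevirt.io/red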
Create a VM connected to the "red" bridge:
vi ~/projects/l2-net-demo/vmi-bridge.yaml
cluster/kubectl.sh create -f ~/projects/l2-net-demo/vmi-bridge.yaml
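Wait for the VMI to be scheduled and started:
cluster/kubectl.sh get vmis
# vmi-bridge should eventually reach the Running phase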
See what happened in the launcher pod:
cluster/kubectl.sh get pods
cluster/kubectl.sh exec -it virt-launcher-vmi-bridge-<...> -- ip link
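If you prefer not to copy the generated pod name by hand, it can be captured with a small shell helper (this assumes cluster/kubectl.sh passes kubectl output through unchanged and that only one vmi-bridge launcher pod exists):
POD=$(cluster/kubectl.sh get pods --no-headers | awk '/virt-launcher-vmi-bridge/ {print $1}')
cluster/kubectl.sh exec -it "$POD" -- ip link
# an additional interface next to eth0 should appear for the "red" bridge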
Add an IP address to the VM's L2 interface:
cluster/virtctl.sh console vmi-bridge
sudo ip link set dev eth1 up
sudo ip addr add 172.16.0.11/24 dev eth1
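Confirm the address was applied (still in the VM console):
ip addr show dev eth1
# expect 172.16.0.11/24 and state UP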
Check connectivity with ping (first from the VM to the node's bridge, then from the node to the VM):
cluster/virtctl.sh console vmi-bridge
ping -I eth1 172.16.0.20
cluster/cli.sh ssh node01
ping -I red 172.16.0.11
Check the ARP table (in the VM console):
arp -i eth1
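The node side can be checked as well; after the pings above, its neighbour table should contain the VM's address (run on node01):
cluster/cli.sh ssh node01
ip neigh show dev red
# expect an entry for 172.16.0.11 with the VM's MAC address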