Multi-arch Bare Metal Kubernetes Cluster with Docker on Ubuntu 20.04

This example config uses 1 local computer, 1 master node, and 3 worker nodes.

Setup

Plan out the setup and write it into ssh config file (on local computer)

vim $HOME/.ssh/config

# pi8
Host k8s-master
HostName k8s-master
User master
IdentityFile ~/.ssh/id_k8s

# pi4
Host k8s-w1
HostName k8s-w1
User master
IdentityFile ~/.ssh/id_k8s

# e7240
Host k8s-w2
HostName k8s-w2
User master
IdentityFile ~/.ssh/id_k8s

# n5050
Host k8s-w3
HostName k8s-w3
User master
IdentityFile ~/.ssh/id_k8s

Create master user (on each node)

sudo adduser master

groups

sudo usermod -aG adm,dialout,cdrom,floppy,sudo,audio,dip,video,plugdev,netdev,lxd master

sudo hostnamectl set-hostname <HOSTNAME>

On Raspi:
sudo sed -i 's/preserve_hostname: false/preserve_hostname: true/' /etc/cloud/cloud.cfg

sudo hostnamectl

sudo reboot

Create ssh key and distribute to each node (on local computer)

ssh-keygen -t rsa -b 4096 -f $HOME/.ssh/id_k8s

ssh-copy-id -i $HOME/.ssh/id_k8s master@<HOSTNAME>

Test ssh keys and delete default ubuntu user

ssh <HOSTNAME>

sudo deluser --remove-home ubuntu

Enable cgroup memory (Raspi nodes)

sudo vim /boot/firmware/cmdline.txt      # root required; append to the end of the existing single line
cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory

sudo reboot

If exposed on internet

sudo vim /etc/ssh/sshd_config

PermitRootLogin no
PasswordAuthentication no
PubkeyAuthentication yes

sudo /usr/sbin/sshd -t

sudo systemctl restart sshd.service

sudo reboot

Check for and disable swap (on each node)

free

sudo swapoff -a

sudo vim /etc/fstab          # comment out or delete the swap entry so swap stays off after reboot

Setup Docker (on each node)

sudo modprobe overlay

sudo modprobe br_netfilter

echo 'net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1' | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf

sudo sysctl --system

sudo apt-get update && sudo apt-get -y upgrade

sudo apt-get install -y docker.io

Install K8s (on each node)

sudo apt-get -y install apt-transport-https

curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -

echo "deb https://apt.kubernetes.io kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
# NOTE: apt.kubernetes.io / packages.cloud.google.com are deprecated and frozen;
# new installs should use the community-owned pkgs.k8s.io repositories instead.

sudo apt-get update

sudo apt-get -y install kubeadm kubelet kubectl

sudo apt-mark hold kubelet kubeadm kubectl

Install K8s (on master)

mkdir -p $HOME/.kube

ip a

# Write the kubeadm config used by "kubeadm init" below.
# podSubnet uses the conventional 10.244.0.0/16: the original /12 was
# misaligned (10.244.0.0/12 actually normalizes to 10.240.0.0/12) and far
# larger than a small home cluster needs.
mkdir -p "$HOME/.kube"   # self-contained even if the earlier mkdir step was skipped
cat <<EOF > "$HOME/.kube/kubeadm-config.yaml"
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
nodeRegistration:
  name: pi8
  criSocket: "/var/run/dockershim.sock"
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: stable
controlPlaneEndpoint: "pi8:6443"
networking:
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
EOF

echo '192.168.0.13 pi8' | sudo tee -a /etc/hosts

sudo kubeadm init --config=$HOME/.kube/kubeadm-config.yaml --upload-certs | tee $HOME/.kube/kubeadm-init.out

sudo cat /etc/kubernetes/admin.conf > $HOME/.kube/config

sudo chown $(id -u):$(id -g) $HOME/.kube/config

Install Calico (on master)

curl https://docs.projectcalico.org/manifests/calico.yaml -o $HOME/.kube/calico.yaml

vim $HOME/.kube/calico.yaml         # uncomment and change to podSubnet

 - name: CALICO_IPV4POOL_CIDR
   value: "10.244.0.0/16"

kubectl apply -f $HOME/.kube/calico.yaml

kubectl -n kube-system set env daemonset/calico-node IP_AUTODETECTION_METHOD=can-reach=10.244.0.1

Join Network from Workers

echo '192.168.0.13 pi8' | sudo tee -a /etc/hosts

sudo kubeadm join pi8:6443 --token <TOKEN> --discovery-token-ca-cert-hash sha256:<HASH>

Finish up on Master

kubectl get nodes

kubectl label node <NODE> node-role.kubernetes.io/worker=worker

kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl

echo 'alias k=kubectl 
complete -F __start_kubectl k' | tee -a $HOME/.bashrc

kubectl describe node | grep -i taint

kubectl taint nodes --all node-role.kubernetes.io/master-

Setup MetalLB (on master)

kubectl get configmap kube-proxy -n kube-system -o yaml | \
sed -e "s/strictARP: false/strictARP: true/" | \
kubectl apply -f - -n kube-system
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.9.3/manifests/namespace.yaml
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.9.3/manifests/metallb.yaml
# On first install only
kubectl create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"
# Write the MetalLB layer-2 config: LoadBalancer services get IPs from this
# range, which must be unused addresses on the nodes' L2 segment (outside the
# router's DHCP pool).
# NOTE(review): node IPs elsewhere in this document are 192.168.0.x — confirm
# that 192.168.1.240-250 is actually on the same subnet as the nodes.
mkdir -p "$HOME/.kube"   # self-contained even if the earlier mkdir step was skipped
cat <<EOF > "$HOME/.kube/metallb-config.yaml"
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - 192.168.1.240-192.168.1.250
EOF

kubectl apply -f $HOME/.kube/metallb-config.yaml

Test MetalLB

# Test workload: 4 nginx replicas behind a MetalLB LoadBalancer service.
# Use the multi-arch "nginx" manifest image instead of arm64v8/nginx so pods
# can schedule on BOTH the arm64 (Pi) and amd64 (e7240/n5050) workers — an
# arch-pinned image would leave pods crash-looping on the x86 nodes.
cat <<EOF > nginx-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  selector:
    matchLabels:
      app: nginx
  replicas: 4
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  selector:
    app: nginx
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
  externalTrafficPolicy: Local
  type: LoadBalancer
EOF

kubectl apply -f nginx-deployment.yaml

kubectl get all -o wide

kubectl delete -f nginx-deployment.yaml

Delete nodes

kubectl drain <NODE> --ignore-daemonsets --delete-local-data     # on kubectl >= 1.20 the flag is --delete-emptydir-data
kubectl delete node <NODE>

Reset Script

sudo kubeadm reset

If that doesn't work...

# Write a reset helper for when "kubeadm reset" alone doesn't clean up.
# The delimiter is quoted ('EOF') so the script is stored verbatim: $HOME is
# resolved when the script RUNS rather than being baked in at write time.
mkdir -p "$HOME/.kube"   # self-contained even if the earlier mkdir step was skipped
cat <<'EOF' > "$HOME/.kube/k8sReset.sh"
#!/bin/bash
# script to reset k8s/kubeadm/calico
# Requires net-tools (sudo apt install -y net-tools)

# Remove the packages and wipe all state left behind by kubeadm/calico.
sudo apt purge -y kubeadm kubectl kubelet kubernetes-cni --allow-change-held-packages
sudo rm -rf /var/lib/cni/ /var/lib/calico/ /var/lib/kubelet/ /var/lib/etcd/ /etc/kubernetes/ /etc/cni/ /run/kubernetes/ $HOME/.kube/config
# Flush every iptables chain kube-proxy/calico may have created.
sudo iptables -F && sudo iptables -t nat -F && sudo iptables -t mangle -F && sudo iptables -X

# Reinstall and re-hold the packages at their current version.
sudo apt install -y kubeadm kubectl kubelet kubernetes-cni
sudo apt-mark hold kubelet kubeadm kubectl

sudo systemctl daemon-reload
sudo systemctl restart kubelet

# Verify whether any control-plane / kubelet / etcd ports are still bound
# (6443 apiserver, 10259 scheduler, 10257 controller-manager, 10250 kubelet,
# 2379/2380 etcd).
sudo netstat -lnp | grep -E ':(6443|10259|10257|10250|2379|2380)'
EOF

sudo chmod 700 $HOME/.kube/k8sReset.sh

sudo kubeadm reset  && $HOME/.kube/k8sReset.sh