install Kubernetes on Debian 12 Bookworm

Sources:
https://kubernetes.io/docs/setup/
https://www.server-world.info/en/note?os=Debian_12&p=kubernetes&f=1

  • prepare Hosts [all nodes]
    • disable swap
      comment out the swap entry in /etc/fstab and run swapoff -a (see the sketch after this list)
    • add cluster DNS to /etc/hosts
      • e.g. onprem.cloud
        echo "127.0.0.1 onprem.cloud" >> /etc/hosts   # on worker nodes, point onprem.cloud at the control plane's real IP instead of 127.0.0.1
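
A minimal sketch for disabling swap persistently (assumes the swap entry in /etc/fstab is a normal whitespace-separated line):
# comment out any swap entries so swap stays off after a reboot
sudo sed -i '/\sswap\s/ s/^/#/' /etc/fstab
# turn swap off for the running system and verify (Swap should show 0)
sudo swapoff -a
free -h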
apt install -y curl gpg
# load the kernel modules needed by CRI-O (overlay) and the bridge sysctls (br_netfilter)
echo -e "overlay\nbr_netfilter" | sudo tee -a /etc/modules
modprobe overlay
modprobe br_netfilter
# make the settings persistent via /etc/sysctl.conf (or a drop-in, see below):
# net.ipv4.ip_forward=1
# net.bridge.bridge-nf-call-iptables=1
# net.bridge.bridge-nf-call-ip6tables=1
# apply them to the running system (the net.bridge keys only exist once br_netfilter is loaded)
sysctl -w net.ipv4.ip_forward=1
sysctl -w net.bridge.bridge-nf-call-iptables=1
sysctl -w net.bridge.bridge-nf-call-ip6tables=1
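
Alternatively, a sketch of the persistent variant using drop-in files (the k8s.conf file names are just a convention, not required):
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
sudo sysctl --system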
# kubectl/cilium as root will use the cluster admin config (admin.conf only exists on control plane nodes after 'kubeadm init')
export KUBECONFIG=/etc/kubernetes/admin.conf
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> /root/.bashrc
  • add K8s Repo [all nodes]
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.31/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg

echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.31/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list
  • add CRI-O Repo [all nodes]
curl -fsSL https://pkgs.k8s.io/addons:/cri-o:/stable:/v1.31/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/cri-o-apt-keyring.gpg

echo "deb [signed-by=/etc/apt/keyrings/cri-o-apt-keyring.gpg] https://pkgs.k8s.io/addons:/cri-o:/stable:/v1.31/deb/ /" | sudo tee /etc/apt/sources.list.d/cri-o.list 
  • install K8s (https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/) and Container Runtime CRI-O (https://cri-o.io/) [all nodes]
sudo apt update
sudo apt install -y cri-o kubelet kubeadm kubectl

sudo systemctl start crio.service
sudo systemctl enable crio.service

sudo apt-mark hold cri-o kubelet kubeadm kubectl
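
A quick sanity check that the tooling landed (versions will be whatever 1.31 patch release apt selected):
kubeadm version
kubectl version --client
crio --version
systemctl status crio --no-pager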

  • install CNI Cilium (https://docs.cilium.io/en/stable/gettingstarted/k8s-install-default/) [all nodes]

# CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt)
CILIUM_CLI_VERSION=v0.16.18
curl -L --fail --remote-name-all https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-linux-amd64.tar.gz{,.sha256sum}
sha256sum --check cilium-linux-amd64.tar.gz.sha256sum
sudo tar xzvfC cilium-linux-amd64.tar.gz /usr/local/bin
rm cilium-linux-amd64.tar.gz{,.sha256sum}
# HUBBLE_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/hubble/master/stable.txt)
HUBBLE_VERSION=v1.16.1
curl -L --fail --remote-name-all https://github.com/cilium/hubble/releases/download/$HUBBLE_VERSION/hubble-linux-amd64.tar.gz{,.sha256sum}
sha256sum --check hubble-linux-amd64.tar.gz.sha256sum
sudo tar xzvfC hubble-linux-amd64.tar.gz /usr/local/bin
rm hubble-linux-amd64.tar.gz{,.sha256sum}
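
Optional check that both CLIs are on PATH (before a cluster exists, cilium may complain that it cannot reach one; that is expected):
cilium version
hubble version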
  • Create Cluster [control plane]
sudo kubeadm config images pull
sudo kubeadm init --control-plane-endpoint=onprem.cloud --v=5 --pod-network-cidr=10.244.0.0/16
# the node stays 'NotReady' until the CNI (Cilium) is installed below
watch --color 'sudo kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes'

sudo cilium install
sudo cilium status
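
The same init can be expressed as a config file; a sketch assuming the kubeadm v1beta3 API and Kubernetes v1.31 (adjust kubernetesVersion to the installed patch release). --upload-certs prints a certificate key that a second control plane can use to join within two hours:
cat > kubeadm-config.yaml <<EOF
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: v1.31.0
controlPlaneEndpoint: "onprem.cloud:6443"
networking:
  podSubnet: "10.244.0.0/16"
EOF
sudo kubeadm init --config kubeadm-config.yaml --upload-certs --v=5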
  • Join another control plane [control plane]
# on the existing control plane: print a fresh join command and re-upload the control-plane certificates
kubeadm token create --print-join-command
sudo kubeadm init phase upload-certs --upload-certs
# on the new control plane: run the printed join command with the two control-plane flags appended
sudo kubeadm join onprem.cloud:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash> --control-plane --certificate-key <key-from-upload-certs>

# Cilium is installed cluster-wide already; on the new control plane just verify it is healthy
sudo cilium status
  • Join Nodes [worker nodes]
# print the join command on the control plane, then run the printed 'kubeadm join ...' line on each worker with sudo
kubeadm token create --print-join-command
  • reset K8s Node
kubeadm reset
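
kubeadm reset does not clean up CNI config or iptables rules (it says so in its own output); a cleanup sketch:
sudo rm -rf /etc/cni/net.d
sudo iptables -F && sudo iptables -t nat -F && sudo iptables -t mangle -F && sudo iptables -X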
  • install Helm (https://helm.sh/) and Longhorn (https://longhorn.io/) [control plane]
curl https://baltocdn.com/helm/signing.asc | gpg --dearmor | sudo tee /usr/share/keyrings/helm.gpg > /dev/null

echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list


sudo apt update
sudo apt install helm
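
Longhorn expects open-iscsi on every node (and nfs-common for RWX volumes), per its prerequisites; a sketch:
# run on every node before installing Longhorn
sudo apt install -y open-iscsi nfs-common
sudo systemctl enable --now iscsid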
helm repo add longhorn https://charts.longhorn.io
helm repo update
helm install longhorn longhorn/longhorn --namespace longhorn-system --create-namespace --version 1.7.1

kubectl -n longhorn-system get pod
  • test Cluster
kubectl create namespace test
kubectl config set-context --current --namespace=test
kubectl create deployment whoami --image=traefik/whoami:latest
#kubectl expose deployment whoami --name whoami-service --port 80 --target-port=30080 --type NodePort
kubectl create service nodeport whoami --node-port=30080 --tcp=80:80
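
Quick check from outside the cluster (replace <node-ip> with any node's address; whoami echoes the request details):
curl http://<node-ip>:30080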

https://kubernetes.io/de/docs/reference/kubectl/cheatsheet/

  • install Kubernetes Dashboard (https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard)

helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
helm upgrade --install kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard --create-namespace --namespace kubernetes-dashboard
kubectl -n kubernetes-dashboard port-forward svc/kubernetes-dashboard-kong-proxy 8443:443
kubectl create serviceaccount admin-user -n kubernetes-dashboard
kubectl create clusterrolebinding admin-user --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:admin-user
kubectl -n kubernetes-dashboard create token admin-user
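# while the port-forward is running, open https://localhost:8443 in a browser and sign in with the token printed above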

https://github.com/kubernetes/dashboard/blob/master/docs/user/access-control/creating-sample-user.md
