install Kubernetes on Debian 12 Bookworm

Sources:
https://kubernetes.io/docs/setup/
https://www.server-world.info/en/note?os=Debian_12&p=kubernetes&f=1

  • prepare Hosts [all nodes]
    • disable swap
      edit /etc/fstab && swapoff -a
    • add cluster DNS to /etc/hosts
      • e.g. onprem.cloud
        echo '127.0.0.1 onprem.cloud' >> /etc/hosts
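
A sketch of the swap step above, assuming the swap entry in /etc/fstab should simply be commented out rather than removed:

# turn swap off now and keep it off across reboots
swapoff -a
sed -i '/\sswap\s/ s/^/#/' /etc/fstab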
apt install -y curl gpg iptables
# add kernel modules (br_netfilter must be loaded before the bridge sysctls below can be set)
echo -e "overlay\nbr_netfilter" | tee -a /etc/modules
modprobe overlay
modprobe br_netfilter
# apply the required sysctls now; for persistence add them to /etc/sysctl.conf (or see the drop-in sketch below)
# net.ipv4.ip_forward=1
# net.bridge.bridge-nf-call-iptables=1
# net.bridge.bridge-nf-call-ip6tables=1
sysctl -w net.ipv4.ip_forward=1
sysctl -w net.bridge.bridge-nf-call-iptables=1
sysctl -w net.bridge.bridge-nf-call-ip6tables=1
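
A sketch for making those sysctl settings persistent, assuming a drop-in file is preferred over editing /etc/sysctl.conf directly (the name 99-kubernetes.conf is arbitrary):

cat <<EOF > /etc/sysctl.d/99-kubernetes.conf
net.ipv4.ip_forward=1
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
EOF
# reload all sysctl configuration files
sysctl --system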
# point kubectl at the admin kubeconfig that kubeadm init will create (control plane only)
export KUBECONFIG=/etc/kubernetes/admin.conf
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> /root/.bashrc
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.33/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg

echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.33/deb/ /" | tee /etc/apt/sources.list.d/kubernetes.list
curl -fsSL https://download.opensuse.org/repositories/isv:/cri-o:/stable:/v1.32/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/cri-o-apt-keyring.gpg

echo "deb [signed-by=/etc/apt/keyrings/cri-o-apt-keyring.gpg] https://download.opensuse.org/repositories/isv:/cri-o:/stable:/v1.32/deb/ /" | tee /etc/apt/sources.list.d/cri-o.list
  • install K8s (https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/) and Container Runtime CRI-O (https://cri-o.io/) [all nodes]
apt update
apt install -y cri-o kubelet kubeadm kubectl

systemctl start crio.service
systemctl enable crio.service

apt-mark hold cri-o kubelet kubeadm kubectl
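
A quick sanity check after the install, assuming the runtime started cleanly and the hold took effect:

# runtime running, binaries present, packages pinned
systemctl is-active crio
kubeadm version
kubectl version --client
apt-mark showhold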

  • install CNI Cilium (https://docs.cilium.io/en/stable/gettingstarted/k8s-install-default/) [all nodes]

# CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt)
CILIUM_CLI_VERSION=v0.18.3
curl -L --fail --remote-name-all https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-linux-amd64.tar.gz{,.sha256sum}
sha256sum --check cilium-linux-amd64.tar.gz.sha256sum
tar xzvfC cilium-linux-amd64.tar.gz /usr/local/bin
rm cilium-linux-amd64.tar.gz{,.sha256sum}
# HUBBLE_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/hubble/master/stable.txt)
HUBBLE_VERSION=v1.17.2
curl -L --fail --remote-name-all https://github.com/cilium/hubble/releases/download/$HUBBLE_VERSION/hubble-linux-amd64.tar.gz{,.sha256sum}
sha256sum --check hubble-linux-amd64.tar.gz.sha256sum
tar xzvfC hubble-linux-amd64.tar.gz /usr/local/bin
rm hubble-linux-amd64.tar.gz{,.sha256sum}
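
Optionally verify the CLI binaries landed in /usr/local/bin (cilium version also reports the cluster's Cilium version once it is installed):

command -v cilium hubble
cilium version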
  • Create Cluster [control plane]
kubeadm config images pull
kubeadm init --control-plane-endpoint=onprem.cloud --v=5 --pod-network-cidr=10.244.0.0/16
# the node will stay 'NotReady' until the CNI (Cilium) is installed below
watch --color 'kubectl get nodes'

cilium install
cilium status
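
Once cilium status reports everything OK, a deeper end-to-end check can be run (this deploys test pods into the cluster and takes several minutes):

cilium status --wait
cilium connectivity test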
  • Join another control plane [control plane]
# on the existing control plane: upload the control-plane certificates and note the printed certificate key
kubeadm init phase upload-certs --upload-certs
# on the existing control plane: print the join command
kubeadm token create --print-join-command
# on the new control plane: run the printed join command and append
# --control-plane --certificate-key <key from upload-certs>

# Cilium runs cluster-wide, so it only needs to be installed once; just verify it from the new control plane
cilium status
  • Join Nodes [worker nodes]
# on the control plane: print the join command, then run it on each worker node
kubeadm token create --print-join-command
  • reset K8s Node
kubeadm reset
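
kubeadm reset does not clean up everything; a hedged cleanup sketch for the leftovers it reports (double-check the paths before deleting on a shared machine):

# CNI configuration and kubeconfig leftovers
rm -rf /etc/cni/net.d
rm -f $HOME/.kube/config
# flush iptables rules created by kube-proxy / the CNI
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X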
  • make control-plane also a worker node (optional – single Node Cluster)
    kubectl taint nodes --all node-role.kubernetes.io/control-plane-
    # the 'master' taint only exists on clusters upgraded from older Kubernetes releases
    kubectl taint nodes --all node-role.kubernetes.io/master:NoSchedule-
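
    To confirm the control plane now accepts regular workloads, check that no scheduling taints remain:

    kubectl describe nodes | grep -i taints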
  • install Helm (https://helm.sh/docs/intro/install/)
curl https://baltocdn.com/helm/signing.asc | gpg --dearmor | tee /usr/share/keyrings/helm.gpg > /dev/null

echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main" | tee /etc/apt/sources.list.d/helm-stable-debian.list


apt update
apt install -y helm
  • install Longhorn storage (https://longhorn.io/) [control plane]
# Longhorn needs open-iscsi on every node: apt install -y open-iscsi
helm repo add longhorn https://charts.longhorn.io
helm repo update
helm install longhorn longhorn/longhorn --namespace longhorn-system --create-namespace --version 1.7.1

kubectl -n longhorn-system get pod
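
A minimal sketch to confirm Longhorn can actually provision a volume, assuming the chart's default StorageClass name 'longhorn':

kubectl get storageclass
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: longhorn-test-pvc
spec:
  accessModes: ["ReadWriteOnce"]
  storageClassName: longhorn
  resources:
    requests:
      storage: 1Gi
EOF
# the claim should reach 'Bound' once Longhorn has provisioned the volume
kubectl get pvc longhorn-test-pvc
# clean up the test claim afterwards
kubectl delete pvc longhorn-test-pvc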
  • test Cluster
kubectl create namespace test
kubectl config set-context --current --namespace=test
kubectl create deployment whoami -n test --image=traefik/whoami:latest
#kubectl expose deployment whoami --name whoami-service --port 80 --target-port=30080 --type NodePort
kubectl create service nodeport whoami -n test --node-port=30080 --tcp=80:80
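
The NodePort service can then be tested from any node (the IP lookup below is just one way to grab the node's primary address):

# whoami should answer with request/host details on port 30080
curl http://$(hostname -I | awk '{print $1}'):30080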

https://kubernetes.io/de/docs/reference/kubectl/cheatsheet/

https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard

helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
helm upgrade --install kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard --create-namespace --namespace kubernetes-dashboard
# dashboard is then reachable at https://localhost:8443
kubectl -n kubernetes-dashboard port-forward svc/kubernetes-dashboard-kong-proxy 8443:443
kubectl create serviceaccount admin-user -n kubernetes-dashboard
kubectl create clusterrolebinding admin-user --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:admin-user
kubectl -n kubernetes-dashboard create token admin-user
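
The token from create token is short-lived by default; a longer validity can be requested if preferred (subject to the API server's maximum):

kubectl -n kubernetes-dashboard create token admin-user --duration=24h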

https://github.com/kubernetes/dashboard/blob/master/docs/user/access-control/creating-sample-user.md
