GNOME increase "application is not responding" timeout
gsettings set org.gnome.mutter check-alive-timeout 15000 # milliseconds
Add K8s User
Source: https://kubernetes.io/docs/tasks/tls/certificate-issue-client-csr/
# Create a private key
openssl genrsa -out myuser.key 4096
# Create an X.509 certificate signing request
# Change the common name "myuser" to the actual username that you want to use
openssl req -new -key myuser.key -out myuser.csr -subj "/CN=myuser"
# Create a Kubernetes CertificateSigningRequest
cat myuser.csr | base64 | tr -d "\n"
cat <<EOF | kubectl apply -f -
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: myuser # example
spec:
  # This is an encoded CSR. Change this to the base64-encoded contents of myuser.csr
  request: <myuser.csr in base64>
  signerName: kubernetes.io/kube-apiserver-client
  expirationSeconds: 86400 # one day
  usages:
  - client auth
EOF
# Approve the CertificateSigningRequest
kubectl certificate approve myuser
# Get the Certificate
kubectl get csr myuser -o jsonpath='{.status.certificate}'| base64 -d > myuser.crt
# Configure the certificate into kubeconfig
kubectl config set-credentials myuser --client-key=myuser.key --client-certificate=myuser.crt --embed-certs=true
# Create Role and RoleBinding
kubectl create role developer --verb=create --verb=get --verb=list --verb=update --verb=delete --resource=pods --namespace=my-space
kubectl create rolebinding developer-binding-myuser --role=developer --user=myuser
kubectl create clusterrolebinding user-cluster-admin --clusterrole=cluster-admin --user=myuser
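To sanity-check the new user, a minimal sketch (the context name myuser-context and the cluster name kubernetes are assumptions; match them to your kubeconfig):
kubectl config set-context myuser-context --cluster=kubernetes --user=myuser
kubectl auth can-i list pods --namespace=my-space --as=myuser # expect "yes"
kubectl --context=myuser-context get pods -n my-space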
https://kubernetes.io/docs/reference/access-authn-authz/rbac
Debian 12 Harbor install
Harbor Installation Prerequisites
| Resource | Minimum | Recommended |
|---|---|---|
| CPU | 2 CPU | 4 CPU |
| Mem | 4 GB | 8 GB |
| Disk | 40 GB | 160 GB |
# either Podman …
apt install podman podman-compose podman-docker openssl
# … or Docker
apt install docker.io docker-compose openssl
download an official release:
wget https://github.com/goharbor/harbor/releases/download/v2.13.1/harbor-offline-installer-v2.13.1.tgz
tar xzvf harbor-offline-installer-v2.13.1.tgz
openssl genrsa -out ca.key 4096
openssl req -x509 -new -nodes -sha512 -days 3650 \
-subj "/C=DE/ST=STATE/L=LOCATION/O=ORGANIZATION/OU=Registry/CN=harbor.local" \
-key ca.key \
-out ca.crt
openssl genrsa -out harbor.local.key 4096
openssl req -sha512 -new \
-subj "/C=DE/ST=STATE/L=LOCATION/O=ORGANIZATION/OU=Registry/CN=harbor.local" \
-key harbor.local.key \
-out harbor.local.csr
cat > v3.ext <<-EOF
authorityKeyIdentifier=keyid,issuer
basicConstraints=CA:FALSE
keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment
extendedKeyUsage = serverAuth
subjectAltName = @alt_names
[alt_names]
DNS.1=harbor.registry.internal
DNS.2=harbor.local
DNS.3=harbor
EOF
openssl x509 -req -sha512 -days 3650 \
-extfile v3.ext \
-CA ca.crt -CAkey ca.key -CAcreateserial \
-in harbor.local.csr \
-out harbor.local.crt
openssl x509 -inform PEM -in harbor.local.crt -out harbor.local.cert
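Optionally inspect the signed certificate and confirm the SANs before wiring it into Harbor:
openssl x509 -in harbor.local.crt -noout -text | grep -A1 "Subject Alternative Name"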
cp harbor/harbor.yml.tmpl harbor/harbor.yml
edit:
hostname: harbor.local
…
https:
  certificate: /path/to/harbor.local.cert
  private_key: /path/to/harbor.local.key
cd harbor && bash install.sh
open a browser and log in with user admin and password Harbor12345
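A quick push test from a Docker client, as a sketch (assumes harbor.local resolves to the host and the CA is trusted; /etc/docker/certs.d is Docker's per-registry CA convention, podman uses /etc/containers/certs.d instead):
mkdir -p /etc/docker/certs.d/harbor.local
cp ca.crt /etc/docker/certs.d/harbor.local/
docker login harbor.local -u admin
docker pull alpine:latest
docker tag alpine:latest harbor.local/library/alpine:latest
docker push harbor.local/library/alpine:latest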
Windows verify checksum
certutil -hashfile File.ext sha256
# Linux equivalent:
sha256sum File.ext
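When the publisher ships a checksum file, the comparison can be automated on Linux (the file name is illustrative):
sha256sum -c File.ext.sha256 # prints "File.ext: OK" on match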
OPNsense Bridge
- Interfaces: Assignments » add igb[n]
- Interfaces: Other Types: Bridge » create Bridge
- Interfaces: [OPT(n)] » enable Device
- Interfaces: Other Types: Bridge » add member Device(s)
- Interfaces: Assignments » change LAN device to Bridge
- (add old LAN Interface to Bridge)
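To filter traffic on the bridge itself rather than on its member interfaces, the OPNsense bridging docs point to two tunables (System » Settings » Tunables):
net.link.bridge.pfil_member=0
net.link.bridge.pfil_bridge=1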
install Kubernetes on Debian 12 Bookworm
Sources:
https://kubernetes.io/docs/setup/
https://www.server-world.info/en/note?os=Debian_12&p=kubernetes&f=1
- prepare Hosts [all nodes]
- disable swap
swapoff -a # and remove the swap entry from /etc/fstab to make it permanent
- add cluster DNS to /etc/hosts, e.g. onprem.cloud
echo '127.0.0.1 onprem.cloud' >> /etc/hosts
apt install -y curl gpg iptables
# edit /etc/sysctl.conf
# net.ipv4.ip_forward=1
# net.bridge.bridge-nf-call-iptables=1
# net.bridge.bridge-nf-call-ip6tables=1
sysctl -w net.ipv4.ip_forward=1
sysctl -w net.bridge.bridge-nf-call-iptables=1
sysctl -w net.bridge.bridge-nf-call-ip6tables=1
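To persist these settings across reboots, a drop-in file also works (the file name is a common convention, an assumption rather than anything kubeadm requires):
cat > /etc/sysctl.d/99-kubernetes.conf <<EOF
net.ipv4.ip_forward=1
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
EOF
sysctl --system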
# add kernel modules
echo -e "overlay\nbr_netfilter" | tee -a /etc/modules
modprobe br_netfilter
export KUBECONFIG=/etc/kubernetes/admin.conf
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> /root/.bashrc
- add K8s Repo (https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#install-using-native-package-management) [all nodes]
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.33/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.33/deb/ /" | tee /etc/apt/sources.list.d/kubernetes.list
- add CRI-O Repo (https://cri-o.io/) [all nodes]
curl -fsSL https://download.opensuse.org/repositories/isv:/cri-o:/stable:/v1.32/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/cri-o-apt-keyring.gpg
echo "deb [signed-by=/etc/apt/keyrings/cri-o-apt-keyring.gpg] https://download.opensuse.org/repositories/isv:/cri-o:/stable:/v1.32/deb/ /" | tee /etc/apt/sources.list.d/cri-o.list
- install K8s (https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/) and Container Runtime CRI-O (https://cri-o.io/) [all nodes]
apt update
apt install -y cri-o kubelet kubeadm kubectl
systemctl start crio.service
systemctl enable crio.service
apt-mark hold cri-o kubelet kubeadm kubectl
- install CNI Cilium (https://docs.cilium.io/en/stable/gettingstarted/k8s-install-default/) [all nodes]
# CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt)
CILIUM_CLI_VERSION=v0.18.3
curl -L --fail --remote-name-all https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-linux-amd64.tar.gz{,.sha256sum}
sha256sum --check cilium-linux-amd64.tar.gz.sha256sum
tar xzvfC cilium-linux-amd64.tar.gz /usr/local/bin
rm cilium-linux-amd64.tar.gz{,.sha256sum}
- install Hubble Observability (https://docs.cilium.io/en/stable/observability/hubble/index.html) [all nodes]
# HUBBLE_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/hubble/master/stable.txt)
HUBBLE_VERSION=v1.17.2
curl -L --fail --remote-name-all https://github.com/cilium/hubble/releases/download/$HUBBLE_VERSION/hubble-linux-amd64.tar.gz{,.sha256sum}
sha256sum --check hubble-linux-amd64.tar.gz.sha256sum
tar xzvfC hubble-linux-amd64.tar.gz /usr/local/bin
rm hubble-linux-amd64.tar.gz{,.sha256sum}
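A quick check that both binaries landed on PATH (cilium may complain that no cluster is reachable yet, which is expected at this stage):
cilium version
hubble version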
- Create Cluster [control plane]
kubeadm config images pull
kubeadm init --control-plane-endpoint=onprem.cloud --v=5 --pod-network-cidr=10.244.0.0/16
cilium install
cilium status
# wait for node to get 'ready' (it stays NotReady until the CNI is up)
watch --color 'kubectl get nodes'
- Join another control plane [control plane]
kubeadm token create --print-join-command
kubeadm init phase upload-certs --upload-certs # prints the certificate key
# run the printed join command with --control-plane --certificate-key <key> appended
cilium status # Cilium is already installed cluster-wide; no second install needed
- Join Nodes [worker nodes]
kubeadm token create --print-join-command
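The printed command is run on each worker and has roughly this shape (token and hash are placeholders):
kubeadm join onprem.cloud:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>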
- reset K8s Node
kubeadm reset
- make control-plane also a worker node (optional – single Node Cluster)
kubectl taint nodes --all node-role.kubernetes.io/control-plane-
kubectl taint nodes --all node-role.kubernetes.io/master:NoSchedule-
- install Helm (https://helm.sh/docs/intro/install/)
curl https://baltocdn.com/helm/signing.asc | gpg --dearmor | tee /usr/share/keyrings/helm.gpg > /dev/null
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main" | tee /etc/apt/sources.list.d/helm-stable-debian.list
apt update
apt install helm
- install Longhorn (https://longhorn.io/) [all nodes]
helm repo add longhorn https://charts.longhorn.io
helm repo update
helm install longhorn longhorn/longhorn --namespace longhorn-system --create-namespace --version 1.7.1
kubectl -n longhorn-system get pod
- test Cluster
kubectl create namespace test
kubectl config set-context --current --namespace=test
kubectl create deployment whoami -n test --image=traefik/whoami:latest
#kubectl expose deployment whoami --name whoami-service --port 80 --target-port=30080 --type NodePort
kubectl create service nodeport whoami -n test --node-port=30080 --tcp=80:80
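The deployment should then answer on the NodePort of any node:
curl http://localhost:30080/ # whoami echoes the request headers back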
https://kubernetes.io/de/docs/reference/kubectl/cheatsheet/
https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard
helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
helm upgrade --install kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard --create-namespace --namespace kubernetes-dashboard
kubectl -n kubernetes-dashboard port-forward svc/kubernetes-dashboard-kong-proxy 8443:443
kubectl create serviceaccount admin-user -n kubernetes-dashboard
kubectl create clusterrolebinding admin-user --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:admin-user
kubectl -n kubernetes-dashboard create token admin-user
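Then browse to https://localhost:8443 (the port-forward above) and sign in with the printed token.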
https://github.com/kubernetes/dashboard/blob/master/docs/user/access-control/creating-sample-user.md
use physical Port in KVM
https://wiki.debian.org/BridgeNetworkConnections
apt install bridge-utils
ip link show
brctl addbr br42
brctl addif br42 enp5s2
ip link set enp5s2 up
ip link set enp5s2 master br42
ip link set br42 up
ip link show
cat /etc/network/interfaces
iface eno2 inet static
    address 192.168.168.123
    broadcast 192.168.168.255
    netmask 255.255.255.0
    gateway 192.168.168.254
iface enp0s25 inet manual
auto br42
iface br42 inet dhcp
    bridge_ports enp0s25
    bridge_stp off    # disable Spanning Tree Protocol
    bridge_waitport 0 # no delay before a port becomes available
    bridge_fd 0       # no forwarding delay
cat bridged-network.xml
<network>
  <name>bridged-network</name>
  <forward mode="bridge" />
  <bridge name="br42" />
</network>
virsh net-define bridged-network.xml
virsh net-start bridged-network
virsh net-autostart bridged-network
systemctl restart networking.service
virsh net-list --all
virsh net-autostart default
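To attach a guest NIC to the new network, a sketch (<vm-name> is a placeholder):
virsh attach-interface --domain <vm-name> --type network --source bridged-network --model virtio --config --live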
tinygrad on debian 12
apt install git clang python3-venv
git clone https://github.com/tinygrad/tinygrad.git
cd tinygrad
python3 -m venv .venv
source .venv/bin/activate
python3 -m pip install -e .
python3 examples/stable_diffusion.py
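tinygrad is configured through environment variables; DEBUG, for instance, raises log verbosity (DEBUG=2 prints the executed kernels with timings):
DEBUG=2 python3 examples/stable_diffusion.py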
OpenBSD in KVM
install Kubernetes on Debian 11 Bullseye
- prepare Hosts [all nodes]
- disable swap
swapoff -a
- add cluster DNS to /etc/hosts, e.g. onprem.cloud
echo "127.0.0.1 onprem.cloud" >> /etc/hosts
apt install -y curl gpg
# edit /etc/sysctl.conf
# net.ipv4.ip_forward=1
# net.bridge.bridge-nf-call-iptables=1
sysctl -w net.ipv4.ip_forward=1
sysctl -w net.bridge.bridge-nf-call-iptables=1
# edit /etc/modules
# br_netfilter
modprobe br_netfilter
export KUBECONFIG=/etc/kubernetes/admin.conf
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> /root/.bashrc
- install Container Runtime CRI-O (https://cri-o.io/) [all nodes]
export OS=Debian_11
export VERSION=1.25
echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/ /" > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
echo "deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/$VERSION/$OS/ /" > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cri-o:$VERSION.list
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/$VERSION/$OS/Release.key | apt-key add -
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/Release.key | apt-key add -
apt update
apt install -y cri-o cri-o-runc
systemctl enable crio.service
systemctl start crio.service
- install K8s (https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/) [all nodes]
apt install -y apt-transport-https ca-certificates curl
mkdir /etc/apt/keyrings
curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | gpg --dearmor | dd status=none of=/etc/apt/keyrings/kubernetes-archive-keyring.gpg
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | tee /etc/apt/sources.list.d/kubernetes.list
apt update
apt install -y kubelet kubeadm kubectl
apt-mark hold kubelet kubeadm kubectl
- install CNI Cilium (https://docs.cilium.io/en/stable/gettingstarted/k8s-install-default/) [all nodes]
CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/master/stable.txt)
CLI_ARCH=amd64
if [ "$(uname -m)" = "aarch64" ]; then CLI_ARCH=arm64; fi
curl -L --fail --remote-name-all https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
sha256sum --check cilium-linux-${CLI_ARCH}.tar.gz.sha256sum
tar xzvfC cilium-linux-${CLI_ARCH}.tar.gz /usr/local/bin
rm cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
- Create Cluster [control plane]
kubeadm init --control-plane-endpoint=onprem.cloud --v=5 --pod-network-cidr=10.244.0.0/16
cilium install
cilium status
# wait for node to get 'ready'
watch --color 'kubectl get nodes'
- Join Nodes [worker nodes]
kubeadm token create --print-join-command
- reset K8s Node
kubeadm reset
- expose K8s Port
iptables -t nat -A PREROUTING -d 10.10.10.172 -i eno1 -p tcp --dport 6443 -j DNAT --to-destination 192.168.122.234:6443
iptables -I FORWARD -p tcp -d 192.168.122.234 --dport 6443 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT
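To keep the rules across reboots (assumes the iptables-persistent package is installed):
netfilter-persistent save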
- test Cluster
kubectl create namespace test
kubectl config set-context --current --namespace=test
kubectl create deployment whoami --image=traefik/whoami:latest
#kubectl expose deployment whoami --name whoami-service --port 80 --target-port=30080 --type NodePort
kubectl create service nodeport whoami --node-port=30080 --tcp=80:80