Howto kubernetes archlinux

configure os

download latest archlinux cloud image:

wget https://linuximages.de/openstack/arch/arch-openstack-LATEST-image-bootstrap.qcow2

make sure we have libguestfs installed:

pacman -S libguestfs guestfs-tools

resize image:

cp arch-openstack-LATEST-image-bootstrap.qcow2 arch-openstack-LATEST-image-bootstrap_100G.qcow2
qemu-img resize arch-openstack-LATEST-image-bootstrap_100G.qcow2 +99G

expand image:

virt-resize --expand /dev/sda1 arch-openstack-LATEST-image-bootstrap.qcow2 arch-openstack-LATEST-image-bootstrap_100G.qcow2

change password:

virt-sysprep -a arch-openstack-LATEST-image-bootstrap_100G.qcow2 -q --root-password password:vidalinux

uninstall cloud-init:

virt-sysprep -a arch-openstack-LATEST-image-bootstrap_100G.qcow2 --run-command "pacman -R cloud-init --noconfirm"

on a centos host, copy the image to the libvirt images directory:

cd /var/lib/libvirt/images/
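
the original jumps from preparing the image to configuring the guest; a minimal virt-install sketch to boot it, assuming a libvirt host, with the VM name, resources and bridge name below being illustrative only:

# illustrative values: adjust name, memory, vcpus and bridge to your host
virt-install --name archlinux-k8s \
  --memory 8192 --vcpus 4 \
  --disk path=/var/lib/libvirt/images/arch-openstack-LATEST-image-bootstrap_100G.qcow2,format=qcow2 \
  --import --os-variant archlinux \
  --network bridge=br0 \
  --noautoconsole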

configure hostname:

hostnamectl set-hostname archlinux.ovoxcloud.com

configure timezone:

timedatectl set-timezone America/Puerto_Rico

configure /etc/hosts:

cat > /etc/hosts << EOF
127.0.0.1 localhost
192.168.24.10 archlinux.ovoxcloud.com
EOF

network configuration:

ip addr add 192.168.24.10/24 dev eth0
ip route add default via 192.168.24.254
echo "nameserver 4.2.2.1" > /etc/resolv.conf

stop and disable systemd-resolved:

systemctl disable systemd-resolved && systemctl stop systemd-resolved

edit /etc/ssh/sshd_config:

sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config
sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/g' /etc/ssh/sshd_config

restart sshd service:

systemctl restart sshd

update entire os:

pacman -Syuu --noconfirm

install packages:

pacman -Sy --noconfirm curl vim screen nano net-tools bind-tools containerd networkmanager ebtables ethtool wget unzip socat cni-plugins conntrack-tools cri-o

add the following registries to /etc/containers/registries.conf:

cat >> /etc/containers/registries.conf << "EOF"
[registries.search]
registries = ['docker.io']
EOF

add the following config to crio:

cat > /etc/crio/crio.conf.d/00-plugin-dir.conf << EOF
[crio.network]
plugin_dirs = [
  "/opt/cni/bin/",
]
EOF

start and enable cri-o:

systemctl enable crio && systemctl start crio

start and enable containerd:

systemctl enable containerd && systemctl start containerd

configure networkmanager:

systemctl start NetworkManager && systemctl enable NetworkManager
nmcli con del eth0
nmcli con del Wired\ connection\ 1
nmcli con add con-name eth0 ipv4.method manual type ethernet ifname eth0 ipv4.addresses 192.168.24.10/24 ipv4.gateway 192.168.24.254 ipv4.dns 4.2.2.1,4.2.2.2 autoconnect yes
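
optionally confirm the connection came up with the expected address (check not in the original):

nmcli con up eth0
nmcli device show eth0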

load kernel module:

modprobe br_netfilter

add the following file to load this module at boot:

cat > /etc/modules-load.d/br_netfilter.conf << EOF
br_netfilter
EOF

ensure net.bridge.bridge-nf-call-iptables is set to 1 in your sysctl config:

cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
sysctl --system
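
optionally confirm the module is loaded and the sysctl values are active:

lsmod | grep br_netfilter
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward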

install chrony:

pacman -S chrony --noconfirm

enable and start chrony:

systemctl enable chronyd && systemctl start chronyd
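
optionally verify time synchronization:

chronyc tracking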

install kubernetes

installing CNI:

CNI_VERSION="v1.3.0"
mkdir -p /opt/cni/bin
curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-linux-amd64-${CNI_VERSION}.tgz" | tar -C /opt/cni/bin -xz

installing crictl (CRI tools):

CRICTL_VERSION="v1.27.0"
mkdir -p /opt/bin
curl -L "https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-amd64.tar.gz" | tar -C /opt/bin -xz
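
point crictl at the cri-o socket used later by kubeadm; this config file is not part of the original steps, but it saves crictl from guessing the endpoint:

cat > /etc/crictl.yaml << EOF
runtime-endpoint: unix:///var/run/crio/crio.sock
image-endpoint: unix:///var/run/crio/crio.sock
EOF
crictl info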

installing kubeadm, kubelet, kubectl:

RELEASE="$(curl -sSL https://dl.k8s.io/release/stable.txt)"
mkdir -p /opt/bin
cd /opt/bin
curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/amd64/{kubeadm,kubelet,kubectl}
chmod +x {kubeadm,kubelet,kubectl}
curl -sSL "https://raw.githubusercontent.com/kubernetes/release/master/cmd/kubepkg/templates/latest/deb/kubelet/lib/systemd/system/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service
mkdir -p /etc/systemd/system/kubelet.service.d
curl -sSL "https://raw.githubusercontent.com/kubernetes/release/master/cmd/kubepkg/templates/latest/deb/kubeadm/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
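
enable the kubelet service so it comes up on boot and kubeadm can manage it; this step is not shown in the original but follows the upstream kubeadm binary install notes:

systemctl daemon-reload
systemctl enable --now kubelet
# kubelet will restart in a crash loop until kubeadm init writes its config; this is expected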

create symbolic links for executables:

for u in /opt/bin/*; do ln -sf "$u" /usr/local/bin/; done

start kube cluster:

kubeadm init --pod-network-cidr 10.234.0.0/16 --apiserver-advertise-address=0.0.0.0 --cri-socket /var/run/crio/crio.sock --node-name archlinux.ovoxcloud.com
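
kubeadm init also prints a join command for adding worker nodes; it is not needed for this single-node setup, but it can be regenerated later if required:

kubeadm token create --print-join-command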

to start using your cluster, you need to run the following as a regular user:

mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
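
before moving on you can check that the control-plane pods are up:

# coredns stays Pending until the CNI (calico, below) is installed
kubectl get pods -n kube-system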

configure kubernetes

untaint the master so you can run pods:

kubectl taint nodes --all node.kubernetes.io/not-ready:NoSchedule-
kubectl taint nodes --all node-role.kubernetes.io/control-plane:NoSchedule-

watch kubelet log for errors:

journalctl -u kubelet -f

install calico operator:

kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/tigera-operator.yaml

download calico custom-resources:

wget https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/custom-resources.yaml

edit subnet on custom-resources yaml:

sed -i 's|192.168.0.0/16|10.234.0.0/16|g' custom-resources.yaml

create calico config:

kubectl create -f custom-resources.yaml
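
the tigera operator creates the calico pods in the calico-system namespace; watch them until they are all Running:

kubectl get pods -n calico-system -w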

verify everything is ok:

kubectl get nodes

if everything is ok you should see output like this:

NAME                      STATUS   ROLES    AGE     VERSION
archlinux.ovoxcloud.com   Ready    master   3m45s   v1.27.2

set up nginx-ingress:

kubectl apply -f https://raw.githubusercontent.com/vidalinux/kubernetes/main/ingress-nginx/deploy-nginx.yaml
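
verify the ingress controller came up, assuming the manifest uses the standard ingress-nginx namespace:

kubectl get pods -n ingress-nginx
kubectl get svc -n ingress-nginx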

deploy rook-ceph storage

install devel package:

pacman -Sy base-devel --noconfirm

create normal user:

useradd -m linux

add user linux to sudoers:

cat > /etc/sudoers.d/10-linux << EOF
linux ALL=(ALL:ALL) ALL
EOF

installing yay as normal user:

su - linux
git clone https://aur.archlinux.org/yay.git
cd yay
makepkg -si

install ceph-bin and ceph-libs-bin:

yay -S ceph-libs-bin ceph-bin
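
optionally confirm the ceph client tools are available:

ceph --version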

clone rook git repo:

git clone --single-branch --branch v1.11.7 https://github.com/rook/rook.git

deploy rook-ceph operator:

cd rook/deploy/examples
kubectl create -f crds.yaml -f common.yaml -f operator.yaml

edit the following settings on cluster.yaml:

  mon:
    count: 1
    allowMultiplePerNode: true
  mgr:
    count: 1
    allowMultiplePerNode: true 
  storage:
    config:
      osdsPerDevice: "1" 
    nodes:
      - name: "archlinux.ovoxcloud.com"
        devices: # specific devices to use for storage can be specified for each node
          - name: "vdb"

deploy cluster:

kubectl create -f cluster.yaml

verify installation status:

kubectl --namespace rook-ceph get cephclusters.ceph.rook.io rook-ceph
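
for a closer look at cluster health, the rook examples directory cloned above also ships a toolbox deployment you can use to run ceph commands:

kubectl create -f toolbox.yaml
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph status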

deploy application

clone vidalinux kubernetes repo:

git clone https://github.com/vidalinux/kubernetes.git

edit settings on wordpress yaml:

     # wordpress container
       - name: WORDPRESS_DB_NAME
         value: "wordpressdb"
       - name: WORDPRESS_DB_USER
         value: "wordpress"
       - name: WORDPRESS_DB_PASSWORD
         value: "wordpress"
       - name: WORDPRESS_DB_HOST
         value: "wpvidalinux-db"
       - name: TZ
         value: "America/Puerto_Rico"
     # mariadb container
       - name: MYSQL_DATABASE
         value: wordpressdb
       - name: MYSQL_ROOT_PASSWORD
         value: root
       - name: MYSQL_USER
         value: wordpress
       - name: MYSQL_PASSWORD
         value: wordpress
       - name: TZ
         value: "America/Puerto_Rico"

base64-encode your genuine ssl cert and key:

cat domain.crt |base64 -w 0
cat domain.key |base64 -w 0

use crt and key to create secret yaml:

cat > $PWD/secret.yaml << EOF
apiVersion: v1
kind: Secret
metadata:
  name: ovoxcloud.com-tls
  namespace: wpvidalinux
data:
  tls.crt: $(cat ovoxcloud.crt |base64 -w 0)
  tls.key: $(cat ovoxcloud.key |base64 -w 0)
type: kubernetes.io/tls
EOF
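
alternatively, kubectl can build the same tls secret directly from the files and do the base64 encoding for you; if you use this one-liner, skip deploying secret.yaml below:

# equivalent to the yaml above; creates the secret directly in the wpvidalinux namespace
kubectl create secret tls ovoxcloud.com-tls --cert=ovoxcloud.crt --key=ovoxcloud.key -n wpvidalinux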

deploy secret yaml:

kubectl create -f secret.yaml

deploy wordpress app:

kubectl create -f wordpress/wordpress.yml

change namespace:

kubectl config set-context --current --namespace=wpvidalinux-app

display pods running in current namespace:

kubectl get pods

show all pods running in all namespaces:

kubectl get pods -A

copy genuine ssl certificates to the seafile ssl directory:

cp domain.crt /root/kubernetes/seafile/ssl/seafile.domain.com.crt
cp domain.key /root/kubernetes/seafile/ssl/seafile.domain.com.key

edit configs on seafile yaml:

      # mariadb container
       - name: MYSQL_LOG_CONSOLE
         value: "true"
       - name: MYSQL_ROOT_PASSWORD
         value: "livinglavidalinux"
       - name: TZ
         value: "America/Puerto_Rico"
      # seafile container
       - name: DB_HOST
         value: "seafile-db"
       - name: DB_ROOT_PASSWD
         value: "livinglavidalinux"
       - name: SEAFILE_ADMIN_EMAIL
         value: "junior@ovoxcloud.com"
       - name: SEAFILE_ADMIN_PASSWORD
         value: "livinglavidalinux"
       - name: SEAFILE_SERVER_HOSTNAME
         value: "seafile.ovoxcloud.com"
       - name: SEAFILE_SERVER_LETSENCRYPT
         value: "true"
       - name: TZ
         value: "America/Puerto_Rico"

execute copy-certs.sh:

bash /root/kubernetes/seafile/copy-certs.sh

deploy seafile application using yaml:

kubectl create -f seafile.yaml

show container log:

kubectl logs seafile-srv-697c787f5c-px7cw

renew certificates

verify certificates:

kubeadm certs check-expiration

renew all certificates:

kubeadm certs renew all
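
the control-plane components only pick up renewed certificates after a restart; one common way on a kubeadm node is to bounce the static pod manifests (paths below are the kubeadm defaults):

# move the static pod manifests away so kubelet stops the pods, then move them back
mkdir -p /etc/kubernetes/manifests.bak
mv /etc/kubernetes/manifests/*.yaml /etc/kubernetes/manifests.bak/
sleep 60
mv /etc/kubernetes/manifests.bak/*.yaml /etc/kubernetes/manifests/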

replace the config:

cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
