Howto kubernetes archlinux
= configure os =
download latest archlinux cloud image:
wget https://linuximages.de/openstack/arch/arch-openstack-LATEST-image-bootstrap.qcow2
make sure we have libguestfs installed:
pacman -S libguestfs guestfs-tools
resize image:
cp arch-openstack-LATEST-image-bootstrap.qcow2 arch-openstack-LATEST-image-bootstrap_100G.qcow2
qemu-img resize arch-openstack-LATEST-image-bootstrap_100G.qcow2 +99G
expand image:
virt-resize --expand /dev/sda1 arch-openstack-LATEST-image-bootstrap.qcow2 arch-openstack-LATEST-image-bootstrap_100G.qcow2
change password:
virt-sysprep -a arch-openstack-LATEST-image-bootstrap_100G.qcow2 -q --root-password password:vidalinux
uninstall cloud-init:
virt-sysprep -a arch-openstack-LATEST-image-bootstrap_100G.qcow2 --run-command "pacman -R cloud-init --noconfirm"
on centos copy the image to this directory:
cd /var/lib/libvirt/images/
mount image:
qemu-nbd -c /dev/nbd0 arch-openstack-LATEST-image-bootstrap_100G.qcow2
kpartx -a /dev/nbd0
mkdir /mnt/cloudimg
mount /dev/mapper/nbd0p1 /mnt/cloudimg
mount -o bind /dev /mnt/cloudimg/dev
mount -o bind /proc /mnt/cloudimg/proc
enter environment with arch-chroot:
arch-chroot /mnt/cloudimg
initialize the pacman keyring and populate signing keys:
pacman-key --init
pacman-key --populate archlinux
configure /etc/hosts:
cat > /etc/hosts << EOF
127.0.0.1 localhost
192.168.24.10 archlinux.ovoxcloud.com
EOF
edit /etc/ssh/sshd_config:
sed -i 's/#PasswordAuthentication yes/PasswordAuthentication yes/g' /etc/ssh/sshd_config
sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/g' /etc/ssh/sshd_config
enable sshd service:
systemctl enable sshd
ensure net.bridge.bridge-nf-call-iptables is set to 1 in your sysctl config:
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
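these bridge sysctls only exist while the br_netfilter module is loaded, so it is worth persisting the modules as well (a small addition, not in the original howto; assumes the stock Arch kernel ships overlay and br_netfilter as modules):
cat > /etc/modules-load.d/k8s.conf << EOF
overlay
br_netfilter
EOF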
configure networkmanager:
cat > /root/net << EOF
systemctl start NetworkManager && systemctl enable NetworkManager
nmcli con del eth0
nmcli con del Wired\ connection\ 1
nmcli con add con-name eth0 ipv4.method manual type ethernet ifname eth0 ipv4.addresses 192.168.24.10/24 ipv4.gateway 192.168.24.254 ipv4.dns 4.2.2.1,4.2.2.2 autoconnect yes
EOF
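note the heredoc above only saves these commands to /root/net; after the first boot of the vm you still need to execute them, for example:
bash /root/net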
unmount qcow2 image:
umount /mnt/cloudimg/proc
umount /mnt/cloudimg/dev
umount /mnt/cloudimg
nbd-client -d /dev/nbd0
dmsetup remove /dev/mapper/nbd0p1
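if nbd-client refuses to disconnect, check that nothing under /mnt/cloudimg is still mounted (a quick sanity check, not part of the original steps):
mount | grep cloudimg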
configure hostname:
hostnamectl set-hostname archlinux.ovoxcloud.com
configure timezone:
timedatectl set-timezone America/Puerto_Rico
network configuration:
ip addr add 192.168.24.10/24 dev eth0
ip route add default via 192.168.24.254
rm /etc/resolv.conf
echo "nameserver 4.2.2.1" > /etc/resolv.conf
echo "nameserver 4.2.2.2" >> /etc/resolv.conf
stop and disable systemd-resolved:
systemctl disable systemd-resolved && systemctl stop systemd-resolved
update entire os:
pacman -Syuu --noconfirm
install packages:
pacman -Sy curl vim screen nano net-tools bind-tools containerd networkmanager ebtables ethtool wget unzip socat cni-plugins conntrack-tools cri-o parted gptfdisk lvm2 git
install iptables:
pacman -S iptables
add the following registries to /etc/containers/registries.conf:
cat >> /etc/containers/registries.conf << "EOF"
[registries.search]
registries = ['docker.io']
EOF
add the following config to crio:
cat > /etc/crio/crio.conf.d/00-plugin-dir.conf << EOF
[crio.network]
plugin_dirs = [
  "/opt/cni/bin/",
]
EOF
enable cri-o:
systemctl enable crio
install chrony:
pacman -S chrony --noconfirm
enable and start chrony:
systemctl enable chronyd
reboot machine:
reboot
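after the vm comes back up, confirm the base services are running before installing kubernetes (a sanity check, not in the original howto):
systemctl is-active crio chronyd NetworkManager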
= install kubernetes =
installing CNI:
CNI_VERSION="v1.3.0"
mkdir -p /opt/cni/bin
curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-linux-amd64-${CNI_VERSION}.tgz" | tar -C /opt/cni/bin -xz
installing CRI:
CRICTL_VERSION="v1.27.0"
mkdir -p /opt/bin
curl -L "https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-amd64.tar.gz" | tar -C /opt/bin -xz
installing kubeadm, kubelet, kubectl:
RELEASE="$(curl -sSL https://dl.k8s.io/release/stable.txt)"
mkdir -p /opt/bin
cd /opt/bin
curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/amd64/{kubeadm,kubelet,kubectl}
chmod +x {kubeadm,kubelet,kubectl}
curl -sSL "https://raw.githubusercontent.com/kubernetes/release/master/cmd/kubepkg/templates/latest/deb/kubelet/lib/systemd/system/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service
mkdir -p /etc/systemd/system/kubelet.service.d
curl -sSL "https://raw.githubusercontent.com/kubernetes/release/master/cmd/kubepkg/templates/latest/deb/kubeadm/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
create symbolic links for executables:
cd /opt/bin
for u in kubeadm kubelet kubectl crictl; do ln -s /opt/bin/$u /usr/local/bin/$u; done
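verify the binaries resolve from the path (quick check, not in the original steps):
kubeadm version -o short
kubectl version --client
kubelet --version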
start kube cluster:
kubeadm init --pod-network-cidr 10.234.0.0/16 --apiserver-advertise-address=0.0.0.0 --cri-socket /var/run/crio/crio.sock --node-name archlinux.ovoxcloud.com
to start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
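alternatively, if you are running everything as root you can point kubectl straight at the admin kubeconfig (the equivalent shortcut from the upstream kubeadm docs):
export KUBECONFIG=/etc/kubernetes/admin.conf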
enable kubelet service:
systemctl enable kubelet
= configure kubernetes =
untaint the master so you can run pods:
kubectl taint nodes --all node.kubernetes.io/not-ready:NoSchedule-
kubectl taint nodes --all node-role.kubernetes.io/control-plane:NoSchedule-
watch kubelet log for errors:
journalctl -u kubelet -f
install calico operator:
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/tigera-operator.yaml
download calico custom-resources:
wget https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/custom-resources.yaml
edit subnet on custom-resources yaml:
sed -i 's|192.168.0.0/16|10.234.0.0/16|g' custom-resources.yaml
create calico config:
kubectl create -f custom-resources.yaml
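calico takes a minute or two to roll out; you can watch the pods until everything is Running (with the operator install the pods land in the calico-system namespace):
watch kubectl get pods -n calico-system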
verify everything is ok:
kubectl get nodes
if everything is ok you should see output like this:
NAME                      STATUS   ROLES           AGE    VERSION
archlinux.ovoxcloud.com   Ready    control-plane   112s   v1.27.3
set up nginx-ingress:
kubectl apply -f https://raw.githubusercontent.com/vidalinux/kubernetes/main/ingress-nginx/deploy-nginx.yaml
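confirm the ingress controller came up and note its service ports (assuming the manifest uses the upstream ingress-nginx namespace):
kubectl -n ingress-nginx get pods,svc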
= deploy rook-ceph storage =
create qcow2 images for rook-ceph:
qemu-img create -f qcow2 /static/rook/rook_disk1_300G.qcow2 300G
qemu-img create -f qcow2 /static/rook/rook_disk2_300G.qcow2 300G
qemu-img create -f qcow2 /static/rook/rook_disk3_300G.qcow2 300G
edit virtual machine:
virsh edit archlinux
add the following:
<disk type='file' device='disk'>
  <driver name='qemu' type='qcow2'/>
  <source file='/static/rook/rook_disk1_300G.qcow2'/>
  <target dev='vdb' bus='virtio'/>
</disk>
<disk type='file' device='disk'>
  <driver name='qemu' type='qcow2'/>
  <source file='/static/rook/rook_disk2_300G.qcow2'/>
  <target dev='vdc' bus='virtio'/>
</disk>
<disk type='file' device='disk'>
  <driver name='qemu' type='qcow2'/>
  <source file='/static/rook/rook_disk3_300G.qcow2'/>
  <target dev='vdd' bus='virtio'/>
</disk>
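after shutting the vm down and starting it again, the new disks should be visible inside the guest as empty block devices (quick check):
lsblk /dev/vdb /dev/vdc /dev/vdd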
install devel package:
pacman -Sy base-devel --noconfirm
create normal user:
useradd -m linux
passwd linux
add user linux to sudoers:
cat >> /etc/sudoers << EOF
linux ALL=(ALL:ALL) ALL
EOF
installing yay as normal user:
su - linux
git clone https://aur.archlinux.org/yay.git
cd yay
makepkg -si
install ceph-bin and ceph-libs-bin:
yay -S ceph-libs-bin ceph-bin
load rbd kernel module:
depmod -a
modprobe rbd
clone rook git repo:
git clone --single-branch --branch v1.11.7 https://github.com/rook/rook.git
deploy rook-ceph operator:
cd rook/deploy/examples
kubectl create -f crds.yaml -f common.yaml -f operator.yaml
override rook config:
cat > override_poolsize.yaml << EOF
kind: ConfigMap
apiVersion: v1
metadata:
  name: rook-config-override
  namespace: rook-ceph
data:
  config: |
    [global]
    osd_pool_default_size = 1
---
EOF
deploy config:
kubectl create -f override_poolsize.yaml
edit the following settings on cluster.yaml:
mon:
  count: 1
  allowMultiplePerNode: true
mgr:
  count: 1
  allowMultiplePerNode: true
storage:
  config:
    osdsPerDevice: "1"
  nodes:
    - name: "archlinux.ovoxcloud.com"
      devices: # specific devices to use for storage can be specified for each node
        - name: "vdb"
        - name: "vdc"
        - name: "vdd"
deploy cluster:
kubectl create -f cluster.yaml
verify installation status:
kubectl --namespace rook-ceph get cephclusters.ceph.rook.io rook-ceph
deploy toolbox:
kubectl create -f /root/rook/deploy/examples/toolbox.yaml
copy ceph.conf and keyring from container to host:
kubectl -n rook-ceph exec deploy/rook-ceph-tools -- cat /etc/ceph/ceph.conf > /etc/ceph/ceph.conf
kubectl -n rook-ceph exec deploy/rook-ceph-tools -- cat /etc/ceph/keyring > /etc/ceph/keyring
verify ceph cluster status:
ceph -s
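on a fresh single-node cluster it can take several minutes for the osds to appear; watching the rook-ceph pods is the easiest way to follow progress:
kubectl -n rook-ceph get pods -w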
deploy storageclass:
kubectl create -f /root/rook/deploy/examples/csi/rbd/storageclass-test.yaml
edit cephfs storageclass yaml:
nano /root/rook/deploy/examples/csi/cephfs/storageclass.yaml
change the following settings:
fsName: test-fs
pool: test-fs-data0
create ceph-filesystem:
cat > /root/testfs-ceph-filesystem.yaml << EOF
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: test-fs
  namespace: rook-ceph
spec:
  metadataPool:
    replicated:
      size: 1
      requireSafeReplicaSize: false
  dataPools:
    - failureDomain: osd
      replicated:
        size: 1
        requireSafeReplicaSize: false
      compressionMode: none
  preservePoolsOnDelete: false
  metadataServer:
    activeCount: 1
    activeStandby: false
EOF
deploy ceph filesystem:
kubectl create -f /root/testfs-ceph-filesystem.yaml
deploy cephfs storageclass:
kubectl create -f /root/rook/deploy/examples/csi/cephfs/storageclass.yaml
test ceph-block storage:
kubectl create -f https://raw.githubusercontent.com/vidalinux/kubernetes/main/rook/ceph/ceph-block-pvc.yaml
verify ceph-block storage status:
kubectl -n default get pvc
test cephfs storage:
kubectl create -f https://raw.githubusercontent.com/vidalinux/kubernetes/main/rook/ceph/ceph-fs-pvc.yaml
verify cephfs storage status:
kubectl -n default get pvc
mount cephfs as local filesystem:
cat > /usr/local/bin/mount-cephfs << "EOF"
#!/bin/bash
MON_HOST=$(kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- grep mon_host /etc/ceph/ceph.conf | cut -d " " -f 3 | tr -d '\r')
CEPH_SECRET=$(kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- grep key /etc/ceph/keyring | cut -d " " -f 3 | tr -d '\r')
MOUNT_DIR=/mnt/ceph-test
if [ ! -d ${MOUNT_DIR} ]; then
  mkdir -p ${MOUNT_DIR}
fi
mount -t ceph -o mds_namespace=test-fs,name=admin,secret=${CEPH_SECRET} ${MON_HOST}:/ ${MOUNT_DIR}
EOF
fix mount-cephfs script permissions:
chmod +x /usr/local/bin/mount-cephfs
run script:
mount-cephfs
confirm cephfs is mounted:
mount | grep /mnt/ceph-test
if you want to remove rook-ceph deployment:
cd /root/rook/deploy/examples
kubectl delete -f cluster.yaml
kubectl delete -f crds.yaml -f common.yaml -f operator.yaml
rm -rf /var/lib/rook/*
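the rook teardown docs also recommend wiping the osd disks before reusing them; for the devices used in this guide that would be (a hedged extra step, destroys all data on those disks; sgdisk comes from the gptfdisk package installed earlier):
for d in /dev/vdb /dev/vdc /dev/vdd; do sgdisk --zap-all $d; done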
= deploy wordpress app =
clone vidalinux kubernetes repo:
git clone https://github.com/vidalinux/kubernetes.git
edit settings on wordpress yaml:
# wordpress container
- name: WORDPRESS_DB_NAME
  value: "wordpressdb"
- name: WORDPRESS_DB_USER
  value: "wordpress"
- name: WORDPRESS_DB_PASSWORD
  value: "wordpress"
- name: WORDPRESS_DB_HOST
  value: "wpvidalinux-db"
- name: TZ
  value: "America/Puerto_Rico"
# mariadb container
- name: MYSQL_DATABASE
  value: wordpressdb
- name: MYSQL_ROOT_PASSWORD
  value: root
- name: MYSQL_USER
  value: wordpress
- name: MYSQL_PASSWORD
  value: wordpress
- name: TZ
  value: "America/Puerto_Rico"
get genuine ssl cert and key:
cat domain.com.crt | base64 -w 0
cat domain.com.key | base64 -w 0
use crt and key to create secret yaml:
cat > /root/kubernetes/wordpress/secret.yaml << EOF
apiVersion: v1
kind: Secret
metadata:
  name: ovoxcloud.com-tls
  namespace: wpvidalinux
data:
  tls.crt: $(cat domain.com.crt | base64 -w 0)
  tls.key: $(cat domain.com.key | base64 -w 0)
type: kubernetes.io/tls
EOF
deploy wordpress app:
kubectl create -f /root/kubernetes/wordpress/wordpress.yml
deploy secret yaml:
kubectl create -f /root/kubernetes/wordpress/secret.yaml
change namespace:
kubectl config set-context --current --namespace=wpvidalinux-app
display pods running in current namespace:
kubectl get pods
show all pods running in all namespaces:
kubectl get pods -A
access application web interface:
https://portal.ovoxcloud.com
= deploy seafile app =
copy genuine ssl certificates to seafile ssl directory:
cp domain.crt /root/kubernetes/seafile/ssl/seafile.domain.com.crt
cp domain.key /root/kubernetes/seafile/ssl/seafile.domain.com.key
edit configs on seafile yaml:
# mariadb container
- name: MYSQL_LOG_CONSOLE
  value: "true"
- name: MYSQL_ROOT_PASSWORD
  value: "livinglavidalinux"
- name: TZ
  value: "America/Puerto_Rico"
# seafile container
- name: DB_HOST
  value: "seafile-db"
- name: DB_ROOT_PASSWD
  value: "livinglavidalinux"
- name: SEAFILE_ADMIN_EMAIL
  value: "junior@ovoxcloud.com"
- name: SEAFILE_ADMIN_PASSWORD
  value: "livinglavidalinux"
- name: SEAFILE_SERVER_HOSTNAME
  value: "seafile.ovoxcloud.com"
- name: SEAFILE_SERVER_LETSENCRYPT
  value: "true"
- name: TZ
  value: "America/Puerto_Rico"
execute copy_certs.sh:
cd /root/kubernetes/seafile
bash copy_certs.sh
deploy seafile application using yaml:
kubectl create -f /root/kubernetes/seafile/seafile.yml
show container log (your pod name will differ; list it with kubectl get pods):
kubectl logs seafile-srv-697c787f5c-px7cw
access application web interface:
https://seafile.ovoxcloud.com
download seafile syncing client for your system:
https://www.seafile.com/en/download
= deploy awx ansible app =
install the latest version of kustomize:
cd /usr/local/bin/
curl -s "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" | bash
deploy awx tower operator:
cd /root/kubernetes/awx
kubectl apply -k .
look at the operator pod logs:
kubectl logs -f deployments/awx-operator-controller-manager -c awx-manager
use crt and key to create secret yaml:
cat > /root/kubernetes/awx/secret.yaml << EOF
apiVersion: v1
kind: Secret
metadata:
  name: ovoxcloud.com-tls
  namespace: awx
data:
  tls.crt: $(cat domain.com.crt | base64 -w 0)
  tls.key: $(cat domain.com.key | base64 -w 0)
type: kubernetes.io/tls
EOF
deploy secret yaml:
kubectl create -f /root/kubernetes/awx/secret.yaml
deploy ingress yaml:
kubectl create -f /root/kubernetes/awx/ingress.yaml
get user admin password for web access:
kubectl -n awx get secret awx-tower-admin-password -o jsonpath="{.data.password}" | base64 --decode ; echo
access web interface:
https://awx.ovoxcloud.com
if you want to uninstall awx:
kustomize build '/root/kubernetes/awx' | kubectl delete -f -
= helm =
install helm:
curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
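once installed you can confirm the version and add a chart repository, for example (illustrative repo, not used elsewhere in this howto):
helm version
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update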
= renew certificates =
verify certificates:
kubeadm certs check-expiration
renew all certificates:
kubeadm certs renew all
replace the config:
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
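kubeadm does not restart the control plane for you; the renewed certificates are only picked up once the static pods restart, e.g. by briefly moving their manifests out of the way (a common approach from the kubeadm docs, use with care on a live cluster):
mv /etc/kubernetes/manifests /etc/kubernetes/manifests.off
sleep 20
mv /etc/kubernetes/manifests.off /etc/kubernetes/manifests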
= references =
[latest version]
* https://github.com/kubernetes/release
[cni latest version]
* https://github.com/containernetworking/plugins/releases
[cri-tools latest version]
* https://github.com/kubernetes-sigs/cri-tools
[nginx supported versions]
* https://github.com/kubernetes/ingress-nginx#supported-versions-table
[single-node ceph cluster kubernetes]
* https://www.rusinov.ie/en/posts/2020/setting-up-single-node-ceph-cluster-for-kubernetes/