diff --git a/Kubernetes/K3S-Deploy/k3s.sh b/Kubernetes/K3S-Deploy/k3s.sh
index 7b7f29c2..724ea809 100644
--- a/Kubernetes/K3S-Deploy/k3s.sh
+++ b/Kubernetes/K3S-Deploy/k3s.sh
@@ -21,186 +21,258 @@ echo -e " \033[32;5m \
 #############################################
 # Version of Kube-VIP to deploy
-KVVERSION="v0.6.3"
+
+KVVERSION="v0.7.0"
 
 # K3S Version
-k3sVersion="v1.26.10+k3s2"
-# Set the IP addresses of the master and work nodes
-master1=192.168.3.21
-master2=192.168.3.22
-master3=192.168.3.23
-worker1=192.168.3.24
-worker2=192.168.3.25
+k3sVersion="v1.27.10+k3s2"
+
+# Set the IP addresses of the master and worker nodes.
+
+masters=(10.0.5.1 10.0.5.2 10.0.5.3)
+workers=(10.0.5.4 10.0.5.5)
 
 # User of remote machines
-user=ubuntu
+
+user=bones
 
 # Interface used on remotes
+
 interface=eth0
 
 # Set the virtual IP address (VIP)
-vip=192.168.3.50
-# Array of master nodes
-masters=($master2 $master3)
+vip=10.0.5.10
+
+# Load balancer IP range
 
-# Array of worker nodes
-workers=($worker1 $worker2)
+lbrange=10.0.5.100-10.0.5.120
 
-# Array of all
-all=($master1 $master2 $master3 $worker1 $worker2)
+# SSH certificate name variable
 
-# Array of all minus master
-allnomaster1=($master2 $master3 $worker1 $worker2)
+certName=id_ed25519
 
-#Loadbalancer IP range
-lbrange=192.168.3.60-192.168.3.80
+# Should we copy the above SSH cert to the local .ssh directory?
+# (Only set this to true if you haven't already done so.)
 
-#ssh certificate name variable
-certName=id_rsa
+copyCert=false
 
 #############################################
 #            DO NOT EDIT BELOW              #
 #############################################
+
 # For testing purposes - in case time is wrong due to VM snapshots
+
 sudo timedatectl set-ntp off
 sudo timedatectl set-ntp on
 
-# Move SSH certs to ~/.ssh and change permissions
-cp /home/$user/{$certName,$certName.pub} /home/$user/.ssh
-chmod 600 /home/$user/.ssh/$certName
-chmod 644 /home/$user/.ssh/$certName.pub
+# Create handy functions for outputting messages.
+
+function notice {
+  echo -e " \033[32;5m${1}\033[0m"
+}
+
+function error {
+  echo -e " \033[31;5m${1}\033[0m"
+}
+
+# Move SSH certs to ~/.ssh and change permissions if requested.
+
+if [ "${copyCert}" = true ]; then
+  cp ${HOME}/{$certName,$certName.pub} ${HOME}/.ssh
+  chmod 600 ${HOME}/.ssh/$certName
+  chmod 644 ${HOME}/.ssh/$certName.pub
+fi
+
+# Set some SSH options used for every connection: include our specified cert
+# and disable strict host key checking.
+
+ssh_ops="-o StrictHostKeyChecking=no -i ${HOME}/.ssh/$certName"
+
+# Create a temporary folder where we can work without leaving artifacts after
+# the script completes.
+
+tempdir=$(mktemp --tmpdir -d kube_install-XXXXX)
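+
+# A small safeguard (a sketch, not strictly required): remove the temporary
+# folder on exit, even when the script aborts early, so no artifacts remain.
+trap 'rm -rf "${tempdir}"' EXIT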
+
+# Download all of the K3S-Deploy files necessary for our work.
+
+repo_path=https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy
+
+for repo_file in kube-vip ipAddressPool l2Advertisement.yaml; do
+  curl --output-dir ${tempdir} -sO ${repo_path}/${repo_file}
+done
+
+# Before starting to modify the cluster, download the remaining remote
+# resources up front, so we abort now if any one of them is unavailable.
+
+curl --output-dir ${tempdir} -sO https://kube-vip.io/manifests/rbac.yaml
+curl --output-dir ${tempdir} -sO https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml
+curl --output-dir ${tempdir} -sO https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/namespace.yaml
+curl --output-dir ${tempdir} -sO https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml
+curl --output-dir ${tempdir} -sO https://raw.githubusercontent.com/inlets/inlets-operator/master/contrib/nginx-sample-deployment.yaml
 
 # Install k3sup to local machine if not already present
+
 if ! command -v k3sup version &> /dev/null
 then
-    echo -e " \033[31;5mk3sup not found, installing\033[0m"
+    error "k3sup not found, installing"
     curl -sLS https://get.k3sup.dev | sh
     sudo install k3sup /usr/local/bin/
 else
-    echo -e " \033[32;5mk3sup already installed\033[0m"
+    notice "k3sup already installed"
 fi
 
 # Install Kubectl if not already present
+
 if ! command -v kubectl version &> /dev/null
 then
-    echo -e " \033[31;5mKubectl not found, installing\033[0m"
-    curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
+    kc_version=$(curl -sL https://dl.k8s.io/release/stable.txt)
+    error "Kubectl not found, installing version ${kc_version}"
+    curl -LO "https://dl.k8s.io/release/${kc_version}/bin/linux/amd64/kubectl"
     sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
 else
-    echo -e " \033[32;5mKubectl already installed\033[0m"
+    notice "Kubectl already installed"
 fi
 
-# Create SSH Config file to ignore checking (don't use in production!)
-echo "StrictHostKeyChecking no" > ~/.ssh/config
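+
+# Host key checking is instead disabled per connection through ${ssh_ops}
+# above, so the user's ~/.ssh/config is left untouched.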
+
+# Add ssh keys for all nodes; this may already have been done as part of
+# cloud-init, but it never hurts to be certain.
 
-#add ssh keys for all nodes
-for node in "${all[@]}"; do
-  ssh-copy-id $user@$node
+for node in ${masters[@]} ${workers[@]}; do
+  ssh-copy-id ${ssh_ops} $user@$node
 done
 
 # Install policycoreutils for each node
-for newnode in "${all[@]}"; do
-  ssh $user@$newnode -i ~/.ssh/$certName sudo su < $HOME/kube-vip.yaml
+kubectl apply -f ${tempdir}/rbac.yaml
+
+# Step 3: Copy kube-vip.yaml to master1
+
+sed -i 's/$interface/'$interface'/g; s/$vip/'$vip'/g' ${tempdir}/kube-vip
+sed -i "s%/version.*%/version: ${KVVERSION}%g" ${tempdir}/kube-vip
 
-# Step 4: Copy kube-vip.yaml to master1
-scp -i ~/.ssh/$certName $HOME/kube-vip.yaml $user@$master1:~/kube-vip.yaml
+scp ${ssh_ops} ${tempdir}/kube-vip $user@${masters[0]}:kube-vip.yaml
+
+# Step 4: Connect to Master1 and move kube-vip.yaml
 
-# Step 5: Connect to Master1 and move kube-vip.yaml
-ssh $user@$master1 -i ~/.ssh/$certName <<- EOF
+ssh ${ssh_ops} $user@${masters[0]} <<- EOF
   sudo mkdir -p /var/lib/rancher/k3s/server/manifests
   sudo mv kube-vip.yaml /var/lib/rancher/k3s/server/manifests/kube-vip.yaml
 EOF
 
-# Step 6: Add new master nodes (servers) & workers
-for newnode in "${masters[@]}"; do
+# Step 5: Add new master nodes (servers) & workers
+
+notice "Setting up remaining master nodes"
+
+# masters[0] is already running the cluster, so join every master after it.
+for newnode in ${masters[@]:1}; do
   k3sup join \
   --ip $newnode \
   --user $user \
   --sudo \
   --k3s-version $k3sVersion \
   --server \
-  --server-ip $master1 \
+  --server-ip ${masters[0]} \
   --ssh-key $HOME/.ssh/$certName \
   --k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$newnode --node-taint node-role.kubernetes.io/master=true:NoSchedule" \
   --server-user $user
-  echo -e " \033[32;5mMaster node joined successfully!\033[0m"
+
+  notice "Master node ${newnode} joined successfully!"
 done
 
-# add workers
-for newagent in "${workers[@]}"; do
+# Add workers
+
+notice "Setting up worker nodes"
+
+for newagent in ${workers[@]}; do
   k3sup join \
   --ip $newagent \
   --user $user \
   --sudo \
   --k3s-version $k3sVersion \
-  --server-ip $master1 \
+  --server-ip ${masters[0]} \
   --ssh-key $HOME/.ssh/$certName \
   --k3s-extra-args "--node-label \"longhorn=true\" --node-label \"worker=true\""
-  echo -e " \033[32;5mAgent node joined successfully!\033[0m"
+
+  notice "Agent node ${newagent} joined successfully!"
 done
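+
+# Optional sanity check (a sketch; the steps below do not depend on it): wait
+# until every node reports Ready before installing the load balancer stack.
+kubectl wait --for=condition=Ready node --all --timeout=300s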
 
-# Step 7: Install kube-vip as network LoadBalancer - Install the kube-vip Cloud Provider
-kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml
-
-# Step 8: Install Metallb
-kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/namespace.yaml
-kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml
-# Download ipAddressPool and configure using lbrange above
-curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/ipAddressPool
-cat ipAddressPool | sed 's/$lbrange/'$lbrange'/g' > $HOME/ipAddressPool.yaml
-kubectl apply -f $HOME/ipAddressPool.yaml
-
-# Step 9: Test with Nginx
-kubectl apply -f https://raw.githubusercontent.com/inlets/inlets-operator/master/contrib/nginx-sample-deployment.yaml -n default
+# Step 6: Install kube-vip as network LoadBalancer - Install the kube-vip Cloud Provider
+
+notice "Installing kube-vip as Load Balancer and Cloud Provider"
+
+kubectl apply -f ${tempdir}/kube-vip-cloud-controller.yaml
+
+# Step 7: Install Metallb
+
+notice "Installing MetalLB"
+
+kubectl apply -f ${tempdir}/namespace.yaml
+kubectl apply -f ${tempdir}/metallb-native.yaml
+
+# Configure ipAddressPool using lbrange above
+
+notice "Setting up IP address pool"
+
+cat ${tempdir}/ipAddressPool | sed 's/$lbrange/'$lbrange'/g' > ${tempdir}/ipAddressPool.yaml
+
+# Step 8: Test with Nginx
+
+notice "Testing with Nginx"
+
+kubectl apply -f ${tempdir}/nginx-sample-deployment.yaml -n default
 kubectl expose deployment nginx-1 --port=80 --type=LoadBalancer -n default
 
-echo -e " \033[32;5mWaiting for K3S to sync and LoadBalancer to come online\033[0m"
+notice "Waiting for K3S to sync and LoadBalancer to come online"
 
 while [[ $(kubectl get pods -l app=nginx -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}') != "True" ]]; do
   sleep 1
 done
 
-# Step 10: Deploy IP Pools and l2Advertisement
+# Step 9: Deploy IP Pools and l2Advertisement
+
+notice "Deploying IP Pools and l2Advertisement"
+
 kubectl wait --namespace metallb-system \
   --for=condition=ready pod \
   --selector=component=controller \
   --timeout=120s
-kubectl apply -f ipAddressPool.yaml
-kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/l2Advertisement.yaml
+
+kubectl apply -f ${tempdir}/ipAddressPool.yaml
+kubectl apply -f ${tempdir}/l2Advertisement.yaml
 
 kubectl get nodes
 kubectl get svc
 kubectl get pods --all-namespaces -o wide
 
-echo -e " \033[32;5mHappy Kubing! Access Nginx at EXTERNAL-IP above\033[0m"
+notice "Happy Kubing! Access Nginx at EXTERNAL-IP above"
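+
+# Possible follow-up (a sketch): once the EXTERNAL-IP answers, the Nginx
+# smoke test can be removed again with:
+#   kubectl delete service/nginx-1 deployment/nginx-1 -n default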
diff --git a/Kubernetes/Longhorn/longhorn-K3S.sh b/Kubernetes/Longhorn/longhorn-K3S.sh
index c3a867dd..bfd4b795 100644
--- a/Kubernetes/Longhorn/longhorn-K3S.sh
+++ b/Kubernetes/Longhorn/longhorn-K3S.sh
@@ -20,41 +20,45 @@ echo -e " \033[32;2m \
 #############################################
 # YOU SHOULD ONLY NEED TO EDIT THIS SECTION #
 #############################################
 # Set the IP addresses of master1
-master1=192.168.3.21
-# Set the IP addresses of your Longhorn nodes
-longhorn1=192.168.3.26
-longhorn2=192.168.3.27
-longhorn3=192.168.3.28
+master1=10.0.5.1
+
+# Array of Longhorn nodes
+
+storage=(10.0.5.21 10.0.5.22 10.0.5.23)
 
 # User of remote machines
-user=ubuntu
+
+user=bones
 
 # Interface used on remotes
+
 interface=eth0
 
 # Set the virtual IP address (VIP)
-vip=192.168.3.50
-# Array of longhorn nodes
-storage=($longhorn1 $longhorn2 $longhorn3)
+vip=10.0.5.20
+
+# SSH certificate name variable
 
-#ssh certificate name variable
-certName=id_rsa
+certName=id_ed25519
 
 #############################################
 #            DO NOT EDIT BELOW              #
 #############################################
+
 # For testing purposes - in case time is wrong due to VM snapshots
+
 sudo timedatectl set-ntp off
 sudo timedatectl set-ntp on
 
 # add ssh keys for all nodes
-for node in "${storage[@]}"; do
-  ssh-copy-id $user@$node
-done
+#for node in "${storage[@]}"; do
+#  ssh-copy-id $user@$node
+#done
+
+# Add open-iscsi - needed for Debian and non-cloud Ubuntu
 
-# add open-iscsi - needed for Debian and non-cloud Ubuntu
 if ! command -v sudo service open-iscsi status &> /dev/null
 then
     echo -e " \033[31;5mOpen-ISCSI not found, installing\033[0m"
@@ -64,6 +68,7 @@ else
 fi
 
 # Step 1: Add new longhorn nodes to cluster (note: label added)
+
 for newnode in "${storage[@]}"; do
   k3sup join \
   --ip $newnode \
@@ -77,7 +82,9 @@ for newnode in "${storage[@]}"; do
 done
 
 # Step 2: Install Longhorn (using modified Official to pin to Longhorn Nodes)
-kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/Longhorn/longhorn.yaml
+
+kubectl apply -f longhorn.yaml
+
 kubectl get pods \
   --namespace longhorn-system \
   --watch