From fec2a9205179341809aece597b9a02d90500cabc Mon Sep 17 00:00:00 2001 From: Ira Abramov Date: Thu, 4 Apr 2024 15:45:26 +0300 Subject: [PATCH 01/13] standard indentations etc --- .editorconfig | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 .editorconfig diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 00000000..8efe08d4 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,55 @@ +# EditorConfig is awesome: http://EditorConfig.org + +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = tab +indent_size = 4 + +[*.{c,h,cpp}] + +# Matches multiple files with brace expansion notation +# Set default charset +[*.{js,py}] +charset = utf-8 + +# 4 space indentation by tab +[*.py] +indent_style = tab +indent_size = 4 + +# Tab indentation (no size specified) +[Makefile*] +indent_style = tab + +# Indentation override for all JS under lib directory +[lib/**.js] +indent_style = tab +indent_size = 2 + +[.gitconfig] +indent_style = tab +indent_size = 2 + +[Vagrantfile] +indent_style = tab +indent_size = 2 + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = spaces +indent_size = 2 + +# Standards I like +[{*.groovy,*.rb,*.sh,.bash*}] +indent_style = tab +indent_size = 4 + +# Standards I'm forced to... +[{*.md,*.MD,*.yaml,*.yml}] +indent_style = spaces +indent_size = 2 From bc087e1c36322f0c500f51eadc930b9b5f825a8f Mon Sep 17 00:00:00 2001 From: Ira Abramov Date: Thu, 4 Apr 2024 15:57:51 +0300 Subject: [PATCH 02/13] Add more checks in pre-commit --- .pre-commit-config.yaml | 43 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 41 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 615bc752..8f246f17 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,13 +1,52 @@ +# See https://pre-commit.com for more information +# See https://pre-commit.com/hooks.html for more hooks --- +# fail_fast: true +minimum_pre_commit_version: 1.18.1 +# exclude: docs/_build/ repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.5.0 hooks: - - id: check-symlinks - - id: destroyed-symlinks - id: detect-aws-credentials args: [--allow-missing-credentials] + - id: trailing-whitespace # trims trailing whitespace. + - id: requirements-txt-fixer + exclude: ".(md|rst)$" + - id: end-of-file-fixer # ensures that a file is either empty, or ends with one newline. + - id: check-ast # simply checks whether the files parse as valid python. + - id: check-merge-conflict # checks for files that contain merge conflict strings. + - id: check-symlinks # checks for symlinks which do not point to anything. + - id: check-added-large-files # prevents giant files from being committed. + args: ["--maxkb=4096"] + - id: check-builtin-literals + - id: check-case-conflict + - id: check-toml # checks toml files for parseable syntax. + - id: check-docstring-first # checks a common error of defining a docstring after code. + - id: check-executables-have-shebangs # ensures that (non-binary) executables have a shebang. + - id: check-shebang-scripts-are-executable + - id: check-yaml # checks yaml files for parseable syntax. + - id: debug-statements # checks for debugger imports and py37+ `breakpoint()` calls in python source. 
+ - id: destroyed-symlinks # detects symlinks which are changed to regular files with a content of a path which that symlink was pointing to. + - id: detect-private-key # detects the presence of private keys. + - id: mixed-line-ending # replaces or checks mixed line ending. + args: ["--fix=lf"] + - repo: https://github.com/IamTheFij/docker-pre-commit rev: v3.0.1 hooks: - id: docker-compose-check + + - repo: https://github.com/pre-commit/pre-commit + rev: v3.5.0 + hooks: + - id: validate_manifest + + - repo: https://github.com/jumanjihouse/pre-commit-hooks + rev: 2.1.5 + hooks: + #- id: git-check # Configure in .gitattributes + - id: shellcheck + # exclude: ".bats$" + - id: shfmt + # exclude: ".bats$" From 39883fa083892db8de6e0c46a5938d645450e5a8 Mon Sep 17 00:00:00 2001 From: Ira Abramov Date: Thu, 4 Apr 2024 16:03:10 +0300 Subject: [PATCH 03/13] Domain default comes from the environment, KubeVIP version upgraded to latest --- Kubernetes/RKE2/rke2.sh | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/Kubernetes/RKE2/rke2.sh b/Kubernetes/RKE2/rke2.sh index 90998cfd..3b6f60a0 100644 --- a/Kubernetes/RKE2/rke2.sh +++ b/Kubernetes/RKE2/rke2.sh @@ -21,7 +21,10 @@ echo -e " \033[32;5m \ ############################################# # Version of Kube-VIP to deploy -KVVERSION="v0.6.3" +KVVERSION="v0.7.2" + +# The domain name of your cluster, inherit env by default +DOMAIN=${DOMAIN:-my.org} # Set the IP addresses of the admin, masters, and workers nodes admin=192.168.3.5 @@ -248,7 +251,7 @@ kubectl get pods --namespace cert-manager echo -e " \033[32;5mDeploying Rancher\033[0m" helm install rancher rancher-latest/rancher \ --namespace cattle-system \ - --set hostname=rancher.my.org \ + --set hostname=rancher.${DOMAIN} \ --set bootstrapPassword=admin kubectl -n cattle-system rollout status deploy/rancher kubectl -n cattle-system get deploy rancher From 50c7274c95c553892072e778496603084fbd3f36 Mon Sep 17 00:00:00 2001 From: Ira Abramov Date: Thu, 4 Apr 2024 16:06:12 +0300 Subject: [PATCH 04/13] fix run of whitespaces, shfmt, indentations --- Kubernetes/RKE2/rke2.sh | 83 +++++++++++++++++++++-------------------- 1 file changed, 42 insertions(+), 41 deletions(-) mode change 100644 => 100755 Kubernetes/RKE2/rke2.sh diff --git a/Kubernetes/RKE2/rke2.sh b/Kubernetes/RKE2/rke2.sh old mode 100644 new mode 100755 index 3b6f60a0..bbf47547 --- a/Kubernetes/RKE2/rke2.sh +++ b/Kubernetes/RKE2/rke2.sh @@ -15,7 +15,6 @@ echo -e " \033[36;5m \ echo -e " \033[32;5m https://youtube.com/@jims-garage \033[0m" echo -e " \033[32;5m \033[0m" - ############################################# # YOU SHOULD ONLY NEED TO EDIT THIS SECTION # ############################################# @@ -73,17 +72,16 @@ sudo timedatectl set-ntp on # Move SSH certs to ~/.ssh and change permissions cp /home/$user/{$certName,$certName.pub} /home/$user/.ssh -chmod 600 /home/$user/.ssh/$certName +chmod 600 /home/$user/.ssh/$certName chmod 644 /home/$user/.ssh/$certName.pub # Install Kubectl if not already present -if ! command -v kubectl version &> /dev/null -then - echo -e " \033[31;5mKubectl not found, installing\033[0m" - curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" - sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl +if ! 
command -v kubectl version &>/dev/null; then + echo -e " \033[31;5mKubectl not found, installing\033[0m" + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" + sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl else - echo -e " \033[32;5mKubectl already installed\033[0m" + echo -e " \033[32;5mKubectl already installed\033[0m" fi # Create SSH Config file to ignore checking (don't use in production!) @@ -91,7 +89,7 @@ sed -i '1s/^/StrictHostKeyChecking no\n/' ~/.ssh/config #add ssh keys for all nodes for node in "${all[@]}"; do - ssh-copy-id $user@$node + ssh-copy-id $user@$node done # Step 1: Create Kube VIP @@ -99,7 +97,7 @@ done sudo mkdir -p /var/lib/rancher/rke2/server/manifests # Install the kube-vip deployment into rke2's self-installing manifest folder curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/kube-vip -cat kube-vip | sed 's/$interface/'$interface'/g; s/$vip/'$vip'/g' > $HOME/kube-vip.yaml +cat kube-vip | sed 's/$interface/'$interface'/g; s/$vip/'$vip'/g' >$HOME/kube-vip.yaml sudo mv kube-vip.yaml /var/lib/rancher/rke2/server/manifests/kube-vip.yaml # Find/Replace all k3s entries to represent rke2 @@ -114,26 +112,29 @@ mkdir ~/.kube # create the rke2 config file sudo mkdir -p /etc/rancher/rke2 touch config.yaml -echo "tls-san:" >> config.yaml -echo " - $vip" >> config.yaml -echo " - $master1" >> config.yaml -echo " - $master2" >> config.yaml -echo " - $master3" >> config.yaml -echo "write-kubeconfig-mode: 0644" >> config.yaml -echo "disable:" >> config.yaml -echo " - rke2-ingress-nginx" >> config.yaml +echo "tls-san:" >>config.yaml +echo " - $vip" >>config.yaml +echo " - $master1" >>config.yaml +echo " - $master2" >>config.yaml +echo " - $master3" >>config.yaml +echo "write-kubeconfig-mode: 0644" >>config.yaml +echo "disable:" >>config.yaml +echo " - rke2-ingress-nginx" >>config.yaml # copy config.yaml to rancher directory sudo cp ~/config.yaml /etc/rancher/rke2/config.yaml # update path with rke2-binaries -echo 'export KUBECONFIG=/etc/rancher/rke2/rke2.yaml' >> ~/.bashrc ; echo 'export PATH=${PATH}:/var/lib/rancher/rke2/bin' >> ~/.bashrc ; echo 'alias k=kubectl' >> ~/.bashrc ; source ~/.bashrc ; +echo 'export KUBECONFIG=/etc/rancher/rke2/rke2.yaml' >>~/.bashrc +echo 'export PATH=${PATH}:/var/lib/rancher/rke2/bin' >>~/.bashrc +echo 'alias k=kubectl' >>~/.bashrc +source ~/.bashrc # Step 2: Copy kube-vip.yaml and certs to all masters for newnode in "${allmasters[@]}"; do - scp -i ~/.ssh/$certName $HOME/kube-vip.yaml $user@$newnode:~/kube-vip.yaml - scp -i ~/.ssh/$certName $HOME/config.yaml $user@$newnode:~/config.yaml - scp -i ~/.ssh/$certName ~/.ssh/{$certName,$certName.pub} $user@$newnode:~/.ssh - echo -e " \033[32;5mCopied successfully!\033[0m" + scp -i ~/.ssh/$certName $HOME/kube-vip.yaml $user@$newnode:~/kube-vip.yaml + scp -i ~/.ssh/$certName $HOME/config.yaml $user@$newnode:~/config.yaml + scp -i ~/.ssh/$certName ~/.ssh/{$certName,$certName.pub} $user@$newnode:~/.ssh + echo -e " \033[32;5mCopied successfully!\033[0m" done # Step 3: Connect to Master1 and move kube-vip.yaml and config.yaml. Then install RKE2, copy token back to admin machine. 
We then use the token to bootstrap additional masternodes @@ -155,8 +156,8 @@ EOF echo -e " \033[32;5mMaster1 Completed\033[0m" # Step 4: Set variable to the token we just extracted, set kube config location -token=`cat token` -sudo cat ~/.kube/rke2.yaml | sed 's/127.0.0.1/'$master1'/g' > $HOME/.kube/config +token=$(cat token) +sudo cat ~/.kube/rke2.yaml | sed 's/127.0.0.1/'$master1'/g' >$HOME/.kube/config sudo chown $(id -u):$(id -g) $HOME/.kube/config export KUBECONFIG=${HOME}/.kube/config sudo cp ~/.kube/config /etc/rancher/rke2/rke2.yaml @@ -168,7 +169,7 @@ kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provi # Step 6: Add other Masternodes, note we import the token we extracted from step 3 for newnode in "${masters[@]}"; do - ssh -tt $user@$newnode -i ~/.ssh/$certName sudo su <> /etc/rancher/rke2/config.yaml @@ -183,14 +184,14 @@ for newnode in "${masters[@]}"; do systemctl start rke2-server.service exit EOF - echo -e " \033[32;5mMaster node joined successfully!\033[0m" + echo -e " \033[32;5mMaster node joined successfully!\033[0m" done kubectl get nodes # Step 7: Add Workers for newnode in "${workers[@]}"; do - ssh -tt $user@$newnode -i ~/.ssh/$certName sudo su <> /etc/rancher/rke2/config.yaml @@ -203,7 +204,7 @@ for newnode in "${workers[@]}"; do systemctl start rke2-agent.service exit EOF - echo -e " \033[32;5mWorker node joined successfully!\033[0m" + echo -e " \033[32;5mWorker node joined successfully!\033[0m" done kubectl get nodes @@ -214,14 +215,14 @@ kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manif kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml # Download ipAddressPool and configure using lbrange above curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/ipAddressPool -cat ipAddressPool | sed 's/$lbrange/'$lbrange'/g' > $HOME/ipAddressPool.yaml +cat ipAddressPool | sed 's/$lbrange/'$lbrange'/g' >$HOME/ipAddressPool.yaml # Step 9: Deploy IP Pools and l2Advertisement echo -e " \033[32;5mAdding IP Pools, waiting for Metallb to be available first. 
This can take a long time as we're likely being rate limited for container pulls...\033[0m" kubectl wait --namespace metallb-system \ - --for=condition=ready pod \ - --selector=component=controller \ - --timeout=1800s + --for=condition=ready pod \ + --selector=component=controller \ + --timeout=1800s kubectl apply -f ipAddressPool.yaml kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/l2Advertisement.yaml @@ -242,17 +243,17 @@ kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/ helm repo add jetstack https://charts.jetstack.io helm repo update helm install cert-manager jetstack/cert-manager \ ---namespace cert-manager \ ---create-namespace \ ---version v1.13.2 + --namespace cert-manager \ + --create-namespace \ + --version v1.13.2 kubectl get pods --namespace cert-manager # Install Rancher echo -e " \033[32;5mDeploying Rancher\033[0m" helm install rancher rancher-latest/rancher \ - --namespace cattle-system \ - --set hostname=rancher.${DOMAIN} \ - --set bootstrapPassword=admin + --namespace cattle-system \ + --set hostname=rancher.${DOMAIN} \ + --set bootstrapPassword=admin kubectl -n cattle-system rollout status deploy/rancher kubectl -n cattle-system get deploy rancher @@ -260,8 +261,8 @@ kubectl -n cattle-system get deploy rancher kubectl get svc -n cattle-system kubectl expose deployment rancher --name=rancher-lb --port=443 --type=LoadBalancer -n cattle-system while [[ $(kubectl get svc -n cattle-system 'jsonpath={..status.conditions[?(@.type=="Pending")].status}') = "True" ]]; do - sleep 5 - echo -e " \033[32;5mWaiting for LoadBalancer to come online\033[0m" + sleep 5 + echo -e " \033[32;5mWaiting for LoadBalancer to come online\033[0m" done kubectl get svc -n cattle-system From 32945936298c4413218ed81f9599a0203aaed95e Mon Sep 17 00:00:00 2001 From: Ira Abramov Date: Thu, 4 Apr 2024 16:25:12 +0300 Subject: [PATCH 05/13] changed to my own IPs (at least while I test this script) and organized the veriable editable section --- Kubernetes/RKE2/rke2.sh | 37 +++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 18 deletions(-) mode change 100755 => 100644 Kubernetes/RKE2/rke2.sh diff --git a/Kubernetes/RKE2/rke2.sh b/Kubernetes/RKE2/rke2.sh old mode 100755 new mode 100644 index bbf47547..f49d5633 --- a/Kubernetes/RKE2/rke2.sh +++ b/Kubernetes/RKE2/rke2.sh @@ -26,12 +26,16 @@ KVVERSION="v0.7.2" DOMAIN=${DOMAIN:-my.org} # Set the IP addresses of the admin, masters, and workers nodes -admin=192.168.3.5 -master1=192.168.3.21 -master2=192.168.3.22 -master3=192.168.3.23 -worker1=192.168.3.24 -worker2=192.168.3.25 +# "admin" is the machine from which you will be running the ops, +# in theory it can be "localhost", just make sure you have sshd +# running and accessible there. 
+admin=192.168.60.22 +master1=192.168.60.37 +master2=192.168.60.38 +master3=192.168.60.39 + +# Array of worker nodes +workers=(192.168.60.26 192.168.60.83) # User of remote machines user=ubuntu @@ -40,25 +44,22 @@ user=ubuntu interface=eth0 # Set the virtual IP address (VIP) -vip=192.168.3.50 +vip=192.168.60.171 -# Array of all master nodes -allmasters=($master1 $master2 $master3) +# Array of extra master nodes +extramasters=("$master2" "$master3") -# Array of master nodes -masters=($master2 $master3) +# Array of all master nodes +allmasters=("$master1" "${extramasters[@]}") -# Array of worker nodes -workers=($worker1 $worker2) +# Array of all minus master1 +allnomaster1=("${extramasters[@]}" "${workers[@]}") # Array of all -all=($master1 $master2 $master3 $worker1 $worker2) - -# Array of all minus master1 -allnomaster1=($master2 $master3 $worker1 $worker2) +all=("$master1" "${allnomaster1[@]}") #Loadbalancer IP range -lbrange=192.168.3.60-192.168.3.80 +lbrange=192.168.60.171-192.168.60.189 #ssh certificate name variable certName=id_rsa From 793753296a1c548d3f56ec18728ac3bddc419a02 Mon Sep 17 00:00:00 2001 From: Ira Abramov Date: Thu, 4 Apr 2024 16:33:47 +0300 Subject: [PATCH 06/13] make less assumptions about what platforms the users have --- Kubernetes/RKE2/rke2.sh | 41 ++++++++++++++++++++++++++++++----------- 1 file changed, 30 insertions(+), 11 deletions(-) mode change 100644 => 100755 Kubernetes/RKE2/rke2.sh diff --git a/Kubernetes/RKE2/rke2.sh b/Kubernetes/RKE2/rke2.sh old mode 100644 new mode 100755 index f49d5633..07e111ee --- a/Kubernetes/RKE2/rke2.sh +++ b/Kubernetes/RKE2/rke2.sh @@ -67,14 +67,33 @@ certName=id_rsa ############################################# # DO NOT EDIT BELOW # ############################################# + +#fail immediately on errors +set -e + # For testing purposes - in case time is wrong due to VM snapshots -sudo timedatectl set-ntp off -sudo timedatectl set-ntp on +if hash timedatectl 2>/dev/null; then + sudo timedatectl set-ntp off + sudo timedatectl set-ntp on +fi -# Move SSH certs to ~/.ssh and change permissions -cp /home/$user/{$certName,$certName.pub} /home/$user/.ssh -chmod 600 /home/$user/.ssh/$certName -chmod 644 /home/$user/.ssh/$certName.pub +# Create a directory for the SSH certs +mkdir -p ~/.ssh + +# don't assume the /home convention, Some run this on a mac :) +homedir=$(eval echo ~$user) + +# Generate SSH certs if missing +if [ ! -f "$homedir"/.ssh/$certName ]; then + if [ -f "$homedir"/$certName ]; then + # Move SSH certs to ~/.ssh and change permissions + cp "$homedir"/$certName{,.pub} "$homedir"/.ssh + chmod 400 "$homedir"/.ssh/* + chmod 700 "$homedir"/.ssh + else + ssh-keygen -t rsa -f "$homedir"/.ssh/$certName -N "" + fi +fi # Install Kubectl if not already present if ! 
command -v kubectl version &>/dev/null; then @@ -149,9 +168,9 @@ curl -sfL https://get.rke2.io | sh - systemctl enable rke2-server.service systemctl start rke2-server.service echo "StrictHostKeyChecking no" > ~/.ssh/config -ssh-copy-id -i /home/$user/.ssh/$certName $user@$admin -scp -i /home/$user/.ssh/$certName /var/lib/rancher/rke2/server/token $user@$admin:~/token -scp -i /home/$user/.ssh/$certName /etc/rancher/rke2/rke2.yaml $user@$admin:~/.kube/rke2.yaml +ssh-copy-id -i "$homedir"/.ssh/$certName $user@$admin +scp -i "$homedir"/.ssh/$certName /var/lib/rancher/rke2/server/token $user@$admin:~/token +scp -i "$homedir"/.ssh/$certName /etc/rancher/rke2/rke2.yaml $user@$admin:~/.kube/rke2.yaml exit EOF echo -e " \033[32;5mMaster1 Completed\033[0m" @@ -169,7 +188,7 @@ kubectl apply -f https://kube-vip.io/manifests/rbac.yaml kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml # Step 6: Add other Masternodes, note we import the token we extracted from step 3 -for newnode in "${masters[@]}"; do +for newnode in "${extramasters[@]}"; do ssh -tt $user@$newnode -i ~/.ssh/$certName sudo su < Date: Thu, 4 Apr 2024 17:01:27 +0300 Subject: [PATCH 07/13] Is the KVVERSION var ever used? shellcheck was not happy it was set and never read. --- Kubernetes/RKE2/rke2.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Kubernetes/RKE2/rke2.sh b/Kubernetes/RKE2/rke2.sh index 07e111ee..78a08e72 100755 --- a/Kubernetes/RKE2/rke2.sh +++ b/Kubernetes/RKE2/rke2.sh @@ -20,7 +20,7 @@ echo -e " \033[32;5m \ ############################################# # Version of Kube-VIP to deploy -KVVERSION="v0.7.2" +export KVVERSION="v0.7.2" # The domain name of your cluster, inherit env by default DOMAIN=${DOMAIN:-my.org} From 01ddd3a81780f1c8e8ff29d67e9a7838ffc18c3e Mon Sep 17 00:00:00 2001 From: Ira Abramov Date: Thu, 4 Apr 2024 17:05:50 +0300 Subject: [PATCH 08/13] separated the local $USER from $remoteuser and $HOME from remote homes the correct way. Also make sure you don't assume local machine is Linux --- Kubernetes/RKE2/rke2.sh | 52 ++++++++++++++++++++++------------------- 1 file changed, 28 insertions(+), 24 deletions(-) diff --git a/Kubernetes/RKE2/rke2.sh b/Kubernetes/RKE2/rke2.sh index 78a08e72..f5eb7190 100755 --- a/Kubernetes/RKE2/rke2.sh +++ b/Kubernetes/RKE2/rke2.sh @@ -38,7 +38,7 @@ master3=192.168.60.39 workers=(192.168.60.26 192.168.60.83) # User of remote machines -user=ubuntu +remoteuser=ubuntu # Interface used on remotes interface=eth0 @@ -80,26 +80,27 @@ fi # Create a directory for the SSH certs mkdir -p ~/.ssh -# don't assume the /home convention, Some run this on a mac :) -homedir=$(eval echo ~$user) - # Generate SSH certs if missing -if [ ! -f "$homedir"/.ssh/$certName ]; then - if [ -f "$homedir"/$certName ]; then +if [ ! -f "$HOME"/.ssh/$certName ]; then + if [ -f "$HOME"/$certName ]; then # Move SSH certs to ~/.ssh and change permissions - cp "$homedir"/$certName{,.pub} "$homedir"/.ssh - chmod 400 "$homedir"/.ssh/* - chmod 700 "$homedir"/.ssh + cp "$HOME"/$certName{,.pub} "$HOME"/.ssh + chmod 400 "$HOME"/.ssh/* + chmod 700 "$HOME"/.ssh else - ssh-keygen -t rsa -f "$homedir"/.ssh/$certName -N "" + ssh-keygen -t rsa -f "$HOME"/.ssh/$certName -N "" fi fi # Install Kubectl if not already present if ! 
command -v kubectl version &>/dev/null; then
-  echo -e " \033[31;5mKubectl not found, installing\033[0m"
-  curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
-  sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
+  if [ "$(uname -s)" == "Darwin" ]; then
+    brew install kubernetes-cli
+  else # assume Linux?
+    echo -e " \033[31;5mKubectl not found, installing\033[0m"
+    curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/$(uname -m)/kubectl"
+    sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
+  fi
 else
   echo -e " \033[32;5mKubectl already installed\033[0m"
 fi
@@ -109,7 +110,7 @@ sed -i '1s/^/StrictHostKeyChecking no\n/' ~/.ssh/config

 #add ssh keys for all nodes
 for node in "${all[@]}"; do
-  ssh-copy-id $user@$node
+  ssh-copy-id $remoteuser@$node
 done

 # Step 1: Create Kube VIP
@@ -125,7 +126,7 @@ sudo sed -i 's/k3s/rke2/g' /var/lib/rancher/rke2/server/manifests/kube-vip.yaml
 # copy kube-vip.yaml to home directory
 sudo cp /var/lib/rancher/rke2/server/manifests/kube-vip.yaml ~/kube-vip.yaml
 # change owner
-sudo chown $user:$user kube-vip.yaml
+sudo chown $USER:$USER kube-vip.yaml
 # make kube folder to run kubectl later
 mkdir ~/.kube

@@ -145,20 +146,23 @@ sudo cp ~/config.yaml /etc/rancher/rke2/config.yaml

 # update path with rke2-binaries
 echo 'export KUBECONFIG=/etc/rancher/rke2/rke2.yaml' >>~/.bashrc
+# shellcheck disable=SC2016
 echo 'export PATH=${PATH}:/var/lib/rancher/rke2/bin' >>~/.bashrc
 echo 'alias k=kubectl' >>~/.bashrc
+
+# shellcheck disable=SC1090
 source ~/.bashrc

 # Step 2: Copy kube-vip.yaml and certs to all masters
 for newnode in "${allmasters[@]}"; do
-  scp -i ~/.ssh/$certName $HOME/kube-vip.yaml $user@$newnode:~/kube-vip.yaml
-  scp -i ~/.ssh/$certName $HOME/config.yaml $user@$newnode:~/config.yaml
-  scp -i ~/.ssh/$certName ~/.ssh/{$certName,$certName.pub} $user@$newnode:~/.ssh
+  scp -i ~/.ssh/$certName $HOME/kube-vip.yaml $remoteuser@$newnode:~/kube-vip.yaml
+  scp -i ~/.ssh/$certName $HOME/config.yaml $remoteuser@$newnode:~/config.yaml
+  scp -i ~/.ssh/$certName ~/.ssh/{$certName,$certName.pub} $remoteuser@$newnode:~/.ssh
   echo -e " \033[32;5mCopied successfully!\033[0m"
 done

 # Step 3: Connect to Master1 and move kube-vip.yaml and config.yaml. Then install RKE2, copy token back to admin machine.
We then use the token to bootstrap additional masternodes -ssh -tt $user@$master1 -i ~/.ssh/$certName sudo su < ~/.ssh/config -ssh-copy-id -i "$homedir"/.ssh/$certName $user@$admin -scp -i "$homedir"/.ssh/$certName /var/lib/rancher/rke2/server/token $user@$admin:~/token -scp -i "$homedir"/.ssh/$certName /etc/rancher/rke2/rke2.yaml $user@$admin:~/.kube/rke2.yaml +ssh-copy-id -i ~/.ssh/$certName $remoteuser@$admin +scp -i ~/.ssh/$certName /var/lib/rancher/rke2/server/token $remoteuser@$admin:~/token +scp -i ~/.ssh/$certName /etc/rancher/rke2/rke2.yaml $remoteuser@$admin:~/.kube/rke2.yaml exit EOF echo -e " \033[32;5mMaster1 Completed\033[0m" @@ -189,7 +193,7 @@ kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provi # Step 6: Add other Masternodes, note we import the token we extracted from step 3 for newnode in "${extramasters[@]}"; do - ssh -tt $user@$newnode -i ~/.ssh/$certName sudo su <> /etc/rancher/rke2/config.yaml @@ -211,7 +215,7 @@ kubectl get nodes # Step 7: Add Workers for newnode in "${workers[@]}"; do - ssh -tt $user@$newnode -i ~/.ssh/$certName sudo su <> /etc/rancher/rke2/config.yaml From 3af315b3dd3c990aa22340b4c37eeb425f631f62 Mon Sep 17 00:00:00 2001 From: Ira Abramov Date: Thu, 4 Apr 2024 17:21:26 +0300 Subject: [PATCH 09/13] yup, too risky --- Kubernetes/RKE2/rke2.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Kubernetes/RKE2/rke2.sh b/Kubernetes/RKE2/rke2.sh index f5eb7190..39850be2 100755 --- a/Kubernetes/RKE2/rke2.sh +++ b/Kubernetes/RKE2/rke2.sh @@ -106,7 +106,7 @@ else fi # Create SSH Config file to ignore checking (don't use in production!) -sed -i '1s/^/StrictHostKeyChecking no\n/' ~/.ssh/config +#sed -i '1s/^/StrictHostKeyChecking no\n/' ~/.ssh/config #add ssh keys for all nodes for node in "${all[@]}"; do From 8ac9cbe10523bed44d3d4340c9dbc91c056fdae5 Mon Sep 17 00:00:00 2001 From: Ira Abramov Date: Thu, 4 Apr 2024 17:22:31 +0300 Subject: [PATCH 10/13] clean up the coding logic and style a bit --- Kubernetes/RKE2/rke2.sh | 46 ++++++++++++++++++++--------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/Kubernetes/RKE2/rke2.sh b/Kubernetes/RKE2/rke2.sh index 39850be2..5f11a02e 100755 --- a/Kubernetes/RKE2/rke2.sh +++ b/Kubernetes/RKE2/rke2.sh @@ -117,38 +117,38 @@ done # create RKE2's self-installing manifest dir sudo mkdir -p /var/lib/rancher/rke2/server/manifests # Install the kube-vip deployment into rke2's self-installing manifest folder -curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/kube-vip -cat kube-vip | sed 's/$interface/'$interface'/g; s/$vip/'$vip'/g' >$HOME/kube-vip.yaml -sudo mv kube-vip.yaml /var/lib/rancher/rke2/server/manifests/kube-vip.yaml - +curl -s https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/kube-vip >$HOME/kube-vip.yaml +sed -i 's/$interface/'$interface'/g; s/$vip/'$vip'/g' $HOME/kube-vip.yaml # Find/Replace all k3s entries to represent rke2 -sudo sed -i 's/k3s/rke2/g' /var/lib/rancher/rke2/server/manifests/kube-vip.yaml -# copy kube-vip.yaml to home directory -sudo cp /var/lib/rancher/rke2/server/manifests/kube-vip.yaml ~/kube-vip.yaml -# change owner -sudo chown $USER:$USER kube-vip.yaml +sed -i 's/k3s/rke2/g' $HOME/kube-vip.yaml +sudo cp kube-vip.yaml /var/lib/rancher/rke2/server/manifests/kube-vip.yaml + # make kube folder to run kubectl later -mkdir ~/.kube +mkdir -p ~/.kube # create the rke2 config file sudo mkdir -p /etc/rancher/rke2 touch config.yaml -echo "tls-san:" >>config.yaml -echo " 
- $vip" >>config.yaml -echo " - $master1" >>config.yaml -echo " - $master2" >>config.yaml -echo " - $master3" >>config.yaml -echo "write-kubeconfig-mode: 0644" >>config.yaml -echo "disable:" >>config.yaml -echo " - rke2-ingress-nginx" >>config.yaml +{ + echo "tls-san:" + echo " - $vip" + echo " - $master1" + echo " - $master2" + echo " - $master3" + echo "write-kubeconfig-mode: 0644" + echo "disable:" + echo " - rke2-ingress-nginx" +} >>config.yaml # copy config.yaml to rancher directory sudo cp ~/config.yaml /etc/rancher/rke2/config.yaml -# update path with rke2-binaries -echo 'export KUBECONFIG=/etc/rancher/rke2/rke2.yaml' >>~/.bashrc -# shellcheck disable=SC2016 -echo 'export PATH=${PATH}:/var/lib/rancher/rke2/bin' >>~/.bashrc -echo 'alias k=kubectl' >>~/.bashrc +{ + # update path with rke2-binaries + echo 'export KUBECONFIG=/etc/rancher/rke2/rke2.yaml' + # shellcheck disable=SC2016 + echo 'export PATH=${PATH}:/var/lib/rancher/rke2/bin' + echo 'alias k=kubectl' +} >>~/.bashrc # shellcheck disable=SC1090 source ~/.bashrc From 0ce35878a8a76402e2b94ec971533dfdb4d01129 Mon Sep 17 00:00:00 2001 From: Ira Abramov Date: Thu, 4 Apr 2024 17:33:23 +0300 Subject: [PATCH 11/13] Make shellcheck happy --- Kubernetes/RKE2/rke2.sh | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/Kubernetes/RKE2/rke2.sh b/Kubernetes/RKE2/rke2.sh index 5f11a02e..c528939b 100755 --- a/Kubernetes/RKE2/rke2.sh +++ b/Kubernetes/RKE2/rke2.sh @@ -110,17 +110,18 @@ fi #add ssh keys for all nodes for node in "${all[@]}"; do - ssh-copy-id $remoteuser@$node + ssh-copy-id "$remoteuser@$node" done # Step 1: Create Kube VIP # create RKE2's self-installing manifest dir sudo mkdir -p /var/lib/rancher/rke2/server/manifests # Install the kube-vip deployment into rke2's self-installing manifest folder -curl -s https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/kube-vip >$HOME/kube-vip.yaml -sed -i 's/$interface/'$interface'/g; s/$vip/'$vip'/g' $HOME/kube-vip.yaml +curl -s https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/kube-vip >"$HOME/kube-vip.yaml" +# shellcheck disable=SC2016 +sed -i 's/$interface/'$interface'/g; s/$vip/'$vip'/g' "$HOME/kube-vip.yaml" # Find/Replace all k3s entries to represent rke2 -sed -i 's/k3s/rke2/g' $HOME/kube-vip.yaml +sed -i 's/k3s/rke2/g' "$HOME/kube-vip.yaml" sudo cp kube-vip.yaml /var/lib/rancher/rke2/server/manifests/kube-vip.yaml # make kube folder to run kubectl later @@ -155,13 +156,14 @@ source ~/.bashrc # Step 2: Copy kube-vip.yaml and certs to all masters for newnode in "${allmasters[@]}"; do - scp -i ~/.ssh/$certName $HOME/kube-vip.yaml $remoteuser@$newnode:~/kube-vip.yaml - scp -i ~/.ssh/$certName $HOME/config.yaml $remoteuser@$newnode:~/config.yaml - scp -i ~/.ssh/$certName ~/.ssh/{$certName,$certName.pub} $remoteuser@$newnode:~/.ssh + scp -i ~/.ssh/$certName "$HOME/kube-vip.yaml" "$remoteuser@$newnode":~/kube-vip.yaml + scp -i ~/.ssh/$certName "$HOME/config.yaml" "$remoteuser@$newnode":~/config.yaml + scp -i ~/.ssh/$certName ~/.ssh/{$certName,$certName.pub} "$remoteuser@$newnode":~/.ssh echo -e " \033[32;5mCopied successfully!\033[0m" done # Step 3: Connect to Master1 and move kube-vip.yaml and config.yaml. Then install RKE2, copy token back to admin machine. 
We then use the token to bootstrap additional masternodes +# shellcheck disable=SC2087 ssh -tt $remoteuser@$master1 -i ~/.ssh/$certName sudo su <$HOME/.kube/config -sudo chown $(id -u):$(id -g) $HOME/.kube/config +sudo cat ~/.kube/rke2.yaml | sed 's/127.0.0.1/'$master1'/g' >"$HOME/.kube/config" +sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config" export KUBECONFIG=${HOME}/.kube/config sudo cp ~/.kube/config /etc/rancher/rke2/rke2.yaml kubectl get nodes @@ -193,7 +195,8 @@ kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provi # Step 6: Add other Masternodes, note we import the token we extracted from step 3 for newnode in "${extramasters[@]}"; do - ssh -tt $remoteuser@$newnode -i ~/.ssh/$certName sudo su <> /etc/rancher/rke2/config.yaml @@ -215,7 +218,8 @@ kubectl get nodes # Step 7: Add Workers for newnode in "${workers[@]}"; do - ssh -tt $remoteuser@$newnode -i ~/.ssh/$certName sudo su <> /etc/rancher/rke2/config.yaml @@ -238,8 +242,9 @@ echo -e " \033[32;5mDeploying Metallb\033[0m" kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/namespace.yaml kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml # Download ipAddressPool and configure using lbrange above -curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/ipAddressPool -cat ipAddressPool | sed 's/$lbrange/'$lbrange'/g' >$HOME/ipAddressPool.yaml +# shellcheck disable=SC2016 +curl -s https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/ipAddressPool | + sed 's/$lbrange/'$lbrange'/g' >"$HOME/ipAddressPool.yaml" # Step 9: Deploy IP Pools and l2Advertisement echo -e " \033[32;5mAdding IP Pools, waiting for Metallb to be available first. 
This can take a long time as we're likely being rate limited for container pulls...\033[0m" From 9ffba28f72bf55062c282048cbe002f2af7e1830 Mon Sep 17 00:00:00 2001 From: Ira Abramov Date: Thu, 4 Apr 2024 18:05:07 +0300 Subject: [PATCH 12/13] unifying coding style for readability, remove unnecessary risk --- Kubernetes/RKE2/rke2.sh | 49 +++++++++++++++++++++++------------------ 1 file changed, 28 insertions(+), 21 deletions(-) diff --git a/Kubernetes/RKE2/rke2.sh b/Kubernetes/RKE2/rke2.sh index c528939b..76ec9917 100755 --- a/Kubernetes/RKE2/rke2.sh +++ b/Kubernetes/RKE2/rke2.sh @@ -117,19 +117,19 @@ done # create RKE2's self-installing manifest dir sudo mkdir -p /var/lib/rancher/rke2/server/manifests # Install the kube-vip deployment into rke2's self-installing manifest folder -curl -s https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/kube-vip >"$HOME/kube-vip.yaml" # shellcheck disable=SC2016 -sed -i 's/$interface/'$interface'/g; s/$vip/'$vip'/g' "$HOME/kube-vip.yaml" +curl -s https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/kube-vip | + sed 's/$interface/'$interface'/g; s/$vip/'$vip'/g' >~/kube-vip.yaml # Find/Replace all k3s entries to represent rke2 -sed -i 's/k3s/rke2/g' "$HOME/kube-vip.yaml" -sudo cp kube-vip.yaml /var/lib/rancher/rke2/server/manifests/kube-vip.yaml +sed -i 's/k3s/rke2/g' ~/kube-vip.yaml +sudo cp ~/kube-vip.yaml /var/lib/rancher/rke2/server/manifests/kube-vip.yaml # make kube folder to run kubectl later mkdir -p ~/.kube # create the rke2 config file sudo mkdir -p /etc/rancher/rke2 -touch config.yaml +echo >~/config.yaml { echo "tls-san:" echo " - $vip" @@ -139,7 +139,7 @@ touch config.yaml echo "write-kubeconfig-mode: 0644" echo "disable:" echo " - rke2-ingress-nginx" -} >>config.yaml +} >>~/config.yaml # copy config.yaml to rancher directory sudo cp ~/config.yaml /etc/rancher/rke2/config.yaml @@ -156,9 +156,9 @@ source ~/.bashrc # Step 2: Copy kube-vip.yaml and certs to all masters for newnode in "${allmasters[@]}"; do - scp -i ~/.ssh/$certName "$HOME/kube-vip.yaml" "$remoteuser@$newnode":~/kube-vip.yaml - scp -i ~/.ssh/$certName "$HOME/config.yaml" "$remoteuser@$newnode":~/config.yaml - scp -i ~/.ssh/$certName ~/.ssh/{$certName,$certName.pub} "$remoteuser@$newnode":~/.ssh + scp -i ~/.ssh/$certName ~/kube-vip.yaml "$remoteuser@$newnode":~/kube-vip.yaml + scp -i ~/.ssh/$certName ~/config.yaml "$remoteuser@$newnode":~/config.yaml + scp -i ~/.ssh/$certName ~/.ssh/$certName{,.pub} "$remoteuser@$newnode":~/.ssh echo -e " \033[32;5mCopied successfully!\033[0m" done @@ -169,12 +169,17 @@ mkdir -p /var/lib/rancher/rke2/server/manifests mv kube-vip.yaml /var/lib/rancher/rke2/server/manifests/kube-vip.yaml mkdir -p /etc/rancher/rke2 mv config.yaml /etc/rancher/rke2/config.yaml -echo 'export KUBECONFIG=/etc/rancher/rke2/rke2.yaml' >> ~/.bashrc ; echo 'export PATH=${PATH}:/var/lib/rancher/rke2/bin' >> ~/.bashrc ; echo 'alias k=kubectl' >> ~/.bashrc ; source ~/.bashrc ; +{ + echo 'export KUBECONFIG=/etc/rancher/rke2/rke2.yaml' + echo 'export PATH=${PATH}:/var/lib/rancher/rke2/bin' + echo 'alias k=kubectl' +} >> ~/.bashrc +source ~/.bashrc curl -sfL https://get.rke2.io | sh - systemctl enable rke2-server.service systemctl start rke2-server.service -echo "StrictHostKeyChecking no" > ~/.ssh/config -ssh-copy-id -i ~/.ssh/$certName $remoteuser@$admin +#echo "StrictHostKeyChecking no" > ~/.ssh/config +#ssh-copy-id -i ~/.ssh/$certName $remoteuser@$admin scp -i ~/.ssh/$certName /var/lib/rancher/rke2/server/token 
$remoteuser@$admin:~/token scp -i ~/.ssh/$certName /etc/rancher/rke2/rke2.yaml $remoteuser@$admin:~/.kube/rke2.yaml exit @@ -198,14 +203,16 @@ for newnode in "${extramasters[@]}"; do # shellcheck disable=SC2087 ssh -tt "$remoteuser@$newnode" -i ~/.ssh/$certName sudo su <> /etc/rancher/rke2/config.yaml - echo "server: https://$master1:9345" >> /etc/rancher/rke2/config.yaml - echo "tls-san:" >> /etc/rancher/rke2/config.yaml - echo " - $vip" >> /etc/rancher/rke2/config.yaml - echo " - $master1" >> /etc/rancher/rke2/config.yaml - echo " - $master2" >> /etc/rancher/rke2/config.yaml - echo " - $master3" >> /etc/rancher/rke2/config.yaml + echo > /etc/rancher/rke2/config.yaml + { + echo "token: $token" + echo "server: https://$master1:9345" + echo "tls-san:" + echo " - $vip" + echo " - $master1" + echo " - $master2" + echo " - $master3" + } >> /etc/rancher/rke2/config.yaml curl -sfL https://get.rke2.io | sh - systemctl enable rke2-server.service systemctl start rke2-server.service @@ -221,7 +228,7 @@ for newnode in "${workers[@]}"; do # shellcheck disable=SC2087 ssh -tt "$remoteuser@$newnode" -i ~/.ssh/$certName sudo su < /etc/rancher/rke2/config.yaml echo "token: $token" >> /etc/rancher/rke2/config.yaml echo "server: https://$vip:9345" >> /etc/rancher/rke2/config.yaml echo "node-label:" >> /etc/rancher/rke2/config.yaml From cbfa72157c0e3e628cbb1232894aba3056ccf77c Mon Sep 17 00:00:00 2001 From: Ira Abramov Date: Thu, 4 Apr 2024 19:12:30 +0300 Subject: [PATCH 13/13] returned the risky 'StrictHostKeyChecking no' and a few more fixes --- Kubernetes/RKE2/rke2.sh | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/Kubernetes/RKE2/rke2.sh b/Kubernetes/RKE2/rke2.sh index 76ec9917..0eb0997a 100755 --- a/Kubernetes/RKE2/rke2.sh +++ b/Kubernetes/RKE2/rke2.sh @@ -26,9 +26,9 @@ export KVVERSION="v0.7.2" DOMAIN=${DOMAIN:-my.org} # Set the IP addresses of the admin, masters, and workers nodes -# "admin" is the machine from which you will be running the ops, -# in theory it can be "localhost", just make sure you have sshd -# running and accessible there. +# "admin" is your desktop machine from which you will be running the ops, +# just for this run, make sure you have sshd +# running and accessible here! admin=192.168.60.22 master1=192.168.60.37 master2=192.168.60.38 @@ -106,7 +106,7 @@ else fi # Create SSH Config file to ignore checking (don't use in production!) 
-#sed -i '1s/^/StrictHostKeyChecking no\n/' ~/.ssh/config +sed -i '1s/^/StrictHostKeyChecking no\n/' ~/.ssh/config #add ssh keys for all nodes for node in "${all[@]}"; do @@ -171,25 +171,25 @@ mkdir -p /etc/rancher/rke2 mv config.yaml /etc/rancher/rke2/config.yaml { echo 'export KUBECONFIG=/etc/rancher/rke2/rke2.yaml' - echo 'export PATH=${PATH}:/var/lib/rancher/rke2/bin' + echo 'export PATH=\${PATH}:/var/lib/rancher/rke2/bin' echo 'alias k=kubectl' } >> ~/.bashrc source ~/.bashrc curl -sfL https://get.rke2.io | sh - systemctl enable rke2-server.service systemctl start rke2-server.service -#echo "StrictHostKeyChecking no" > ~/.ssh/config -#ssh-copy-id -i ~/.ssh/$certName $remoteuser@$admin -scp -i ~/.ssh/$certName /var/lib/rancher/rke2/server/token $remoteuser@$admin:~/token -scp -i ~/.ssh/$certName /etc/rancher/rke2/rke2.yaml $remoteuser@$admin:~/.kube/rke2.yaml +echo "StrictHostKeyChecking no" > ~/.ssh/config +ssh-copy-id -i ~/.ssh/$certName $USER@$admin +scp -i ~/.ssh/$certName /var/lib/rancher/rke2/server/token $USER@$admin:~/token +scp -i ~/.ssh/$certName /etc/rancher/rke2/rke2.yaml $USER@$admin:~/.kube/rke2.yaml exit EOF echo -e " \033[32;5mMaster1 Completed\033[0m" # Step 4: Set variable to the token we just extracted, set kube config location -token=$(cat token) -sudo cat ~/.kube/rke2.yaml | sed 's/127.0.0.1/'$master1'/g' >"$HOME/.kube/config" -sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config" +token=$(cat ~/token) +sed 's/127.0.0.1/'$master1'/g' <~/.kube/rke2.yaml >~/.kube/config +sudo chown "$(id -u):$(id -g)" ~/.kube/config export KUBECONFIG=${HOME}/.kube/config sudo cp ~/.kube/config /etc/rancher/rke2/rke2.yaml kubectl get nodes @@ -215,7 +215,7 @@ for newnode in "${extramasters[@]}"; do } >> /etc/rancher/rke2/config.yaml curl -sfL https://get.rke2.io | sh - systemctl enable rke2-server.service - systemctl start rke2-server.service + time systemctl start rke2-server.service exit EOF echo -e " \033[32;5mMaster node joined successfully!\033[0m" @@ -236,7 +236,7 @@ for newnode in "${workers[@]}"; do echo " - longhorn=true" >> /etc/rancher/rke2/config.yaml curl -sfL https://get.rke2.io | INSTALL_RKE2_TYPE="agent" sh - systemctl enable rke2-agent.service - systemctl start rke2-agent.service + time systemctl start rke2-agent.service exit EOF echo -e " \033[32;5mWorker node joined successfully!\033[0m" @@ -251,7 +251,7 @@ kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/conf # Download ipAddressPool and configure using lbrange above # shellcheck disable=SC2016 curl -s https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/ipAddressPool | - sed 's/$lbrange/'$lbrange'/g' >"$HOME/ipAddressPool.yaml" + sed 's/$lbrange/'$lbrange'/g' >~/ipAddressPool.yaml # Step 9: Deploy IP Pools and l2Advertisement echo -e " \033[32;5mAdding IP Pools, waiting for Metallb to be available first. This can take a long time as we're likely being rate limited for container pulls...\033[0m" @@ -259,7 +259,7 @@ kubectl wait --namespace metallb-system \ --for=condition=ready pod \ --selector=component=controller \ --timeout=1800s -kubectl apply -f ipAddressPool.yaml +kubectl apply -f ~/ipAddressPool.yaml kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/l2Advertisement.yaml # Step 10: Install Rancher (Optional - Delete if not required)
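
For reference, the server config that the script's echo block assembles into ~/config.yaml (and copies to /etc/rancher/rke2/config.yaml on the first master) comes out roughly as the YAML below, using the sample addresses from the editable variables section; this is a sketch, so substitute your own VIP and master IPs:

tls-san:
 - 192.168.60.171
 - 192.168.60.37
 - 192.168.60.38
 - 192.168.60.39
write-kubeconfig-mode: 0644
disable:
 - rke2-ingress-nginx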
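
Steps 8 and 9 render and apply the MetalLB address pool by substituting $lbrange into the ipAddressPool template pulled from the JimsGarage repo. The applied manifest should look roughly like this sketch; the pool name here is an illustrative assumption, while the apiVersion and namespace are the standard ones for MetalLB v0.13.x:

apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: first-pool
  namespace: metallb-system
spec:
  addresses:
  - 192.168.60.171-192.168.60.189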
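
After the full script has run, a quick smoke test from the admin machine might look like this (a sketch; rancher-lb is the LoadBalancer service the script exposes in the cattle-system namespace):

export KUBECONFIG="$HOME/.kube/config"
# every master and worker should report Ready
kubectl get nodes -o wide
# MetalLB controller and speakers should be Running
kubectl -n metallb-system get pods
# the EXTERNAL-IP of rancher-lb should fall inside $lbrange
kubectl -n cattle-system get svc rancher-lb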