Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Slight overhaul to enhance readability, safety, and consistency #61

Open
wants to merge 3 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
222 changes: 147 additions & 75 deletions Kubernetes/K3S-Deploy/k3s.sh
Original file line number Diff line number Diff line change
Expand Up @@ -21,186 +21,258 @@ echo -e " \033[32;5m \
#############################################

# Version of Kube-VIP to deploy
KVVERSION="v0.6.3"

KVVERSION="v0.7.0"

# K3S Version
k3sVersion="v1.26.10+k3s2"

# Set the IP addresses of the master and work nodes
master1=192.168.3.21
master2=192.168.3.22
master3=192.168.3.23
worker1=192.168.3.24
worker2=192.168.3.25
k3sVersion="v1.27.10+k3s2"

# Set the IP addresses of the master and worker nodes.

masters=(10.0.5.1 10.0.5.2 10.0.5.3)
workers=(10.0.5.4 10.0.5.5)

# User of remote machines
user=ubuntu

user=bones

# Interface used on remotes

interface=eth0

# Set the virtual IP address (VIP)
vip=192.168.3.50

# Array of master nodes
masters=($master2 $master3)
vip=10.0.5.10

# Loadbalancer IP range

# Array of worker nodes
workers=($worker1 $worker2)
lbrange=10.0.5.100-10.0.5.120

# Array of all
all=($master1 $master2 $master3 $worker1 $worker2)
# SSH certificate name variable

# Array of all minus master
allnomaster1=($master2 $master3 $worker1 $worker2)
certName=id_ed25519

#Loadbalancer IP range
lbrange=192.168.3.60-192.168.3.80
# Should we copy the above SSH cert to the local .ssh directory?
# (Only set if you haven't done this)

#ssh certificate name variable
certName=id_rsa
copyCert=false

#############################################
# DO NOT EDIT BELOW #
#############################################

# For testing purposes - in case time is wrong due to VM snapshots

sudo timedatectl set-ntp off
sudo timedatectl set-ntp on

# Move SSH certs to ~/.ssh and change permissions
cp /home/$user/{$certName,$certName.pub} /home/$user/.ssh
chmod 600 /home/$user/.ssh/$certName
chmod 644 /home/$user/.ssh/$certName.pub
# Create handy functions for outputting messages.

# Print a status message in blinking green, matching the script's banner style.
# $1 - message text (echo -e style backslash escapes are honoured via %b).
notice() {
  printf ' \033[32;5m%b\033[0m\n' "${1}"
}

# Print an error message in blinking red, matching the script's banner style.
# $1 - message text (echo -e style backslash escapes are honoured via %b).
error() {
  printf ' \033[31;5m%b\033[0m\n' "${1}"
}

# Move SSH certs to ~/.ssh and change permissions if requested.

# Copy the SSH key pair into ~/.ssh and set conventional permissions
# (600 private, 644 public). Only runs when copyCert=true above.
# Expansions are quoted so paths with spaces (or an empty certName) cannot
# word-split or glob (ShellCheck SC2086).
if [ "${copyCert}" = true ]; then
  cp "${HOME}/${certName}" "${HOME}/${certName}.pub" "${HOME}/.ssh"
  chmod 600 "${HOME}/.ssh/${certName}"
  chmod 644 "${HOME}/.ssh/${certName}.pub"
fi

# Set some SSH options for the sake of convenience. The default is to include
# our specified cert and to disable strict host key checking.

ssh_ops="-o StrictHostKeyChecking=no -i ${HOME}/.ssh/$certName"

# Create a temporary folder where we can work without leaving artifacts after
# the script completes.

tempdir=$(mktemp --tmpdir -d kube_install-XXXXX)

# Download all of the K3S-Deploy files necessary for our work.

repo_path=https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy

for repo_file in kube-vip ipAddressPool l2Advertisement.yaml; do
curl --output-dir ${tempdir} -sO ${repo_path}/${repo_file}
done

# Before starting to modify the cluster, download any other remote resources
# that could cause us to abort if one isn't available.

curl --output-dir ${tempdir} -sO https://kube-vip.io/manifests/rbac.yaml
curl --output-dir ${tempdir} -sO https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml
curl --output-dir ${tempdir} -sO https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/namespace.yaml
curl --output-dir ${tempdir} -sO https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml
curl --output-dir ${tempdir} -sO https://raw.githubusercontent.com/inlets/inlets-operator/master/contrib/nginx-sample-deployment.yaml

# Install k3sup to local machine if not already present

# Install k3sup locally if it is not already present.
# NOTE: `command -v` must receive only the program name — the stray word
# "version" in the original never resolves to a command, so the check failed
# on every run and k3sup was reinstalled each time.
if ! command -v k3sup &> /dev/null
then
  error "k3sup not found, installing"
  curl -sLS https://get.k3sup.dev | sh
  sudo install k3sup /usr/local/bin/
else
  notice "k3sup already installed"
fi

# Install Kubectl if not already present

# Install kubectl locally if it is not already present.
# NOTE: `command -v` must receive only the program name — the stray word
# "version" in the original never resolves, so the check failed on every run
# and kubectl was re-downloaded each time.
if ! command -v kubectl &> /dev/null
then
  # Resolve the current stable release once, then download that exact build.
  kc_version=$(curl -sL https://dl.k8s.io/release/stable.txt)
  error "Kubectl not found, installing version ${kc_version}"
  curl -LO "https://dl.k8s.io/release/${kc_version}/bin/linux/amd64/kubectl"
  sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
else
  notice "Kubectl already installed"
fi

# Create SSH Config file to ignore checking (don't use in production!)
echo "StrictHostKeyChecking no" > ~/.ssh/config
# Add ssh keys for all nodes; this may have already been done as part of
# cloud-init, but it never hurts to be certain.

#add ssh keys for all nodes
for node in "${all[@]}"; do
ssh-copy-id $user@$node
# Push our public key to every master and worker node. Array expansions are
# quoted ("${arr[@]}") so each IP stays a single word (ShellCheck SC2068).
for node in "${masters[@]}" "${workers[@]}"; do
  # shellcheck disable=SC2086 -- ssh_ops intentionally word-splits into multiple options
  ssh-copy-id ${ssh_ops} "${user}@${node}"
done

# Install policycoreutils for each node
for newnode in "${all[@]}"; do
ssh $user@$newnode -i ~/.ssh/$certName sudo su <<EOF
NEEDRESTART_MODE=a apt install policycoreutils -y

for node in ${masters[@]} ${workers[@]}; do
ssh ${ssh_ops} $user@$node sudo su <<EOF
NEEDRESTART_MODE=a apt-get install policycoreutils -y
exit
EOF
echo -e " \033[32;5mPolicyCoreUtils installed!\033[0m"
notice "PolicyCoreUtils installed!"
done

# Step 1: Bootstrap First k3s Node

notice "Setting up first master node"

mkdir ~/.kube

k3sup install \
--ip $master1 \
--ip ${masters[0]} \
--user $user \
--tls-san $vip \
--cluster \
--k3s-version $k3sVersion \
--k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$master1 --node-taint node-role.kubernetes.io/master=true:NoSchedule" \
--k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=${masters[0]} --node-taint node-role.kubernetes.io/master=true:NoSchedule" \
--merge \
--sudo \
--local-path $HOME/.kube/config \
--ssh-key $HOME/.ssh/$certName \
--context k3s-ha
echo -e " \033[32;5mFirst Node bootstrapped successfully!\033[0m"

notice "First Node bootstrapped successfully!"

# Step 2: Install Kube-VIP for HA
kubectl apply -f https://kube-vip.io/manifests/rbac.yaml

# Step 3: Download kube-vip
curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/kube-vip
cat kube-vip | sed 's/$interface/'$interface'/g; s/$vip/'$vip'/g' > $HOME/kube-vip.yaml
kubectl apply -f ${tempdir}/rbac.yaml

# Step 3: Copy kube-vip.yaml to master1

sed -i 's/$interface/'$interface'/g; s/$vip/'$vip'/g' ${tempdir}/kube-vip
sed -i "s%/version.*%/version: ${KVVERSION}%g" ${tempdir}/kube-vip

# Step 4: Copy kube-vip.yaml to master1
scp -i ~/.ssh/$certName $HOME/kube-vip.yaml $user@$master1:~/kube-vip.yaml
scp ${ssh_ops} ${tempdir}/kube-vip $user@${masters[0]}:kube-vip.yaml

# Step 4: Connect to Master1 and move kube-vip.yaml

# Step 5: Connect to Master1 and move kube-vip.yaml
ssh $user@$master1 -i ~/.ssh/$certName <<- EOF
ssh ${ssh_ops} $user@${masters[0]} <<- EOF
sudo mkdir -p /var/lib/rancher/k3s/server/manifests
sudo mv kube-vip.yaml /var/lib/rancher/k3s/server/manifests/kube-vip.yaml
EOF

# Step 6: Add new master nodes (servers) & workers
for newnode in "${masters[@]}"; do
# Step 5: Add new master nodes (servers) & workers

notice "Setting up remaining master nodes"

for newnode in ${masters[@]:1}; do
k3sup join \
--ip $newnode \
--user $user \
--sudo \
--k3s-version $k3sVersion \
--server \
--server-ip $master1 \
--server-ip ${masters[0]} \
--ssh-key $HOME/.ssh/$certName \
--k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$newnode --node-taint node-role.kubernetes.io/master=true:NoSchedule" \
--server-user $user
echo -e " \033[32;5mMaster node joined successfully!\033[0m"

notice "Master node ${newnode} joined successfully!"
done

# add workers
for newagent in "${workers[@]}"; do
# Add workers

notice "Setting up worker nodes"

for newagent in ${workers[@]}; do
k3sup join \
--ip $newagent \
--user $user \
--sudo \
--k3s-version $k3sVersion \
--server-ip $master1 \
--server-ip ${masters[0]} \
--ssh-key $HOME/.ssh/$certName \
--k3s-extra-args "--node-label \"longhorn=true\" --node-label \"worker=true\""
echo -e " \033[32;5mAgent node joined successfully!\033[0m"

notice "Agent node ${newagent} joined successfully!"
done

# Step 7: Install kube-vip as network LoadBalancer - Install the kube-vip Cloud Provider
kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml
# Step 6: Install kube-vip as network LoadBalancer - Install the kube-vip Cloud Provider

# Step 8: Install Metallb
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/namespace.yaml
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml
# Download ipAddressPool and configure using lbrange above
curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/ipAddressPool
cat ipAddressPool | sed 's/$lbrange/'$lbrange'/g' > $HOME/ipAddressPool.yaml
kubectl apply -f $HOME/ipAddressPool.yaml
notice "Installing kube-vip as Load Balancer and Cloud Provider"

# Step 9: Test with Nginx
kubectl apply -f https://raw.githubusercontent.com/inlets/inlets-operator/master/contrib/nginx-sample-deployment.yaml -n default
kubectl apply -f ${tempdir}/kube-vip-cloud-controller.yaml

# Step 7: Install Metallb

notice "Installing MetalLB"

kubectl apply -f ${tempdir}/namespace.yaml
kubectl apply -f ${tempdir}/metallb-native.yaml

# Configure ipAddressPool using lbrange above

notice "Setting up ip address pool"

cat ${tempdir}/ipAddressPool | sed 's/$lbrange/'$lbrange'/g' > ${tempdir}/ipAddressPool.yaml

# Step 8: Test with Nginx

notice "Testing with Nginx"

kubectl apply -f ${tempdir}/nginx-sample-deployment.yaml -n default
kubectl expose deployment nginx-1 --port=80 --type=LoadBalancer -n default

echo -e " \033[32;5mWaiting for K3S to sync and LoadBalancer to come online\033[0m"
notice "Waiting for K3S to sync and LoadBalancer to come online"

while [[ $(kubectl get pods -l app=nginx -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}') != "True" ]]; do
sleep 1
done

# Step 10: Deploy IP Pools and l2Advertisement
# Step 9: Deploy IP Pools and l2Advertisement

notice "Deploying IP Pools and l2Advertisement"

kubectl wait --namespace metallb-system \
--for=condition=ready pod \
--selector=component=controller \
--timeout=120s
kubectl apply -f ipAddressPool.yaml
kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/l2Advertisement.yaml

kubectl apply -f ${tempdir}/ipAddressPool.yaml
kubectl apply -f ${tempdir}/l2Advertisement.yaml

kubectl get nodes
kubectl get svc
kubectl get pods --all-namespaces -o wide

echo -e " \033[32;5mHappy Kubing! Access Nginx at EXTERNAL-IP above\033[0m"
notice "Happy Kubing! Access Nginx at EXTERNAL-IP above"
Loading