added script and readme

README.md (new file, 29 lines)

@@ -0,0 +1,29 @@
# Kubernetes Cluster Deployment Guide

This guide describes how to deploy a Kubernetes cluster tailored to your environment using the provided `init-deploy.sh` script.

## ⚙️ Prerequisites

Before running the deployment script, ensure the following are in place:
- `kubectl` is installed on your local machine
- Ansible is installed on your local machine (the deploy script drives `ansible-playbook`)
- The remote user can execute `sudo` commands **without a password prompt**
- SSH host keys of the target nodes are already added to your system, to avoid interactive confirmation prompts (see the sketch after this list)
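
The last two points can be prepared ahead of time. A minimal sketch, assuming the user and hostnames defined in `init-deploy.sh` (`kevin`, `k8s-control-plane`, `k8s-worker1`, `k8s-worker2`):

```bash
# On each node: let the deploy user run sudo without a password prompt
echo "kevin ALL=(ALL) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/kevin

# On the local machine: pre-add the nodes' SSH host keys
ssh-keyscan k8s-control-plane k8s-worker1 k8s-worker2 >> ~/.ssh/known_hosts
```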

## 🧭 Deployment Goals

This setup will create a Kubernetes cluster with the following topology and components:
- **1 Control Plane Node**
- **2 Worker Nodes**
- **Calico** as the CNI (Container Network Interface)
- **MetalLB** as the load balancer for bare-metal services
- **Longhorn** as the persistent storage provider
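
After a successful run, this topology can be sanity-checked from the local machine. A rough sketch (exact pod names and namespaces depend on how Calico, MetalLB, and Longhorn are installed):

```bash
kubectl get nodes -o wide                                # expect k8s-control-plane, k8s-worker1, k8s-worker2 Ready
kubectl get pods -A | grep -E 'calico|metallb|longhorn'  # CNI, load-balancer, and storage pods running
```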

## 🚀 How to Deploy

To initiate the deployment, run the provided script:
```bash
chmod +x init-deploy.sh
./init-deploy.sh
```

@@ -2,6 +2,7 @@
  hosts: k8s_control_plane
  become: true
  vars:
    ## This is the default pod network CIDR of Calico
    pod_network_cidr: "192.168.0.0/16"
    apiserver_advertise_address: "10.50.3.21"
    cri_socket: "unix:///run/containerd/containerd.sock"
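
These vars presumably feed the playbook's `kubeadm init` step; the equivalent manual invocation would look roughly like this (a sketch, not the playbook's exact task):

```bash
kubeadm init \
  --pod-network-cidr="192.168.0.0/16" \
  --apiserver-advertise-address="10.50.3.21" \
  --cri-socket="unix:///run/containerd/containerd.sock"
```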

ansible/longhorn-reqs.yaml (new file, 36 lines)

@@ -0,0 +1,36 @@
---
- name: Setup Longhorn Requirements
  hosts: all
  become: true
  tasks:

    - name: Install required packages
      apt:
        name:
          - open-iscsi
          - nfs-common
        state: present
        update_cache: yes

    - name: Ensure iscsi_tcp kernel module is loaded
      modprobe:
        name: iscsi_tcp
        state: present

    - name: Ensure iscsi_tcp module loads on boot
      copy:
        dest: /etc/modules-load.d/iscsi.conf
        content: |
          iscsi_tcp
        owner: root
        group: root
        mode: '0644'

    - name: Disable and stop multipathd service
      systemd:
        name: multipathd
        enabled: false
        state: stopped
        masked: true
        daemon_reload: yes
      ignore_errors: true # in case it's not installed
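
A quick way to confirm these requirements took effect on a node (a sketch; run on any host in the inventory):

```bash
lsmod | grep iscsi_tcp               # module loaded now
cat /etc/modules-load.d/iscsi.conf   # ...and on every boot
systemctl is-enabled multipathd      # should print "masked"
dpkg -l open-iscsi nfs-common        # packages present
```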

@@ -1,4 +1,5 @@
# master-k8s.yaml
- import_playbook: k8s-install-deps.yaml
- import_playbook: k8s-init.yaml
- import_playbook: install_cni.yaml
- import_playbook: longhorn-reqs.yaml
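
`master-k8s.yaml` simply chains the individual playbooks (the `longhorn-reqs.yaml` import is the line this commit adds), so the whole sequence can also be run by hand with the same inventory the deploy script uses:

```bash
ansible-playbook -i ansible/inventory.ini ansible/master-k8s.yaml
```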

init-deploy.sh (new executable file, 25 lines)

@@ -0,0 +1,25 @@
#!/bin/bash

KUBE_USER="kevin"
KUBE_CP="k8s-control-plane"
KUBE_W1="k8s-worker1"
KUBE_W2="k8s-worker2"

ANSIBLE_DIR="ansible"

## Prepare all of the nodes with k8s using the ansible playbooks I prepared

ansible-playbook -i ./"$ANSIBLE_DIR"/inventory.ini ./"$ANSIBLE_DIR"/master-k8s.yaml

## Fetch the configuration from the freshly installed cluster; the remote path
## is relative to $KUBE_USER's home. BEWARE THAT ANY EXISTING CONFIG WILL BE OVERWRITTEN

scp "$KUBE_USER@$KUBE_CP":.kube/config "$HOME"/.kube/config

## Now join the workers to the cluster; this prints the full
## "kubeadm join ..." command to run on each worker

JOIN_CMD=$(ssh "$KUBE_USER@$KUBE_CP" "kubeadm token create --print-join-command")

for NODE in "$KUBE_W1" "$KUBE_W2"; do
    echo "Joining $NODE"
    ssh "$KUBE_USER@$NODE" "sudo $JOIN_CMD"
done
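
Since the `scp` step clobbers any existing kubeconfig, a cautious variant might back it up first (a sketch, not part of the commit):

```bash
# Preserve any existing kubeconfig before it gets overwritten
mkdir -p "$HOME/.kube"
if [ -f "$HOME/.kube/config" ]; then
    cp "$HOME/.kube/config" "$HOME/.kube/config.bak.$(date +%s)"
fi
```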