Compare commits: 0997a2f864...main (13 commits)
README.md (new file, +37 lines)
# Terraform-Ansible-Helm Deployer

[](https://github.com/marketplace/actions/super-linter)

## Usage

From the `terraform/` folder:

```shell
terraform init
terraform plan
terraform apply
```
## Decisions and goals

The `terraform-provider-libvirt` provider has been chosen over Vagrant to deploy the VMs in order to keep the structure of the project simple. A local hypervisor was preferred over a cloud provider such as AWS or GCP to avoid incurring billing costs during troubleshooting and deployments.

### Terraform script

The Terraform script roughly follows these steps:

1. Deploy 3 VMs (one master and two workers) with:
   - 2 vCPUs;
   - 2GB vRAM;
   - 20GB of disk space;
   - Ubuntu 24.04 LTS;
   - an Ansible user.
2. Call an Ansible Playbook that:
   1. Configures the master node and installs Kubernetes;
   2. Configures the network for the Kubernetes cluster;
   3. Configures the worker nodes and installs Kubernetes.
3. Create the `kiratech-test` namespace;
4. Run the CIS Kubernetes benchmark;
5. Copy the `helm/` folder to the master node and install Helm (steps 3-5 are sketched as shell commands right after this list).
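Concretely, the provisioning behind steps 3-5 boils down to commands along these lines on the master node (a simplified shell sketch drawn from the Terraform provisioners further down in this compare; exact flags and paths may differ):

```shell
# Step 3: create the test namespace
kubectl --kubeconfig ~/.kube/config create namespace kiratech-test

# Step 4: run the CIS benchmark as a Kubernetes Job via kube-bench
curl https://raw.githubusercontent.com/aquasecurity/kube-bench/refs/heads/main/job-master.yaml > job-master.yaml
kubectl --kubeconfig ~/.kube/config apply -f job-master.yaml

# Step 5: install Helm from the official APT repository
curl https://baltocdn.com/helm/signing.asc | gpg --dearmor | sudo tee /usr/share/keyrings/helm.gpg > /dev/null
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list
sudo apt-get update && sudo apt-get install helm
```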
### Next steps

The script currently lacks:

- [ ] The capability to deploy a Helm application (a possible shape is sketched below);
- [ ] The use of Terraform outputs to populate the Ansible files.
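If a chart were added under `helm/`, deploying it could look roughly like this (a hypothetical sketch: the chart and release name `my-app` are placeholders, not part of the current repository):

```shell
# Hypothetical example: install a chart from the copied helm/ folder
# into the kiratech-test namespace created by Terraform.
helm install my-app ./helm/my-app \
  --namespace kiratech-test \
  --kubeconfig ~/.kube/config
```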
### CIS Kubernetes Benchmark

The CIS Benchmark is one of the most popular (if not the most popular) publicly available security benchmarks, and it is straightforward to add to a deployment pipeline through the [kube-bench](https://github.com/aquasecurity/kube-bench) implementation.
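kube-bench runs as a Kubernetes Job on the cluster, and the report is read back from the Job's pod logs. A minimal sketch, assuming the master-targeting Job manifest from the kube-bench repository (the actual Job name is whatever that manifest defines):

```shell
# Apply the master-node benchmark Job published by the kube-bench project
kubectl apply -f https://raw.githubusercontent.com/aquasecurity/kube-bench/refs/heads/main/job-master.yaml

# Wait for the Job to complete, then read the report from its pod logs
# (the Job name below is taken from the manifest and may differ)
kubectl wait --for=condition=complete job/kube-bench-master
kubectl logs job/kube-bench-master
```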
## Linting

The project uses GitHub Actions as its CI tool, running [super-linter](https://github.com/super-linter/super-linter) on the entire codebase.
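For faster feedback, super-linter can also be run locally in its container before pushing, following the project's documented local-run mode (a sketch; the image tag and mount path are assumptions based on the upstream documentation):

```shell
# Run super-linter against the working copy, mirroring the CI check
docker run --rm \
  -e RUN_LOCAL=true \
  -e DEFAULT_BRANCH=main \
  -v "$(pwd)":/tmp/lint \
  ghcr.io/super-linter/super-linter:latest
```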
@@ -8,3 +8,4 @@
 [all:vars]
 ansible_user=ansible
 ansible_ssh_private_key_file=../terraform/.local/.ssh/id_rsa
+ansible_python_interpreter=/usr/bin/python3
@@ -123,9 +123,7 @@
     mode: "0755"

 - name: Download Kubernetes GPG key securely
-  ansible.builtin.shell: |
-    set -o pipefail
-    curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.31/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
+  ansible.builtin.shell: curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.31/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg

 - name: Add Kubernetes repository
   ansible.builtin.apt_repository:
@@ -139,7 +137,7 @@
       - kubeadm
       - kubectl
     state: present
-    update_cache: true
+    update_cache: yes

 - name: Hold kubelet, kubeadm, kubectl packages
   ansible.builtin.command:
@@ -213,6 +211,8 @@
 - name: Initialize Kubernetes control plane
   ansible.builtin.command:
     cmd: kubeadm init --pod-network-cidr=10.244.0.0/16
+    creates: /tmp/kubeadm_output
+  register: kubeadm_init_output
   become: true
   changed_when: false
@@ -223,15 +223,17 @@
     owner: ansible
     mode: "0755"

-- name: Generate join command
-  ansible.builtin.command: kubeadm token create --print-join-command
-  register: join_command
-- name: Copy join command to local file
-  ansible.builtin.copy:
-    content: '"{{ join_command.stdout_lines[0] }}"'
-    dest: '"/tmp/join-command"'
+- name: Store Kubernetes initialization output to file
+  copy:
+    content: "{{ kubeadm_init_output.stdout }}"
+    dest: /tmp/kubeadm_output
+  become: true
   delegate_to: localhost

+- name: Generate the Join Command
+  ansible.builtin.shell: cat /tmp/kubeadm_output | tail -n 2 | sed ':a;N;$!ba;s/\\\n\s*/ /g' > /tmp/join-command
+  delegate_to: localhost
+
 - name: Set permissions for the Join Executable
   ansible.builtin.file:
     path: /tmp/join-command
@@ -11,6 +11,10 @@
     state: present
     update_cache: true

+- name: Set a hostname
+  ansible.builtin.hostname:
+    name: worker-{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}
+
 - name: Install Docker
   ansible.builtin.apt:
     name: docker.io
helm/.placeholder (new empty file)
@@ -10,6 +10,10 @@ terraform {
       source  = "hashicorp/template"
       version = "2.2.0"
     }
+    null = {
+      source  = "hashicorp/null"
+      version = "3.2.3"
+    }
   }
 }
@@ -47,6 +51,9 @@ resource libvirt_volume ubuntu2404_resized {
   count = local.masternodes + local.workernodes
 }

+data template_file private_key {
+  template = file("${path.module}/.local/.ssh/id_rsa")
+}

 data template_file public_key {
   template = file("${path.module}/.local/.ssh/id_rsa.pub")
@@ -158,3 +165,75 @@ resource libvirt_domain k8s_workers {
     autoport = true
   }
 }
+
+resource null_resource run_ansible {
+  depends_on = [
+    libvirt_domain.k8s_masters,
+    libvirt_domain.k8s_workers
+  ]
+
+  provisioner "local-exec" {
+    command = "ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -vvv -i ../ansible/inventory.ini ../ansible/k8s.yml -K"
+  }
+}
+
+resource null_resource create_namespace {
+  depends_on = [
+    null_resource.run_ansible
+  ]
+  provisioner "remote-exec" {
+    inline = ["sudo mkdir ~/.kube", "sudo cp /etc/kubernetes/admin.conf ~/.kube/", "sudo mv ~/.kube/admin.conf ~/.kube/config", "sudo service kubelet restart", "sudo kubectl --kubeconfig ~/.kube/config create namespace kiratech-test"]
+
+    connection {
+      host        = libvirt_domain.k8s_masters[0].network_interface[0].addresses[0]
+      type        = "ssh"
+      user        = "ansible"
+      private_key = data.template_file.private_key.rendered
+    }
+  }
+}
+
+resource null_resource run_benchmark {
+  depends_on = [
+    null_resource.create_namespace
+  ]
+  provisioner "remote-exec" {
+    inline = ["curl https://raw.githubusercontent.com/aquasecurity/kube-bench/refs/heads/main/job-master.yaml > job-master.yaml", "kubectl --kubeconfig ~/.kube/config apply -f job-master.yaml", "rm job-master.yaml"]
+
+    connection {
+      host        = libvirt_domain.k8s_masters[0].network_interface[0].addresses[0]
+      type        = "ssh"
+      user        = "ansible"
+      private_key = data.template_file.private_key.rendered
+    }
+  }
+}
+
+resource null_resource deploy_helm {
+  depends_on = [
+    null_resource.run_benchmark
+  ]
+
+  provisioner "local-exec" {
+    command = "scp -i ${path.module}/.local/.ssh/id_rsa -r ../helm ansible@${libvirt_domain.k8s_masters[0].network_interface[0].addresses[0]}:/home/ansible/helm"
+  }
+
+  # https://helm.sh/docs/intro/install/#from-apt-debianubuntu
+  provisioner "remote-exec" {
+    inline = [
+      "curl https://baltocdn.com/helm/signing.asc | gpg --dearmor | sudo tee /usr/share/keyrings/helm.gpg > /dev/null",
+      "sudo apt-get install apt-transport-https --yes",
+      "echo \"deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main\" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list",
+      "sudo apt-get update",
+      "sudo apt-get install helm"
+    ]
+
+    connection {
+      host        = libvirt_domain.k8s_masters[0].network_interface[0].addresses[0]
+      type        = "ssh"
+      user        = "ansible"
+      private_key = data.template_file.private_key.rendered
+    }
+  }
+}