Compare commits: 8df9ec11bd...main (69 commits)

Commits (SHA1): 629be846af, b15eaa7c7f, 312394af74, 2c6c74fe73, 0f995dadbd, 862278c419, 4dbb2a1573, f3bd1f0ee8, 18bde2e1b9, 722793830b, b56c0edd1f, 6c0d57b60e, d0ad44a8e8, 754368ffce, 3796dcce50, eb0db5f633, 383f2c76fb, a866be86f1, 1ecfc17231, a76d9813bd, faf93e7eb5, f1494b1817, 44ad0306a9, 467993dee3, e6b3321c2a, 140067aa15, 8d4f4c9ab6, 4d8a40fe8e, e48e6f0c3d, 1f7b711048, e014270903, 5c9f96be8e, 9eaca5ae11, 1cb55b1fb3, ecc2e332da, ede5600da5, b4ee531a81, 1663a469c2, 5f301ce32a, 4356588131, 1f286384c2, d74cdfa143, baae7a8d7e, 4da3293569, 4ee8043bb8, c50547c2fb, 44323ef46b, 7d868cbd60, 11efe45f77, f2c81dc9ac, ecc308735c, e0c93087ea, e5726cf742, 170c5e0730, 81abb30846, 97ab34737d, 23358515b8, 1870a78fde, fad6e517e5, 9eba0f9042, 8441f85f88, 42a6110e3c, e9671cbe81, 662ce1b1a5, 8cfa0fb4d9, 2b85b7db66, 4544292114, 6eaeb601bd, 5e20237572
49  .drone.yml
@@ -1,23 +1,36 @@
type: docker
kind: pipeline
name: Serverctl

steps:
- name: terraform plan
  image: alpine
  environment:
    HCLOUD_TOKEN:
      from_secret: serverctl_hcloud_token
    ACCESS_KEY:
      from_secret: serverctl_access_key
    SECRET_KEY:
      from_secret: serverctl_secret_key
- name: test
  image: harbor.front.kjuulh.io/docker-proxy/library/bash:latest
  commands:
  - apk --update add curl
  - curl --silent --output terraform.zip "https://releases.hashicorp.com/terraform/1.1.6/terraform_1.1.6_linux_amd64.zip"
  - unzip terraform.zip ; rm -f terraform.zip; chmod +x terraform
  - mkdir -p ${HOME}/bin ; export PATH=${PATH}:${HOME}/bin; mv terraform ${HOME}/bin/
  - terraform -v
  - cd infrastructure/create-resources
  - terraform init -backend-config="access_key=$ACCESS_KEY" -backend-config="secret_key=$SECRET_KEY"
  - terraform validate
  - terraform plan -var "hcloud_token=$HCLOUD_TOKEN"
  - echo 'Run tests'
#
# - name: terraform plan
#   image: alpine
#   environment:
#     HCLOUD_TOKEN:
#       from_secret: serverctl_hcloud_token
#     ACCESS_KEY:
#       from_secret: serverctl_access_key
#     SECRET_KEY:
#       from_secret: serverctl_secret_key
#     SSH_ZIP_KEY:
#       from_secret: serverctl_ssh_zip_key
#     HCLOUD_SSH_KEY_ID:
#       from_secret: serverctl_hcloud_ssh_key_id
#   commands:
#   - apk --update add curl zip ansible python3
#   - cd infrastructure && ./unzip-ssh-keys.sh "$SSH_ZIP_KEY" && cd ..
#   - curl --silent --output terraform.zip "https://releases.hashicorp.com/terraform/1.1.6/terraform_1.1.6_linux_amd64.zip"
#   - unzip terraform.zip ; rm -f terraform.zip; chmod +x terraform
#   - mkdir -p ${HOME}/bin ; export PATH=${PATH}:${HOME}/bin; mv terraform ${HOME}/bin/
#   - terraform -v
#   - cd infrastructure/create-resources
#   - terraform init -backend-config="access_key=$ACCESS_KEY" -backend-config="secret_key=$SECRET_KEY"
#   - terraform validate
#   - terraform apply -auto-approve -var "hcloud_token=$HCLOUD_TOKEN" -var "pvt_key=../ssh_keys/id_ed25519" -var "pub_key=../ssh_keys/id_ed25519.pub" -var "hcloud_serverctl_ssh_key_id=$HCLOUD_SSH_KEY_ID"
#   - cd ansible
#   - ANSIBLE_HOST_KEY_CHECKING=False /usr/bin/ansible-playbook -u root --key-file '../../ssh_keys/id_ed25519' -e 'pub_key=../../ssh_keys/id_ed25519.pub' site.yml
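Before pushing, the pipeline definition can be checked locally with the Drone CLI; a minimal sketch, assuming the drone CLI is installed and run from the repository root where .drone.yml lives:

```bash
# Validate the .drone.yml pipeline syntax locally
drone lint
```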
@@ -55,7 +55,7 @@ services:

  # Logging
  loki:
-   image: grafana/loki:2.4.2
+   image: grafana/loki:2.7.0
    ports:
      - 3100
    networks:

@@ -66,7 +66,7 @@ services:
    logging: *loki-logging

  promtail:
-   image: grafana/promtail:2.4.2
+   image: grafana/promtail:2.7.0
    volumes:
      - ./services/logs/promtail/config.yaml:/mnt/config/promtail-config.yaml
      - /var/lib/docker/containers:/host/containers
1  infrastructure/.gitignore  vendored  Normal file
@@ -0,0 +1 @@
ssh_keys/
2  infrastructure/create-resources/.gitignore  vendored
@@ -3,3 +3,5 @@
.terraform.lock.hcl
terraform.tfstate
terraform.tfstate.backup
secrets.txt
.env
9  infrastructure/create-resources/ansible/.yamllint  Normal file
@@ -0,0 +1,9 @@
---
extends: default

rules:
  line-length:
    max: 120
    level: warning
  truthy:
    allowed-values: ['true', 'false', 'yes', 'no']
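With this config in place the playbooks can be linted locally; a minimal sketch, assuming yamllint is installed and run from the ansible directory so the .yamllint file is picked up automatically:

```bash
cd infrastructure/create-resources/ansible
yamllint .
```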
12  infrastructure/create-resources/ansible/ansible.cfg  Normal file
@@ -0,0 +1,12 @@
[defaults]
nocows = True
roles_path = ./roles
inventory = ./inventory/hosts.cfg

remote_tmp = $HOME/.ansible/tmp
local_tmp = $HOME/.ansible/tmp
pipelining = True
become = True
host_key_checking = False
deprecation_warnings = True
callback_whitelist = profile_tasks
@@ -0,0 +1,3 @@
---
collections:
  - name: community.general
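The path of this collections file is not shown in this view (ansible/requirements.yml is the usual convention); assuming that name, the collection installs with:

```bash
ansible-galaxy collection install -r requirements.yml
```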
@@ -0,0 +1,15 @@
---
k3s_version: v1.22.3+k3s1
ansible_user: root
systemd_dir: /etc/systemd/system
systemd_network_dir: /etc/systemd/network
master_ip: "{{ hostvars[groups['serverctl_master_hosts'][0]]['wireguard_ip'] | default(groups['serverctl_master_hosts'][0]) }}"
extra_server_args: "--flannel-iface=serverctl-wg0"
extra_agent_args: "--flannel-iface=serverctl-wg0"

ansible_become_method: su

ufw_enabled: true

wireguard_mask_bits: 24
wireguard_port: 51871
32  infrastructure/create-resources/ansible/inventory/hosts.cfg  Executable file
@@ -0,0 +1,32 @@
[serverctl_master_hosts]
95.217.155.228 ansible_host=95.217.155.228 wireguard_ip=10.1.1.1

[serverctl_node_hosts]
65.21.50.146 ansible_host=65.21.50.146 wireguard_ip=10.1.1.10
95.216.162.16 ansible_host=95.216.162.16 wireguard_ip=10.1.1.11

[serverctl_home_servers]
192.168.1.150 ansible_host=192.168.1.150 wireguard_ip=10.1.1.8
#192.168.1.233 ansible_host=192.168.1.233 wireguard_ip=10.1.1.9

[serverctl_cluster:children]
serverctl_master_hosts
serverctl_node_hosts

[serverctl_super_cluster:children]
serverctl_cluster
serverctl_home_servers

[serverctl_home_servers:vars]
client_server=True

[serverctl_super_cluster:vars]
pipelining=true
ansible_ssh_user=root
ansible_ssh_port=22

[serverctl_cluster:vars]
client_server=False
pipelining=true
ansible_ssh_user=root
ansible_ssh_port=22
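A quick reachability check against this inventory, run from the ansible directory so ansible.cfg supplies the inventory path (it assumes your SSH key is already authorized on the hosts):

```bash
cd infrastructure/create-resources/ansible
ansible serverctl_cluster -m ping
```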
8  infrastructure/create-resources/ansible/kubeconfig.yml  Normal file
@@ -0,0 +1,8 @@
- hosts: serverctl_master_hosts[0]
  become: yes
  tasks:
    - name: Fetch kubeconfig
      ansible.builtin.fetch:
        src: ~/.kube/config
        dest: temp/.kube/config
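Running the playbook copies the master's kubeconfig down to the control machine; note that with fetch's default flat=no the file ends up under a host-named subdirectory of the dest path:

```bash
cd infrastructure/create-resources/ansible
ansible-playbook kubeconfig.yml
# the fetched file lands under temp/.kube/config/<master-host>/...
```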
7  infrastructure/create-resources/ansible/ping.yml  Normal file
@@ -0,0 +1,7 @@
---
- hosts: serverctl_super_cluster
  gather_facts: yes
  tasks:
    - name: ping
      command: "ping -c3 {{ hostvars[item].wireguard_ip}}"
      with_items: "{{groups['all']}}"
@@ -0,0 +1,10 @@
---
- name: Download k3s binary x64
  get_url:
    url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s
    checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-amd64.txt
    dest: /usr/local/bin/k3s
    owner: root
    group: root
    mode: 0755
  when: ansible_facts.architecture == "x86_64"
@@ -0,0 +1,67 @@
---
- name: update packages
  apt:
    update_cache: yes
    cache_valid_time: 3600
  become: yes

- name: install ufw
  apt:
    name: ufw
    state: present
  become: yes
  when: ufw_enabled

- name: Allow SSH in UFW
  ufw:
    rule: allow
    port: "{{ ansible_ssh_port }}"
    proto: tcp
  become: yes
  when: ufw_enabled

- name: Allow wireguard port in UFW
  ufw:
    rule: allow
    port: "{{ wireguard_port }}"
    proto: udp
  become: yes
  when: ufw_enabled

- name: Set ufw logging
  ufw:
    logging: "on"
  become: yes
  when: ufw_enabled

- name: inter-node Wireguard UFW connectivity
  ufw:
    rule: allow
    src: "{{ hostvars[item].wireguard_ip }}"
  with_items: "{{ groups['all'] }}"
  become: yes
  when: ufw_enabled and item != inventory_hostname

- name: Reject everything and enable UFW
  ufw:
    state: enabled
    policy: reject
    log: yes
  become: yes
  when: ufw_enabled

- name: Allow 6443 in UFW /tcp
  ufw:
    rule: allow
    port: "6443"
    proto: tcp
  become: yes
  when: ufw_enabled

- name: Allow 6443 in UFW udp
  ufw:
    rule: allow
    port: "6443"
    proto: udp
  become: yes
  when: ufw_enabled
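Once the firewall tasks have run, the resulting rule set can be inspected with an ad-hoc command (become is already enabled in ansible.cfg above):

```bash
cd infrastructure/create-resources/ansible
ansible serverctl_cluster -m command -a 'ufw status verbose'
```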
@@ -0,0 +1,2 @@
---
k3s_server_location: /var/lib/rancher/k3s
@@ -0,0 +1,79 @@
---

- name: Copy K3s service file
  register: k3s_service
  template:
    src: "k3s.service.j2"
    dest: "{{ systemd_dir }}/k3s.service"
    owner: root
    group: root
    mode: 0644

- name: Enable and check K3s service
  systemd:
    name: k3s
    daemon_reload: yes
    state: restarted
    enabled: yes

- name: Wait for node-token
  wait_for:
    path: "{{ k3s_server_location }}/server/node-token"

- name: Register node-token file access mode
  stat:
    path: "{{ k3s_server_location }}/server/node-token"
  register: p

- name: Change file access node-token
  file:
    path: "{{ k3s_server_location }}/server/node-token"
    mode: "g+rx,o+rx"

- name: Read node-token from master
  slurp:
    path: "{{ k3s_server_location }}/server/node-token"
  register: node_token

- name: Store Master node-token
  set_fact:
    token: "{{ node_token.content | b64decode | regex_replace('\n', '') }}"

- name: Restore node-token file access
  file:
    path: "{{ k3s_server_location }}/server/node-token"
    mode: "{{ p.stat.mode }}"

- name: Create directory .kube
  file:
    path: ~{{ ansible_user }}/.kube
    state: directory
    owner: "{{ ansible_user }}"
    mode: "u=rwx,g=rx,o="

- name: Copy config file to user home directory
  copy:
    src: /etc/rancher/k3s/k3s.yaml
    dest: ~{{ ansible_user }}/.kube/config
    remote_src: yes
    owner: "{{ ansible_user }}"
    mode: "u=rw,g=,o="

- name: Replace https://localhost:6443 by https://master-ip:6443
  command: >-
    k3s kubectl config set-cluster default
      --server=https://{{ master_ip }}:6443
      --kubeconfig ~{{ ansible_user }}/.kube/config
  changed_when: true

- name: Create kubectl symlink
  file:
    src: /usr/local/bin/k3s
    dest: /usr/local/bin/kubectl
    state: link

- name: Create crictl symlink
  file:
    src: /usr/local/bin/k3s
    dest: /usr/local/bin/crictl
    state: link
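After the master role converges it has copied the kubeconfig into root's home and symlinked kubectl, so a quick check over SSH should work (master IP taken from the inventory above, purely as an illustration):

```bash
ssh root@95.217.155.228 'kubectl get nodes -o wide'
```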
@@ -0,0 +1,24 @@
[Unit]
Description=Lightweight Kubernetes
Documentation=https://k3s.io
After=network-online.target

[Service]
Type=notify
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/k3s server --data-dir {{ k3s_server_location }} {{ extra_server_args | default("") }} --advertise-address {{master_ip}}
KillMode=process
Delegate=yes
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
TimeoutStartSec=0
Restart=always
RestartSec=5s

[Install]
WantedBy=multi-user.target
@@ -0,0 +1,15 @@
---
- name: Copy K3s service file
  template:
    src: "k3s.service.j2"
    dest: "{{ systemd_dir }}/k3s-node.service"
    owner: root
    group: root
    mode: 0755

- name: Enable and check K3s service
  systemd:
    name: k3s-node
    daemon_reload: yes
    state: restarted
    enabled: yes
@@ -0,0 +1,24 @@
[Unit]
Description=Lightweight Kubernetes
Documentation=https://k3s.io
After=network-online.target

[Service]
Type=notify
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/k3s agent --server https://{{ master_ip }}:6443 --token {{ hostvars[groups['serverctl_master_hosts'][0]]['token'] }} {{ extra_agent_args | default("") }} --node-ip {{inventory_hostname}}
KillMode=process
Delegate=yes
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
TimeoutStartSec=0
Restart=always
RestartSec=5s

[Install]
WantedBy=multi-user.target
@@ -0,0 +1,18 @@
---
- name: Enable IPv4 forwarding
  sysctl:
    name: net.ipv4.ip_forward
    value: "1"
    state: present
    reload: yes

- name: Enable IPv6 forwarding
  sysctl:
    name: net.ipv6.conf.all.forwarding
    value: "1"
    state: present
    reload: yes
  when: ansible_all_ipv6_addresses

- name: Wait for apt to unlock
  shell: while sudo fuser /var/lib/dpkg/lock >/dev/null 2>&1; do sleep 5; done;
@@ -0,0 +1,7 @@
---
- name: systemd network restart
  service:
    name: systemd-networkd
    state: restarted
    enabled: yes
  become: yes
@@ -0,0 +1,89 @@
---
- name: install wireguard
  apt:
    name: wireguard
    state: present
  become: yes
  when: ansible_distribution == 'Debian' or ansible_distribution == "Ubuntu"

- name: install wireguard
  pacman:
    name: wireguard-tools
    state: present
  become: yes
  when: ansible_distribution == "Archlinux"

- name: generate wireguard keypair
  shell: wg genkey | tee /etc/wireguard/serverctl-privatekey | wg pubkey | tee /etc/wireguard/serverctl-publickey
  args:
    creates: /etc/wireguard/serverctl-privatekey
  become: yes

- name: register private key
  shell: cat /etc/wireguard/serverctl-privatekey
  register: wireguard_private_key
  changed_when: false
  become: yes

- name: register public key
  shell: cat /etc/wireguard/serverctl-publickey
  register: wireguard_public_key
  changed_when: false
  become: yes

- name: generate preshared keypair
  shell: "wg genpsk > /etc/wireguard/serverctl-psk-{{item}}"
  args:
    creates: "/etc/wireguard/serverctl-psk-{{item}}"
  when: inventory_hostname < item
  with_items: "{{groups['serverctl_super_cluster']}}"
  become: yes

- name: register preshared key
  shell: "cat /etc/wireguard/serverctl-psk-{{item}}"
  register: wireguard_preshared_key
  changed_when: false
  when: inventory_hostname < item
  with_items: "{{groups['serverctl_super_cluster']}}"
  become: yes

- name: message preshared keys
  set_fact: "wireguard_preshared_keys={{wireguard_preshared_keys|default({}) | combine({item.item: item.stdout})}}"
  when: item.skipped is not defined
  with_items: "{{wireguard_preshared_key.results}}"
  become: yes

#- name: print hostvars
#  ansible.builtin.debug:
#    msg: "{{hostvars[item]}}"
#  with_items: "{{groups['serverctl_super_cluster']}}"

- name: Setup wg0 device
  template:
    src: 'systemd.netdev'
    dest: '{{systemd_network_dir}}/99-serverctl-wg0.netdev'
    owner: root
    group: systemd-network
    mode: 0640
  become: yes
  notify: systemd network restart

- name: Setup wg0 network
  template:
    src: 'systemd.network'
    dest: "{{systemd_network_dir}}/99-serverctl-wg0.network"
    owner: root
    group: systemd-network
    mode: 0640
  become: yes
  notify: systemd network restart

#- name: Start and enable wireguard on boot
#  systemd:
#    name: wg-quick@wgserverctl0
#    enabled: yes
#    state: started

#- debug: msg="{{item.1}} - {{ (wireguard_base_ipv4|ipaddr(item.0 + 1)) }}"
#  with_indexed_items: "{{groups.serverctl_mesh_nodes}}"
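After the mesh tasks run and the systemd network restart handler fires, the tunnel can be inspected on any host; an illustrative check against the master from the inventory above:

```bash
ssh root@95.217.155.228 'networkctl status serverctl-wg0 && wg show serverctl-wg0'
```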
@@ -0,0 +1,22 @@
[NetDev]
Name=serverctl-wg0
Kind=wireguard
Description=WireGuard tunnel serverctl-wg0

[WireGuard]
ListenPort={{ wireguard_port }}
PrivateKey={{ wireguard_private_key.stdout }}

{% for peer in groups['serverctl_super_cluster'] %}
{% if peer != inventory_hostname %}

[WireGuardPeer]
PublicKey={{ hostvars[peer].wireguard_public_key.stdout }}
PresharedKey={{ wireguard_preshared_keys[peer] if inventory_hostname < peer else hostvars[peer].wireguard_preshared_keys[inventory_hostname] }}
AllowedIPs={{ hostvars[peer].wireguard_ip }}/32
{% if not hostvars[peer].client_server %}
Endpoint={{ hostvars[peer].ansible_host }}:{{ wireguard_port }}
PersistentKeepalive=25
{% endif %}
{% endif %}
{% endfor %}
@@ -0,0 +1,5 @@
[Match]
Name=serverctl-wg0

[Network]
Address={{ wireguard_ip }}/{{ wireguard_mask_bits }}
16  infrastructure/create-resources/ansible/server-install.yml  Normal file
@@ -0,0 +1,16 @@
- become: yes
  hosts: all
  name: server-install
  tasks:
    - name: Add the user 'kjuulh' and add it to 'sudo'
      user:
        name: kjuulh
        group: sudo
    - name:
      authorized_key:
        user: kjuulh
        state: present
        key: "{{ lookup('file', pub_key) }}"
    - name: Wait for apt to unlock
      become: yes
      shell: while sudo fuser /var/lib/dpkg/lock >/dev/null 2>&1; do sleep 5; done;
25  infrastructure/create-resources/ansible/site.yml  Normal file
@@ -0,0 +1,25 @@
---
- hosts: serverctl_cluster
  gather_facts: yes
  become: yes
  roles:
    - role: prereq
    - role: download
    - role: firewall

- hosts: serverctl_super_cluster
  gather_facts: yes
  become: yes
  roles:
    - role: wireguard/mesh

- hosts: serverctl_master_hosts
  become: yes
  roles:
    - role: "./k3s/master"

- hosts: serverctl_node_hosts
  become: yes
  roles:
    - role: "./k3s/node"
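The full provisioning run mirrors the commented-out step in .drone.yml; executed by hand from the ansible directory it would look roughly like this (the key paths assume ssh_keys.zip has been unzipped in infrastructure/):

```bash
cd infrastructure/create-resources/ansible
ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -u root \
  --key-file '../../ssh_keys/id_ed25519' \
  -e 'pub_key=../../ssh_keys/id_ed25519.pub' \
  site.yml
```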
81  infrastructure/create-resources/hcloud.tf  Normal file
@@ -0,0 +1,81 @@
variable "serverctl_master_count" {
  default = 0
}

variable "serverctl_node_count" {
  default = 0
}

resource "hcloud_placement_group" "serverctl_master" {
  name = "serverctl_master_group"
  type = "spread"
}

resource "hcloud_server" "serverctl_master" {
  count       = var.serverctl_master_count
  name        = "serverctl-master-${count.index}"
  image       = "debian-11"
  server_type = "cx11"
  ssh_keys = [
    var.hcloud_serverctl_ssh_key_id
  ]
  placement_group_id = hcloud_placement_group.serverctl_master.id


  lifecycle {
    create_before_destroy = true
  }

  provisioner "remote-exec" {
    inline = ["sudo apt update", "sudo apt install python3 -y", "echo Done!"]

    connection {
      host        = self.ipv4_address
      type        = "ssh"
      user        = "root"
      private_key = file(var.pvt_key)
    }
  }
}

resource "hcloud_placement_group" "serverctl_node" {
  name = "serverctl_node_group"
  type = "spread"
}

resource "hcloud_server" "serverctl_node" {
  count       = var.serverctl_node_count
  name        = "serverctl-node-${count.index}"
  image       = "debian-11"
  server_type = "cx11"
  ssh_keys = [
    var.hcloud_serverctl_ssh_key_id
  ]
  placement_group_id = hcloud_placement_group.serverctl_node.id


  lifecycle {
    create_before_destroy = true
  }

  provisioner "remote-exec" {
    inline = ["sudo apt update", "sudo apt install python3 -y", "echo Done!"]

    connection {
      host        = self.ipv4_address
      type        = "ssh"
      user        = "root"
      private_key = file(var.pvt_key)
    }
  }
}

resource "local_file" "hosts_cfg" {
  content = templatefile("${path.module}/templates/hosts.tftpl",
    {
      serverctl_masters = hcloud_server.serverctl_master.*.ipv4_address
      serverctl_nodes   = hcloud_server.serverctl_node.*.ipv4_address
    }
  )
  filename = "ansible/inventory/hosts.cfg"
}
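Both counts default to 0, so nothing is created until they are overridden; an apply along the lines of the commented-out pipeline step, with illustrative counts, might look like:

```bash
cd infrastructure/create-resources
terraform apply \
  -var "hcloud_token=$HCLOUD_TOKEN" \
  -var "hcloud_serverctl_ssh_key_id=$HCLOUD_SSH_KEY_ID" \
  -var "pvt_key=../ssh_keys/id_ed25519" \
  -var "pub_key=../ssh_keys/id_ed25519.pub" \
  -var "serverctl_master_count=1" \
  -var "serverctl_node_count=2"
```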
@@ -1,43 +0,0 @@
terraform {
  required_providers {
    hcloud = {
      source  = "hetznercloud/hcloud"
      version = "1.32.2"
    }
  }

  backend "s3" {
    bucket = "serverctl-terraform"
    key    = "terraform.tfstate"

    endpoint = "https://api.minio.front.kjuulh.io"

    region = "main"

    skip_credentials_validation = true
    skip_metadata_api_check     = true
    skip_region_validation      = true
    force_path_style            = true
  }
}

variable "hcloud_token" {
  sensitive = true
}

provider "hcloud" {
  token = var.hcloud_token
}

resource "hcloud_placement_group" "serverctl_master" {
  name = "serverctl_master_group"
  type = "spread"
}

resource "hcloud_server" "serverctl_master" {
  count              = 2
  name               = "serverctl-master-${count.index}"
  image              = "debian-11"
  server_type        = "cx11"
  placement_group_id = hcloud_placement_group.serverctl_master.id
}
35  infrastructure/create-resources/provider.tf  Normal file
@@ -0,0 +1,35 @@
terraform {
  required_providers {
    hcloud = {
      source  = "hetznercloud/hcloud"
      version = "1.50.0"
    }
  }

  backend "s3" {
    bucket = "serverctl-terraform"
    key    = "terraform.tfstate"

    endpoint = "https://api.minio.front.kjuulh.io"

    region = "main"

    skip_credentials_validation = true
    skip_metadata_api_check     = true
    skip_region_validation      = true
    force_path_style            = true
  }
}

variable "hcloud_token" {
  sensitive = true
}

provider "hcloud" {
  token = var.hcloud_token
}


variable "hcloud_serverctl_ssh_key_id" {}
variable "pvt_key" {}
variable "pub_key" {}
6  infrastructure/create-resources/setup-terraform.sh  Executable file
@@ -0,0 +1,6 @@
#!/bin/bash

export $(grep -v "^#" .env | xargs)

terraform init -backend-config="access_key=$ACCESS_KEY" -backend-config="secret_key=$SECRET_KEY"
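The script expects a .env file next to it (git-ignored above) carrying the MinIO backend credentials; a sketch with placeholder values:

```bash
# infrastructure/create-resources/.env -- placeholder values, not real credentials
ACCESS_KEY=changeme
SECRET_KEY=changeme
```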
35  infrastructure/create-resources/templates/hosts.tftpl  Normal file
@@ -0,0 +1,35 @@
[serverctl_master_hosts]
%{ for ip in serverctl_masters ~}
${ip} ansible_host=${ip} wireguard_ip=${cidrhost("10.1.1.0/24", index(serverctl_masters, ip) + 1)}
%{ endfor ~}

[serverctl_node_hosts]
%{ for ip in serverctl_nodes ~}
${ip} ansible_host=${ip} wireguard_ip=${cidrhost("10.1.1.0/24", index(serverctl_nodes, ip) + 10)}
%{ endfor ~}

[serverctl_home_servers]
192.168.1.150 ansible_host=192.168.1.150 wireguard_ip=10.1.1.8
#192.168.1.233 ansible_host=192.168.1.233 wireguard_ip=10.1.1.9

[serverctl_cluster:children]
serverctl_master_hosts
serverctl_node_hosts

[serverctl_super_cluster:children]
serverctl_cluster
serverctl_home_servers

[serverctl_home_servers:vars]
client_server=True

[serverctl_super_cluster:vars]
pipelining=true
ansible_ssh_user=root
ansible_ssh_port=22

[serverctl_cluster:vars]
client_server=False
pipelining=true
ansible_ssh_user=root
ansible_ssh_port=22
BIN  infrastructure/ssh_keys.zip  Normal file
Binary file not shown.
7  infrastructure/unzip-ssh-keys.sh  Executable file
@@ -0,0 +1,7 @@
#!/bin/sh

ZIP_KEY=$1

unzip -P "$ZIP_KEY" ssh_keys.zip

echo "unzip done!"
12  infrastructure/zip-ssh-keys.sh  Executable file
@@ -0,0 +1,12 @@
#!/bin/bash

ZIP_KEY=$(openssl rand -hex 30)

mkdir -p ssh_keys/

cp -f ~/.ssh/id_ed25519* ssh_keys/

zip -r --password $ZIP_KEY ssh_keys.zip ssh_keys/

echo "zip done!"
echo "Zip key: $ZIP_KEY"
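The printed key is what the commented-out pipeline consumes as the serverctl_ssh_zip_key secret; it can be registered with the Drone CLI roughly like this (the repository slug is a placeholder):

```bash
drone secret add \
  --repository your-org/serverctl \
  --name serverctl_ssh_zip_key \
  --data "$ZIP_KEY"
```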
3  renovate.json  Normal file
@@ -0,0 +1,3 @@
{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json"
}
@@ -1,4 +1,4 @@
-FROM golang:1.17-bullseye
+FROM golang:1.24-bullseye

RUN go install github.com/jackc/tern@latest

@@ -1,4 +1,4 @@
-FROM golang:1.17-bullseye
+FROM golang:1.24-bullseye

RUN go install github.com/cosmtrek/air@latest
# Development doesn't need this
@@ -1,6 +1,6 @@
module serverctl

-go 1.17
+go 1.19

require (
	github.com/Microsoft/go-winio v0.4.17 // indirect