feat: Run Wireguard and Nginx containers as separate services with healthchecks

The Wireguard and Nginx Proxy Manager containers now run as intended.

- Wireguard and Nginx each run as their own service, as planned.
- Each container has dedicated host directories for its data and a healthcheck to monitor its status.
- This took several iterations to get right.
This commit is contained in:
Tobias J. Endres 2025-09-04 02:12:10 +02:00
parent 7ec6b429c2
commit 2f5f306d88
16 changed files with 296 additions and 34 deletions

View File

@ -7,3 +7,4 @@
- common
- podman # Ensure podman is configured before network
- network
- nginx_proxy_manager

View File

@ -25,3 +25,21 @@
state: started
enabled: true
become: true
- name: Allow unprivileged users to bind to ports below 1024
ansible.builtin.sysctl:
name: net.ipv4.ip_unprivileged_port_start
value: '80'
state: present
sysctl_file: /etc/sysctl.d/99-unprivileged-ports.conf
reload: true
become: true
- name: Set sysctl for Wireguard src_valid_mark
ansible.builtin.sysctl:
name: net.ipv4.conf.all.src_valid_mark
value: '1'
state: present
sysctl_file: /etc/sysctl.d/99-wireguard-sysctl.conf
reload: true
become: true

View File

@ -1,13 +1,4 @@
---
nginx_proxy_manager_image: "jc21/nginx-proxy-manager:latest"
nginx_proxy_manager_container_name: "nginx-proxy-manager"
nginx_proxy_manager_data_path: "/opt/nginx-proxy-manager/data"
nginx_proxy_manager_letsencrypt_path: "/opt/nginx-proxy-manager/letsencrypt"
nginx_proxy_manager_compose_path: "/opt/nginx-proxy-manager/docker-compose.yml"
nginx_proxy_manager_admin_email: "tobend85@gmail.com"
nginx_proxy_manager_admin_password: "risICE3"
nginx_proxy_manager_port: "9900"
nginx_proxy_manager_ssl_port: "443"
# Podman network configuration
podman_network_name: "sublime-net"
# Wireguard-Easy container configuration
@ -15,8 +6,8 @@ wireguard_easy_image: "ghcr.io/wg-easy/wg-easy"
wireguard_easy_version: "latest"
wireguard_easy_port: "51820"
wireguard_easy_admin_port: "51821"
wireguard_easy_data_dir: "/etc/wireguard"
wireguard_easy_config_dir: "/opt/network"
wireguard_easy_data_dir: "/opt/wireguard-data"
wireguard_easy_config_dir: "/opt/wireguard-config"
wireguard_easy_host: "130.162.231.152"
wireguard_easy_password: "admin"
wireguard_easy_password_hash: ""

View File

@ -1,11 +1,55 @@
- name: Ensure user's Podman Compose directory exists
ansible.builtin.file:
path: "~/podman-compose/network"
state: directory
mode: '0755'
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
become: false
- name: Ensure Wireguard Podman Compose directory exists
ansible.builtin.file:
path: "~/podman-compose/wireguard"
state: directory
mode: '0755'
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
become: false
- name: Ensure Wireguard data directory exists
ansible.builtin.file:
path: "/opt/wireguard-data"
state: directory
mode: '0700'
owner: "root"
group: "root"
become: true
- name: Ensure Wireguard config directory exists
ansible.builtin.file:
path: "/opt/wireguard-config"
state: directory
mode: '0700'
owner: "root"
group: "root"
become: true
- name: Generate Podman Compose file for Wireguard and Nginx
template:
src: podman-compose.j2
dest: /opt/network/podman-compose.yml
owner: root
group: root
dest: "~/podman-compose/wireguard/podman-compose.yml"
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: '0644'
become: true
become: false
- name: Start Podman Compose services for Wireguard and Nginx
ansible.builtin.shell: podman-compose -f ~/podman-compose/wireguard/podman-compose.yml up -d
args:
chdir: "~/podman-compose/wireguard"
become: false
- name: Allow Nginx HTTP port
ansible.posix.firewalld:
@ -30,3 +74,28 @@
state: enabled
immediate: true
become: true
- name: Allow Wireguard Admin UI port
ansible.posix.firewalld:
port: 51821/tcp
permanent: true
state: enabled
immediate: true
become: true
- name: Test Wireguard UDP port accessibility
ansible.builtin.shell: nc -uz localhost 51820
register: wireguard_nc_test
changed_when: false
failed_when: wireguard_nc_test.rc != 0
become: false # Run as ubuntu user
tags:
- debug
- name: Display Wireguard nc test result
debug:
var: wireguard_nc_test.stdout
tags:
- debug

View File

@ -10,15 +10,13 @@ services:
ports:
- "{{ wireguard_easy_port }}:51820/udp"
- "{{ wireguard_easy_admin_port }}:51821/tcp"
- "80:80"
- "{{ nginx_proxy_manager_port }}:81"
- "{{ nginx_proxy_manager_ssl_port }}:443"
volumes:
- "{{ wireguard_easy_data_dir }}:/etc/wireguard"
- "{{ wireguard_easy_config_dir }}:/opt/network"
cap_add:
- NET_ADMIN
- SYS_MODULE
- NET_RAW
sysctls:
- net.ipv4.ip_forward=1
- net.ipv6.conf.all.disable_ipv6=0
@ -26,22 +24,13 @@ services:
- {{ podman_network_name }}
restart: unless-stopped
nginx-proxy-manager:
image: "{{ nginx_proxy_manager_image }}"
container_name: "{{ nginx_proxy_manager_container_name }}"
cap_add:
- NET_ADMIN
- SYS_MODULE
restart: always
network_mode: service:wireguard-easy
depends_on:
- wireguard-easy
environment:
INITIAL_ADMIN_EMAIL: {{ nginx_proxy_manager_admin_email }}
INITIAL_ADMIN_PASSWORD: {{ nginx_proxy_manager_admin_password }}
volumes:
- "{{ nginx_proxy_manager_data_path }}:/data"
- "{{ nginx_proxy_manager_letsencrypt_path }}:/etc/letsencrypt"
healthcheck:
test: ["CMD-SHELL", "nc -uz localhost 51820 || exit 1"]
interval: 10s
timeout: 5s
retries: 3
start_period: 60s
user: root
networks:
{{ podman_network_name }}:

View File

@ -0,0 +1,38 @@
Role Name
=========
A brief description of the role goes here.
Requirements
------------
Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
Role Variables
--------------
A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
Dependencies
------------
A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
Example Playbook
----------------
Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
- hosts: servers
roles:
- { role: username.rolename, x: 42 }
License
-------
BSD
Author Information
------------------
An optional section for the role authors to include contact information, or a website (HTML is not allowed).

View File

@ -0,0 +1,10 @@
---
# Defaults for the nginx_proxy_manager role; override via inventory/group_vars.

# Container image — floating 'latest' tag; pin a version for reproducible deploys.
nginx_proxy_manager_image: "jc21/nginx-proxy-manager:latest"
nginx_proxy_manager_container_name: "nginx-proxy-manager"
# Host paths bind-mounted into the container (see templates/podman-compose.j2:
# data_path -> /data, letsencrypt_path -> /etc/letsencrypt).
nginx_proxy_manager_data_path: "/opt/nginx-proxy-manager-data"
nginx_proxy_manager_letsencrypt_path: "/opt/nginx-proxy-manager-letsencrypt"
# NOTE(review): compose_path appears unused by this role's tasks (they write
# ~/podman-compose/nginx/podman-compose.yml) — verify and remove if dead.
nginx_proxy_manager_compose_path: "/opt/nginx-proxy-manager/docker-compose.yml"
# SECURITY NOTE(review): plaintext admin credentials committed to VCS —
# move to Ansible Vault and rotate this password.
nginx_proxy_manager_admin_email: "tobend85@gmail.com"
nginx_proxy_manager_admin_password: "risICE3"
# Published ports, kept quoted as strings so YAML does not re-type them.
nginx_proxy_manager_port: "9900"
nginx_proxy_manager_ssl_port: "443"

View File

@ -0,0 +1,3 @@
#SPDX-License-Identifier: MIT-0
---
# handlers file for nginx_proxy_manager

View File

@ -0,0 +1,35 @@
#SPDX-License-Identifier: MIT-0
---
# Galaxy metadata for the nginx_proxy_manager role.
# NOTE(review): still the unedited ansible-galaxy skeleton — fill in author,
# description, and a real SPDX license ID before publishing.
galaxy_info:
  author: your name
  description: your role description
  company: your company (optional)

  # If the issue tracker for your role is not on github, uncomment the
  # next line and provide a value
  # issue_tracker_url: http://example.com/issue/tracker

  # Choose a valid license ID from https://spdx.org - some suggested licenses:
  # - BSD-3-Clause (default)
  # - MIT
  # - GPL-2.0-or-later
  # - GPL-3.0-only
  # - Apache-2.0
  # - CC-BY-4.0
  license: license (GPL-2.0-or-later, MIT, etc)

  min_ansible_version: 2.1

  # If this a Container Enabled role, provide the minimum Ansible Container version.
  # min_ansible_container_version:

  galaxy_tags: []
  # List tags for your role here, one per line. A tag is a keyword that describes
  # and categorizes the role. Users find roles by searching for tags. Be sure to
  # remove the '[]' above, if you add tags to this list.
  #
  # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
  # Maximum 20 tags per role.

dependencies: []
  # List your role dependencies here, one per line. Be sure to remove the '[]' above,
  # if you add dependencies to this list.

View File

@ -0,0 +1,59 @@
---
# Tasks for the nginx_proxy_manager role: prepare host directories, render the
# Podman Compose file, start the service rootless, and smoke-test HTTP access.
# Everything runs as the unprivileged connection user (become: false).

- name: Ensure Nginx Proxy Manager data directory exists
  ansible.builtin.file:
    path: "~/nginx-proxy-manager-data"
    state: directory
    mode: '0700'
    owner: "{{ ansible_user }}"
    group: "{{ ansible_user }}"
  become: false

- name: Ensure Nginx Proxy Manager LetsEncrypt directory exists
  ansible.builtin.file:
    path: "~/nginx-proxy-manager-letsencrypt"
    state: directory
    mode: '0700'
    owner: "{{ ansible_user }}"
    group: "{{ ansible_user }}"
  become: false

# SECURITY NOTE(review): 0777 makes these directories world-writable on the
# host — presumably a rootless-Podman UID-mapping workaround. Prefer
# 'podman unshare chown' or the ':U' volume option instead; confirm and tighten.
- name: Set permissions for Nginx Proxy Manager data directory
  ansible.builtin.file:
    path: "~/nginx-proxy-manager-data"
    mode: '0777'
  become: false

- name: Set permissions for Nginx Proxy Manager LetsEncrypt directory
  ansible.builtin.file:
    path: "~/nginx-proxy-manager-letsencrypt"
    mode: '0777'
  become: false

# Fix: the template's destination directory was never created, and
# ansible.builtin.template does not create parent directories.
- name: Ensure Nginx Podman Compose directory exists
  ansible.builtin.file:
    path: "~/podman-compose/nginx"
    state: directory
    mode: '0755'
    owner: "{{ ansible_user }}"
    group: "{{ ansible_user }}"
  become: false

- name: Generate Podman Compose file for Nginx
  ansible.builtin.template:
    src: podman-compose.j2
    dest: "~/podman-compose/nginx/podman-compose.yml"
    owner: "{{ ansible_user }}"
    group: "{{ ansible_user }}"
    mode: '0644'
  become: false

# NOTE(review): 'podman-compose up -d' is not idempotent and always reports
# changed; consider containers.podman modules or a changed_when condition.
- name: Start Podman Compose services for Nginx
  ansible.builtin.shell: podman-compose -f ~/podman-compose/nginx/podman-compose.yml up -d
  args:
    chdir: "~/podman-compose/nginx"
  become: false

- name: Test Nginx HTTP accessibility
  ansible.builtin.shell: curl -f http://localhost:80
  register: nginx_curl_test
  changed_when: false
  failed_when: nginx_curl_test.rc != 0
  become: false  # Run as ubuntu user
  tags:
    - debug

- name: Display Nginx curl test result
  ansible.builtin.debug:
    var: nginx_curl_test.stdout
  tags:
    - debug

View File

@ -0,0 +1,24 @@
# Podman Compose definition for Nginx Proxy Manager.
# Rendered by the nginx_proxy_manager role to ~/podman-compose/nginx/ —
# edit the template, not the rendered file.
services:
  nginx-proxy-manager:
    image: "{{ nginx_proxy_manager_image }}"
    container_name: "{{ nginx_proxy_manager_container_name }}"
    cap_add:
      - NET_ADMIN
      - SYS_MODULE
    restart: always
    # NOTE(review): 'wireguard-easy' is NOT defined in this compose file (it
    # lives in the network role's separate compose project), so this
    # network_mode/depends_on pair cannot resolve here — confirm how the two
    # projects are meant to share a network namespace, or merge the files.
    network_mode: service:wireguard-easy
    depends_on:
      - wireguard-easy
    healthcheck:
      # Probes the proxy's default HTTP listener on port 80.
      test: ["CMD-SHELL", "curl -f http://localhost:80 || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 60s
    user: root
    environment:
      # Quoted so the rendered value stays a YAML string even if it contains
      # specials or expands empty.
      INITIAL_ADMIN_EMAIL: "{{ nginx_proxy_manager_admin_email }}"
      INITIAL_ADMIN_PASSWORD: "{{ nginx_proxy_manager_admin_password }}"
    volumes:
      # :Z relabels for SELinux on bind mounts.
      - "{{ nginx_proxy_manager_data_path }}:/data:Z"
      - "{{ nginx_proxy_manager_letsencrypt_path }}:/etc/letsencrypt:Z"

View File

@ -0,0 +1,3 @@
#SPDX-License-Identifier: MIT-0
localhost

View File

@ -0,0 +1,6 @@
#SPDX-License-Identifier: MIT-0
---
# Smoke-test playbook: applies the nginx_proxy_manager role to localhost.
- hosts: localhost
  remote_user: root
  roles:
    - nginx_proxy_manager

View File

@ -0,0 +1,3 @@
#SPDX-License-Identifier: MIT-0
---
# vars file for nginx_proxy_manager

View File

@ -15,6 +15,12 @@
- "/run/podman/podman.sock:/run/podman/podman.sock"
- "portainer_data:/data"
restart_policy: unless-stopped
healthcheck:
test: "curl -f http://localhost:9000 || exit 1"
interval: 5s
timeout: 3s
retries: 3
start_period: 30s
become: false
- name: Ensure Portainer container is running

View File

@ -0,0 +1,7 @@
---
# Debug playbook: applies the 'network' role to host Scully with privilege
# escalation and an explicit Python 3 interpreter.
- name: Debug Network Role
  hosts: Scully
  become: true
  vars:
    ansible_python_interpreter: /usr/bin/python3
  roles:
    - network