Add compose file management infrastructure

- collect-compose.yml: Fetches all compose files from docker_hosts
- collect-env-templates.yml: Creates .env.example with secrets redacted
- deploy-compose.yml: Pushes compose files to hosts (with optional restart)
- diff-compose.yml: Shows differences before deploying

Collected 24 compose files from 7 hosts:
- replicant: 12 stacks (arr-stack, mealie, portainer, etc)
- docker666: 4 stacks (unifi, gluetun, uptime, utils)
- databases: 3 stacks (postgres, forgejo, utils)
- download-stack: 2 stacks (download-stack, utils)
- media-transcode: 1 stack (utils)
- network-services: 1 stack (utils)
- immich: 1 stack (utils)
This commit is contained in:
Maddox 2026-01-23 19:11:23 +00:00
parent 4cb3a41f1c
commit ecac3c8949
27 changed files with 1637 additions and 0 deletions

15
compose-files/.gitignore vendored Normal file
View file

@ -0,0 +1,15 @@
# Never commit secrets
.env
*.env
.env.*
# ...but DO commit the redacted templates that collect-env-templates.yml
# generates (negations must follow the patterns they re-include).
!.env.example
!*.env.example

# Never commit data directories
data/
logs/
cache/
config/
appdata/

# OS files
.DS_Store
Thumbs.db

View file

@ -0,0 +1,51 @@
services:
  forgejo:
    # Forgejo git server (rootless image); Postgres backend lives in the
    # separate postgres stack, reached over the external "database" network.
    image: codeberg.org/forgejo/forgejo:9-rootless
    container_name: forgejo
    restart: unless-stopped
    ports:
      - "${HOST_IP}:3000:3000"
      - "${HOST_IP}:2222:2222"
    environment:
      - USER_UID=1000
      - USER_GID=1000
      # DB credentials come from .env — never hard-code them here.
      - FORGEJO__database__DB_TYPE=postgres
      - FORGEJO__database__HOST=postgres:5432
      - FORGEJO__database__NAME=${FORGEJO_DB_NAME}
      - FORGEJO__database__USER=${FORGEJO_DB_USER}
      - FORGEJO__database__PASSWD=${FORGEJO_DB_PASS}
      - FORGEJO__server__ROOT_URL=https://git.3ddbrewery.com/
      - FORGEJO__server__SSH_DOMAIN=git.3ddbrewery.com
      - FORGEJO__server__SSH_PORT=2222
      - FORGEJO__server__SSH_LISTEN_PORT=2222
    volumes:
      - /mnt/nas/docker/forgejo:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    networks:
      - database
      - proxy
    deploy:
      resources:
        limits:
          cpus: '1'
          memory: 512M
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000/api/healthz"]
      interval: 30s
      timeout: 10s
      retries: 5
    labels:
      - "autoheal=true"
      - "com.centurylinklabs.watchtower.enable=true"
      - "homepage.group=Development"
      - "homepage.name=Forgejo"
      - "homepage.href=https://git.3ddbrewery.com"
      - "homepage.icon=forgejo.png"
      - "homepage.description=Git Server"

networks:
  database:
    external: true
  proxy:
    external: true

View file

@ -0,0 +1,31 @@
services:
  postgres:
    # Postgres 14 with the pgvecto.rs vector-search extension preinstalled.
    image: tensorchord/pgvecto-rs:pg14-v0.2.0
    container_name: postgres
    restart: unless-stopped
    ports:
      - "${HOST_IP}:5432:5432"
    environment:
      # Credentials come from .env — never hard-code them here.
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
    volumes:
      - ./data:/var/lib/postgresql/data
    networks:
      - database
    deploy:
      resources:
        limits:
          cpus: '2'
          memory: 2G
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER}"]
      interval: 30s
      timeout: 10s
      retries: 5
    labels:
      - "autoheal=true"
      - "com.centurylinklabs.watchtower.enable=true"

networks:
  database:
    external: true

View file

@ -0,0 +1,60 @@
services:
  autoheal:
    # Restarts containers whose healthcheck reports unhealthy (opt-in via label).
    image: willfarrell/autoheal:latest
    container_name: autoheal
    restart: unless-stopped
    environment:
      - AUTOHEAL_CONTAINER_LABEL=autoheal
      - AUTOHEAL_INTERVAL=5
      - AUTOHEAL_START_PERIOD=0
      - AUTOHEAL_DEFAULT_STOP_TIMEOUT=10
      - WEBHOOK_URL=https://ntfy.3ddbrewery.com/autoheal-proxmox?title=${HOST_NAME}
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    labels:
      - "autoheal=true"

  watchtower:
    # Automatic image updates for containers opted in via label.
    image: containrrr/watchtower:latest
    container_name: watchtower
    restart: unless-stopped
    environment:
      - WATCHTOWER_LABEL_ENABLE=true
      - WATCHTOWER_CLEANUP=true
      - WATCHTOWER_INCLUDE_RESTARTING=true
      - WATCHTOWER_INCLUDE_STOPPED=false
      - WATCHTOWER_POLL_INTERVAL=3600
      - WATCHTOWER_TIMEOUT=30s
      - WATCHTOWER_NO_RESTART=false
      - WATCHTOWER_NOTIFICATIONS=shoutrrr
      - WATCHTOWER_NOTIFICATION_URL=ntfy://ntfy.3ddbrewery.com/watchtower-proxmox?title=${HOST_NAME}
      - DOCKER_API_VERSION=1.44
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    labels:
      - "autoheal=true"
      - "com.centurylinklabs.watchtower.enable=true"

  docker-proxy-portainer:
    # Docker socket proxy so remote Portainer never touches the raw socket.
    image: ghcr.io/tecnativa/docker-socket-proxy:latest
    container_name: docker-proxy-portainer
    restart: unless-stopped
    ports:
      - "${HOST_IP}:2376:2375"
    environment:
      - CONTAINERS=1
      - IMAGES=1
      - NETWORKS=1
      - VOLUMES=1
      - INFO=1
      - EVENTS=1
      - PING=1
      - VERSION=1
      # NOTE(review): POST+EXEC grant near-full control through the proxy —
      # confirm Portainer actually needs both.
      - POST=1
      - EXEC=1
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
    cap_drop:
      - ALL
    security_opt:
      - no-new-privileges:true

View file

@ -0,0 +1,65 @@
services:
  gluetun:
    # VPN gateway; the socks5 service below shares its network namespace.
    image: qmcgaw/gluetun:v3
    container_name: gluetun
    devices:
      - /dev/net/tun:/dev/net/tun
    cap_add:
      - NET_ADMIN
    environment:
      # Core VPN Configuration - OpenVPN. Credentials come from .env.
      # SECURITY: the credentials (and the WireGuard private key) previously
      # committed here in plain text must be rotated at the provider.
      - VPN_SERVICE_PROVIDER=protonvpn
      - VPN_TYPE=openvpn
      - OPENVPN_USER=${OPENVPN_USER}
      - OPENVPN_PASSWORD=${OPENVPN_PASSWORD}
      # Core VPN Configuration - WireGuard (faster; uncomment to switch)
      #- VPN_TYPE=wireguard
      #- WIREGUARD_PRIVATE_KEY=${WIREGUARD_PRIVATE_KEY}
      # Server Selection
      - SERVER_COUNTRIES=United States
      - SERVER_CITIES=Secaucus,Chicago,New York
      # Basic Settings
      - TZ=America/New_York
      - PUID=1000
      - PGID=1000
      # Proxy Configuration
      - HTTPPROXY=on
      - HTTPPROXY_LISTENING_ADDRESS=:38888
      - HTTPPROXY_STEALTH=on
      # Security Features
      - BLOCK_ADS=on
      - BLOCK_MALICIOUS=on
      # Control Server
      - HTTP_CONTROL_SERVER_ADDRESS=:8000
    ports:
      # Quoted so YAML never misparses colon-separated digits as base-60 ints.
      - "38888:38888"  # Gluetun HTTP proxy server
      - "38443:443"    # HTTPS/SSL traffic passthrough
      - "8999:80"      # HTTP traffic/alternative web interface port
      - "8898:8000"    # Gluetun control server API
      - "21080:1080"   # SOCKS5 proxy (socks5 service below)
    labels:
      - "autoheal=true"
      - "com.centurylinklabs.watchtower.enable=true"
    volumes:
      - ./:/gluetun
    restart: unless-stopped

  socks5:
    # SOCKS5 proxy tunnelled through gluetun's network namespace.
    image: serjs/go-socks5-proxy:latest
    container_name: gluetun-socks5
    network_mode: "service:gluetun"
    environment:
      - REQUIRE_AUTH=false
    depends_on:
      - gluetun
    restart: unless-stopped

View file

@ -0,0 +1,17 @@
services:
  uptimekuma:
    # Uptime Kuma status/monitoring dashboard; the mounted Docker socket lets
    # it run container-based monitors.
    image: louislam/uptime-kuma
    container_name: Uptime-Kuma
    hostname: uptimekuma
    mem_limit: 3g
    cpu_shares: 1024
    security_opt:
      # NOTE(review): "false" disables a standard hardening flag — confirm
      # this is intentional; no-new-privileges:true is the safer setting.
      - no-new-privileges:false
    ports:
      # Quoted so YAML never misparses colon-separated digits.
      - "3444:3001"
    volumes:
      - ./:/app/data:rw
      - /var/run/docker.sock:/var/run/docker.sock
    environment:
      TZ: America/Indiana/Indianapolis
    restart: on-failure:5

View file

@ -0,0 +1,60 @@
services:
  autoheal:
    # Restarts containers whose healthcheck reports unhealthy (opt-in via label).
    image: willfarrell/autoheal:latest
    container_name: autoheal
    restart: unless-stopped
    environment:
      - AUTOHEAL_CONTAINER_LABEL=autoheal
      - AUTOHEAL_INTERVAL=5
      - AUTOHEAL_START_PERIOD=0
      - AUTOHEAL_DEFAULT_STOP_TIMEOUT=10
      - WEBHOOK_URL=https://ntfy.3ddbrewery.com/autoheal-proxmox?title=${HOST_NAME}
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    labels:
      - "autoheal=true"

  watchtower:
    # Automatic image updates for containers opted in via label.
    image: containrrr/watchtower:latest
    container_name: watchtower
    restart: unless-stopped
    environment:
      - WATCHTOWER_LABEL_ENABLE=true
      - WATCHTOWER_CLEANUP=true
      - WATCHTOWER_INCLUDE_RESTARTING=true
      - WATCHTOWER_INCLUDE_STOPPED=false
      - WATCHTOWER_POLL_INTERVAL=3600
      - WATCHTOWER_TIMEOUT=30s
      - WATCHTOWER_NO_RESTART=false
      - WATCHTOWER_NOTIFICATIONS=shoutrrr
      - WATCHTOWER_NOTIFICATION_URL=ntfy://ntfy.3ddbrewery.com/watchtower-proxmox?title=${HOST_NAME}
      - DOCKER_API_VERSION=1.44
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    labels:
      - "autoheal=true"
      - "com.centurylinklabs.watchtower.enable=true"

  docker-proxy-portainer:
    # Docker socket proxy so remote Portainer never touches the raw socket.
    image: ghcr.io/tecnativa/docker-socket-proxy:latest
    container_name: docker-proxy-portainer
    restart: unless-stopped
    ports:
      - "${HOST_IP}:2376:2375"
    environment:
      - CONTAINERS=1
      - IMAGES=1
      - NETWORKS=1
      - VOLUMES=1
      - INFO=1
      - EVENTS=1
      - PING=1
      - VERSION=1
      # NOTE(review): POST+EXEC grant near-full control through the proxy —
      # confirm Portainer actually needs both.
      - POST=1
      - EXEC=1
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
    cap_drop:
      - ALL
    security_opt:
      - no-new-privileges:true

View file

@ -0,0 +1,119 @@
services:
  gluetun:
    # VPN gateway — nzbget and rutorrent share its network namespace, so all
    # their traffic (and their published ports) go through this container.
    image: qmcgaw/gluetun:v3
    container_name: gluetun
    cap_add:
      - NET_ADMIN
    devices:
      - /dev/net/tun:/dev/net/tun
    environment:
      # WireGuard key comes from .env — never hard-code it here.
      - VPN_SERVICE_PROVIDER=protonvpn
      - VPN_TYPE=wireguard
      - WIREGUARD_PRIVATE_KEY=${WIREGUARD_PRIVATE_KEY}
      - SERVER_COUNTRIES=United States
      - SERVER_CITIES=Secaucus,Chicago,New York
      - TZ=America/New_York
      - PUID=1000
      - PGID=1000
      - HTTPPROXY=on
      - HTTPPROXY_LISTENING_ADDRESS=:38888
      - HTTPPROXY_STEALTH=on
      - BLOCK_ADS=on
      - BLOCK_MALICIOUS=on
      - HTTP_CONTROL_SERVER_ADDRESS=:8000
    ports:
      # Quoted so YAML never misparses colon-separated digits as base-60 ints.
      - "33000:3000"   # ruTorrent web interface (via Gluetun)
      - "38888:38888"  # Gluetun HTTP proxy server
      - "38443:443"    # HTTPS/SSL traffic passthrough
      - "35000:5000"   # Additional application port
      - "51413:51413"  # ruTorrent BitTorrent incoming connections
      - "6789:6789"    # NZBGet web interface (via Gluetun)
      - "8999:80"      # HTTP traffic/alternative web interface port
      - "38000:8000"   # Gluetun control server API
      - "38388:8388"   # Shadowsocks proxy (TCP/UDP) - currently disabled
    volumes:
      - ./gluetun:/gluetun
    networks:
      - download
    restart: unless-stopped
    deploy:
      resources:
        limits:
          cpus: '1.0'
          memory: 512M
        reservations:
          memory: 128M
    labels:
      - "autoheal=true"
      - "com.centurylinklabs.watchtower.enable=true"
      - "homepage.group=Downloads"
      - "homepage.name=Gluetun"
      - "homepage.icon=gluetun.png"
      - "homepage.widget.type=gluetun"
      - "homepage.widget.url=http://192.168.1.122:38000"

  nzbget:
    # Usenet downloader; all traffic routed through gluetun.
    image: lscr.io/linuxserver/nzbget:latest
    container_name: nzbget
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/New_York
    volumes:
      - ./nzbget:/config
      - /mnt/nas/downloads/nzbget:/downloads
      - /mnt/nas/media:/media
    network_mode: service:gluetun
    deploy:
      resources:
        limits:
          cpus: '2.0'
          memory: 2G
        reservations:
          memory: 256M
    depends_on:
      - gluetun
    restart: unless-stopped
    labels:
      - "autoheal=true"
      - "com.centurylinklabs.watchtower.enable=true"
      - "homepage.group=Downloads"
      - "homepage.name=NZBGet"
      - "homepage.icon=nzbget.png"
      - "homepage.href=https://nzb.3ddbrewery.com"
      - "homepage.widget.type=nzbget"
      - "homepage.widget.url=http://192.168.1.122:6789"

  rutorrent:
    # BitTorrent client; all traffic routed through gluetun.
    image: lscr.io/linuxserver/rutorrent:latest
    container_name: rutorrent
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/New_York
    volumes:
      - ./rutorrent:/config
      - /mnt/nas/downloads/rutorrent:/downloads
      - /mnt/nas/media:/media
    network_mode: service:gluetun
    depends_on:
      - gluetun
    restart: unless-stopped
    deploy:
      resources:
        limits:
          cpus: '2.0'
          memory: 2G
        reservations:
          memory: 256M
    labels:
      - "autoheal=true"
      - "com.centurylinklabs.watchtower.enable=true"
      - "homepage.group=Downloads"
      - "homepage.name=ruTorrent"
      - "homepage.icon=rutorrent.png"
      - "homepage.href=https://rutorrent.3ddbrewery.com"

networks:
  download:
    external: true

View file

@ -0,0 +1,60 @@
services:
  autoheal:
    # Restarts containers whose healthcheck reports unhealthy (opt-in via label).
    image: willfarrell/autoheal:latest
    container_name: autoheal
    restart: unless-stopped
    environment:
      - AUTOHEAL_CONTAINER_LABEL=autoheal
      - AUTOHEAL_INTERVAL=5
      - AUTOHEAL_START_PERIOD=0
      - AUTOHEAL_DEFAULT_STOP_TIMEOUT=10
      - WEBHOOK_URL=https://ntfy.3ddbrewery.com/autoheal-proxmox?title=${HOST_NAME}
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    labels:
      - "autoheal=true"

  watchtower:
    # Automatic image updates for containers opted in via label.
    image: containrrr/watchtower:latest
    container_name: watchtower
    restart: unless-stopped
    environment:
      - WATCHTOWER_LABEL_ENABLE=true
      - WATCHTOWER_CLEANUP=true
      - WATCHTOWER_INCLUDE_RESTARTING=true
      - WATCHTOWER_INCLUDE_STOPPED=false
      - WATCHTOWER_POLL_INTERVAL=3600
      - WATCHTOWER_TIMEOUT=30s
      - WATCHTOWER_NO_RESTART=false
      - WATCHTOWER_NOTIFICATIONS=shoutrrr
      - WATCHTOWER_NOTIFICATION_URL=ntfy://ntfy.3ddbrewery.com/watchtower-proxmox?title=${HOST_NAME}
      - DOCKER_API_VERSION=1.44
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    labels:
      - "autoheal=true"
      - "com.centurylinklabs.watchtower.enable=true"

  docker-proxy-portainer:
    # Docker socket proxy so remote Portainer never touches the raw socket.
    image: ghcr.io/tecnativa/docker-socket-proxy:latest
    container_name: docker-proxy-portainer
    restart: unless-stopped
    ports:
      - "${HOST_IP}:2376:2375"
    environment:
      - CONTAINERS=1
      - IMAGES=1
      - NETWORKS=1
      - VOLUMES=1
      - INFO=1
      - EVENTS=1
      - PING=1
      - VERSION=1
      # NOTE(review): POST+EXEC grant near-full control through the proxy —
      # confirm Portainer actually needs both.
      - POST=1
      - EXEC=1
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
    cap_drop:
      - ALL
    security_opt:
      - no-new-privileges:true

View file

@ -0,0 +1,60 @@
services:
  autoheal:
    # Restarts containers whose healthcheck reports unhealthy (opt-in via label).
    image: willfarrell/autoheal:latest
    container_name: autoheal
    restart: unless-stopped
    environment:
      - AUTOHEAL_CONTAINER_LABEL=autoheal
      - AUTOHEAL_INTERVAL=5
      - AUTOHEAL_START_PERIOD=0
      - AUTOHEAL_DEFAULT_STOP_TIMEOUT=10
      - WEBHOOK_URL=https://ntfy.3ddbrewery.com/autoheal-proxmox?title=${HOST_NAME}
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    labels:
      - "autoheal=true"

  watchtower:
    # Automatic image updates for containers opted in via label.
    image: containrrr/watchtower:latest
    container_name: watchtower
    restart: unless-stopped
    environment:
      - WATCHTOWER_LABEL_ENABLE=true
      - WATCHTOWER_CLEANUP=true
      - WATCHTOWER_INCLUDE_RESTARTING=true
      - WATCHTOWER_INCLUDE_STOPPED=false
      - WATCHTOWER_POLL_INTERVAL=3600
      - WATCHTOWER_TIMEOUT=30s
      - WATCHTOWER_NO_RESTART=false
      - WATCHTOWER_NOTIFICATIONS=shoutrrr
      - WATCHTOWER_NOTIFICATION_URL=ntfy://ntfy.3ddbrewery.com/watchtower-proxmox?title=${HOST_NAME}
      - DOCKER_API_VERSION=1.44
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    labels:
      - "autoheal=true"
      - "com.centurylinklabs.watchtower.enable=true"

  docker-proxy-portainer:
    # Docker socket proxy so remote Portainer never touches the raw socket.
    image: ghcr.io/tecnativa/docker-socket-proxy:latest
    container_name: docker-proxy-portainer
    restart: unless-stopped
    ports:
      - "${HOST_IP}:2376:2375"
    environment:
      - CONTAINERS=1
      - IMAGES=1
      - NETWORKS=1
      - VOLUMES=1
      - INFO=1
      - EVENTS=1
      - PING=1
      - VERSION=1
      # NOTE(review): POST+EXEC grant near-full control through the proxy —
      # confirm Portainer actually needs both.
      - POST=1
      - EXEC=1
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
    cap_drop:
      - ALL
    security_opt:
      - no-new-privileges:true

View file

@ -0,0 +1,60 @@
services:
  autoheal:
    # Restarts containers whose healthcheck reports unhealthy (opt-in via label).
    image: willfarrell/autoheal:latest
    container_name: autoheal
    restart: unless-stopped
    environment:
      - AUTOHEAL_CONTAINER_LABEL=autoheal
      - AUTOHEAL_INTERVAL=5
      - AUTOHEAL_START_PERIOD=0
      - AUTOHEAL_DEFAULT_STOP_TIMEOUT=10
      - WEBHOOK_URL=https://ntfy.3ddbrewery.com/autoheal-proxmox?title=${HOST_NAME}
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    labels:
      - "autoheal=true"

  watchtower:
    # Automatic image updates for containers opted in via label.
    image: containrrr/watchtower:latest
    container_name: watchtower
    restart: unless-stopped
    environment:
      - WATCHTOWER_LABEL_ENABLE=true
      - WATCHTOWER_CLEANUP=true
      - WATCHTOWER_INCLUDE_RESTARTING=true
      - WATCHTOWER_INCLUDE_STOPPED=false
      - WATCHTOWER_POLL_INTERVAL=3600
      - WATCHTOWER_TIMEOUT=30s
      - WATCHTOWER_NO_RESTART=false
      - WATCHTOWER_NOTIFICATIONS=shoutrrr
      - WATCHTOWER_NOTIFICATION_URL=ntfy://ntfy.3ddbrewery.com/watchtower-proxmox?title=${HOST_NAME}
      - DOCKER_API_VERSION=1.44
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    labels:
      - "autoheal=true"
      - "com.centurylinklabs.watchtower.enable=true"

  docker-proxy-portainer:
    # Docker socket proxy so remote Portainer never touches the raw socket.
    image: ghcr.io/tecnativa/docker-socket-proxy:latest
    container_name: docker-proxy-portainer
    restart: unless-stopped
    ports:
      - "${HOST_IP}:2376:2375"
    environment:
      - CONTAINERS=1
      - IMAGES=1
      - NETWORKS=1
      - VOLUMES=1
      - INFO=1
      - EVENTS=1
      - PING=1
      - VERSION=1
      # NOTE(review): POST+EXEC grant near-full control through the proxy —
      # confirm Portainer actually needs both.
      - POST=1
      - EXEC=1
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
    cap_drop:
      - ALL
    security_opt:
      - no-new-privileges:true

View file

@ -0,0 +1,60 @@
services:
  autoheal:
    # Restarts containers whose healthcheck reports unhealthy (opt-in via label).
    image: willfarrell/autoheal:latest
    container_name: autoheal
    restart: unless-stopped
    environment:
      - AUTOHEAL_CONTAINER_LABEL=autoheal
      - AUTOHEAL_INTERVAL=5
      - AUTOHEAL_START_PERIOD=0
      - AUTOHEAL_DEFAULT_STOP_TIMEOUT=10
      - WEBHOOK_URL=https://ntfy.3ddbrewery.com/autoheal-proxmox?title=${HOST_NAME}
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    labels:
      - "autoheal=true"

  watchtower:
    # Automatic image updates for containers opted in via label.
    image: containrrr/watchtower:latest
    container_name: watchtower
    restart: unless-stopped
    environment:
      - WATCHTOWER_LABEL_ENABLE=true
      - WATCHTOWER_CLEANUP=true
      - WATCHTOWER_INCLUDE_RESTARTING=true
      - WATCHTOWER_INCLUDE_STOPPED=false
      - WATCHTOWER_POLL_INTERVAL=3600
      - WATCHTOWER_TIMEOUT=30s
      - WATCHTOWER_NO_RESTART=false
      - WATCHTOWER_NOTIFICATIONS=shoutrrr
      - WATCHTOWER_NOTIFICATION_URL=ntfy://ntfy.3ddbrewery.com/watchtower-proxmox?title=${HOST_NAME}
      - DOCKER_API_VERSION=1.44
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    labels:
      - "autoheal=true"
      - "com.centurylinklabs.watchtower.enable=true"

  docker-proxy-portainer:
    # Docker socket proxy so remote Portainer never touches the raw socket.
    image: ghcr.io/tecnativa/docker-socket-proxy:latest
    container_name: docker-proxy-portainer
    restart: unless-stopped
    ports:
      - "${HOST_IP}:2376:2375"
    environment:
      - CONTAINERS=1
      - IMAGES=1
      - NETWORKS=1
      - VOLUMES=1
      - INFO=1
      - EVENTS=1
      - PING=1
      - VERSION=1
      # NOTE(review): POST+EXEC grant near-full control through the proxy —
      # confirm Portainer actually needs both.
      - POST=1
      - EXEC=1
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
    cap_drop:
      - ALL
    security_opt:
      - no-new-privileges:true

View file

@ -0,0 +1,294 @@
# ~/docker/appdata/arr-stack/docker-compose.yml
# Unified *arr stack for replicant
# Migrated from alien (192.168.1.252)
#
# SECURITY: the Sonarr/Radarr/Readarr API keys previously committed here in
# plain text should be regenerated; values now come from .env.
services:
  # ============================================
  # INDEXER MANAGEMENT
  # ============================================
  prowlarr:
    image: lscr.io/linuxserver/prowlarr:latest
    container_name: prowlarr
    restart: unless-stopped
    environment:
      - PUID=${PUID}
      - PGID=${PGID}
      - TZ=${TZ}
    volumes:
      - ./prowlarr:/config
    ports:
      - "9696:9696"
    networks:
      - proxy
      - media
    deploy:
      resources:
        limits:
          cpus: '1'
          memory: 512M
        reservations:
          memory: 128M
    labels:
      - "homepage.group=Media"
      - "homepage.name=Prowlarr"
      - "homepage.icon=prowlarr.png"
      - "homepage.href=http://${HOST_IP}:9696"
      - "homepage.description=Indexer manager"

  # ============================================
  # TV / MOVIES / MUSIC / BOOKS
  # ============================================
  sonarr:
    image: ghcr.io/linuxserver/sonarr:latest
    container_name: sonarr
    restart: unless-stopped
    environment:
      - PUID=${PUID}
      - PGID=${PGID}
      - TZ=${TZ}
    volumes:
      - ./sonarr:/config
      - /volume1/Media:/media
      - /volume1/archive:/archive
      - /volume1/Downloads/nzbget:/downloads
      - /volume1/Downloads/rutorrent/incoming:/incoming
    ports:
      - "8989:8989"
    networks:
      - proxy
      - media
    deploy:
      resources:
        limits:
          cpus: '2'
          memory: 1G
        reservations:
          memory: 256M
    labels:
      - "homepage.group=Media"
      - "homepage.name=Sonarr"
      - "homepage.icon=sonarr.png"
      - "homepage.href=https://sonarr.3ddbrewery.com"
      - "homepage.description=TV Shows"
      - "homepage.widget.type=sonarr"
      - "homepage.widget.url=http://${HOST_IP}:8989"
      - "homepage.widget.key=${SONARR_API_KEY}"
      - "homepage.widget.enableQueue=true"

  radarr:
    image: ghcr.io/linuxserver/radarr:latest
    container_name: radarr
    restart: unless-stopped
    environment:
      - PUID=${PUID}
      - PGID=${PGID}
      - TZ=${TZ}
    volumes:
      - ./radarr:/config
      - /volume1/Media:/media
      - /volume1/archive:/archive
      - /volume1/Downloads/nzbget:/downloads
      - /volume1/Downloads/rutorrent/incoming:/incoming
    ports:
      - "7878:7878"
    networks:
      - proxy
      - media
    deploy:
      resources:
        limits:
          cpus: '2'
          memory: 1G
        reservations:
          memory: 256M
    labels:
      - "homepage.group=Media"
      - "homepage.name=Radarr"
      - "homepage.icon=radarr.png"
      - "homepage.href=https://radarr.3ddbrewery.com"
      - "homepage.description=Movies"
      - "homepage.widget.type=radarr"
      - "homepage.widget.url=http://${HOST_IP}:7878"
      - "homepage.widget.key=${RADARR_API_KEY}"
      - "homepage.widget.enableQueue=true"

  lidarr:
    image: lscr.io/linuxserver/lidarr:latest
    container_name: lidarr
    restart: unless-stopped
    environment:
      - PUID=${PUID}
      - PGID=${PGID}
      - TZ=${TZ}
    volumes:
      - ./lidarr:/config
      - /volume1/Media:/media
      - /volume1/archive:/archive
      - /volume1/Downloads/nzbget:/downloads
      - /volume1/Downloads/rutorrent:/torrent
      - /volume1/Downloads/slskd:/slskd_downloads
    ports:
      - "8686:8686"
    networks:
      - proxy
      - media
    deploy:
      resources:
        limits:
          cpus: '2'
          memory: 1G
        reservations:
          memory: 256M
    labels:
      - "homepage.group=Media"
      - "homepage.name=Lidarr"
      - "homepage.icon=lidarr.png"
      - "homepage.href=http://${HOST_IP}:8686"
      - "homepage.description=Music"

  # readarr:
  #   image: lscr.io/linuxserver/readarr:latest
  #   container_name: readarr
  #   restart: unless-stopped
  #   environment:
  #     - PUID=${PUID}
  #     - PGID=${PGID}
  #     - TZ=${TZ}
  #   volumes:
  #     - ./readarr:/config
  #     - /volume1/Media:/media
  #     - /volume1/archive:/archive
  #     - /volume1/Downloads/nzbget:/downloads
  #   ports:
  #     - "8787:8787"
  #   networks:
  #     - proxy
  #     - media
  #   deploy:
  #     resources:
  #       limits:
  #         cpus: '1'
  #         memory: 1G
  #       reservations:
  #         memory: 256M
  #   labels:
  #     - "homepage.group=Media"
  #     - "homepage.name=Readarr"
  #     - "homepage.icon=readarr.png"
  #     - "homepage.href=https://readarr.3ddbrewery.com"
  #     - "homepage.description=Books"
  #     - "homepage.widget.type=readarr"
  #     - "homepage.widget.url=http://${HOST_IP}:8787"
  #     - "homepage.widget.key=${READARR_API_KEY}"
  #     - "homepage.widget.enableQueue=true"

  # ============================================
  # SUBTITLES
  # ============================================
  bazarr:
    image: lscr.io/linuxserver/bazarr:latest
    container_name: bazarr
    restart: unless-stopped
    environment:
      - PUID=${PUID}
      - PGID=${PGID}
      - TZ=${TZ}
    volumes:
      - ./bazarr:/config
      - /volume1/Media:/media
      - /volume1/archive:/archive
    ports:
      - "6767:6767"
    networks:
      - proxy
      - media
    deploy:
      resources:
        limits:
          cpus: '1'
          memory: 512M
        reservations:
          memory: 128M
    labels:
      - "homepage.group=Media"
      - "homepage.name=Bazarr"
      - "homepage.icon=bazarr.png"
      - "homepage.href=http://${HOST_IP}:6767"
      - "homepage.description=Subtitles"

  # ============================================
  # PROFILE SYNC
  # ============================================
  profilarr:
    image: santiagosayshey/profilarr:latest
    container_name: profilarr
    restart: unless-stopped
    environment:
      - TZ=${TZ}
    volumes:
      - ./profilarr:/config
    ports:
      - "6868:6868"
    networks:
      - proxy
    deploy:
      resources:
        limits:
          cpus: '0.5'
          memory: 256M
        reservations:
          memory: 64M
    labels:
      - "homepage.group=Media"
      - "homepage.name=Profilarr"
      - "homepage.icon=profilarr.png"
      - "homepage.href=http://${HOST_IP}:6868"
      - "homepage.description=Profile sync"

  # ============================================
  # SOULSEEK CLIENT (for Lidarr)
  # ============================================
  slskd:
    image: slskd/slskd:latest
    container_name: slskd
    restart: unless-stopped
    environment:
      - PUID=${PUID}
      - PGID=${PGID}
      - TZ=${TZ}
      - SLSKD_REMOTE_CONFIGURATION=true
    volumes:
      - ./slskd:/app
      - /volume1/Media:/media
      - /volume1/archive:/archive
      - /volume1/Downloads/nzbget:/nzb-downloads
      - /volume1/Downloads/rutorrent:/torrent
      - /volume1/Downloads/slskd:/downloads
    ports:
      - "5030:5030"
      - "5031:5031"
      - "50300:50300"
    networks:
      - proxy
      - download
    deploy:
      resources:
        limits:
          cpus: '1'
          memory: 512M
        reservations:
          memory: 128M
    labels:
      - "homepage.group=Downloads"
      - "homepage.name=slskd"
      - "homepage.icon=slskd.png"
      - "homepage.href=http://${HOST_IP}:5030"
      - "homepage.description=Soulseek"

networks:
  proxy:
    external: true
  media:
    external: true
  download:
    external: true

View file

@ -0,0 +1,54 @@
services:
  emby:
    image: linuxserver/emby:beta
    container_name: emby
    hostname: emby
    restart: unless-stopped
    networks:
      - proxy
      - media
    ports:
      - "8096:8096"
      - "8920:8920"
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/New_York
    volumes:
      - ./:/config
      - /volume1/Media:/media
      - /volume1/archive:/archive
    # Intel Quick Sync GPU passthrough
    devices:
      - /dev/dri:/dev/dri
    group_add:
      - "992"  # render group for GPU access
    deploy:
      resources:
        limits:
          memory: 4G
          cpus: '2.0'
    labels:
      # Homepage dashboard — API key comes from .env (SECURITY: regenerate the
      # key previously committed here in plain text).
      - "homepage.group=Media"
      - "homepage.name=Emby"
      - "homepage.icon=emby.png"
      - "homepage.href=http://192.168.1.245:8096"
      - "homepage.description=Media server"
      - "homepage.widget.type=emby"
      - "homepage.widget.url=http://192.168.1.245:8096"
      - "homepage.widget.key=${EMBY_API_KEY}"
      - "homepage.widget.fields=[\"movies\", \"series\", \"episodes\", \"songs\"]"
      # Traefik (uncomment when ready)
      # - "traefik.enable=true"
      # - "traefik.http.routers.emby.entrypoints=websecure"
      # - "traefik.http.routers.emby.rule=Host(`m.3ddbrew.com`)"
      # - "traefik.http.routers.emby.tls=true"
      # - "traefik.http.routers.emby.tls.certresolver=myresolver"
      # - "traefik.http.services.emby.loadbalancer.server.port=8096"

networks:
  proxy:
    external: true
  media:
    external: true

View file

@ -0,0 +1,53 @@
services:
  lidarr:
    # image: blampe/lidarr:latest # Plugin-enabled Lidarr image with slskd support
    # image: ghcr.io/hotio/lidarr:pr-plugins
    image: lscr.io/linuxserver/lidarr:latest
    container_name: lidarr
    environment:
      - PUID=1000  # Replace with your user ID (run `id -u` to find yours)
      - PGID=1000  # Replace with your group ID (run `id -g` to find yours)
      - TZ=America/New_York  # Replace with your timezone
    volumes:
      - ./:/config
      - ./custom-services.d:/custom-services.d
      - ./custom-cont-init.d:/custom-cont-init.d
      - /volume1/Media:/media
      - /volume1/Downloads/nzbget:/downloads
      - /volume1/Downloads/rutorrent:/torrent
      - /volume1/archive:/archive
      - /volume1/Downloads/slskd:/slskd_downloads  # Slskd downloads
    ports:
      # Quoted so YAML never misparses colon-separated digits as base-60 ints.
      - "8686:8686"
    networks:
      - traefik_proxy
    restart: unless-stopped
    depends_on:
      - slskd

  slskd:
    image: slskd/slskd:latest
    container_name: slskd
    environment:
      - PUID=1000  # Same as Lidarr
      - PGID=1000  # Same as Lidarr
      - TZ=America/New_York  # Same timezone
      - SLSKD_REMOTE_CONFIGURATION=true
    volumes:
      - ./slskd/config:/app  # Slskd config
      - /volume1/Media:/media
      - /volume1/Downloads/nzbget:/nzb-downloads
      - /volume1/Downloads/rutorrent:/torrent
      - /volume1/archive:/archive
      - /volume1/Downloads/slskd:/downloads  # Slskd downloads
    ports:
      - "5030:5030"    # Web interface
      - "5031:5031"    # API
      - "50300:50300"  # Soulseek port
    networks:
      - traefik_proxy
    restart: unless-stopped
    # network_mode: "container:gluetun"

networks:
  traefik_proxy:
    external: true

View file

@ -0,0 +1,106 @@
# ~/docker/appdata/mealie/docker-compose.yml
# Mealie - Recipe manager with PostgreSQL backend
# Migrated from alien to replicant
#
# SECURITY: the DB password and Gmail app password previously committed here
# in plain text must be rotated; values now come from .env.
services:
  mealie:
    image: ghcr.io/mealie-recipes/mealie:latest
    container_name: mealie
    restart: unless-stopped
    depends_on:
      mealie_postgres:
        condition: service_healthy
    ports:
      - "9925:9000"
    environment:
      # User/System
      - PUID=1000
      - PGID=1000
      - TZ=America/New_York
      # Application
      - BASE_URL=https://food.3ddbrewery.com
      - ALLOW_SIGNUP=false
      - AUTO_BACKUP_ENABLED=true
      - API_PORT=9000
      - TOKEN_TIME=720
      # PostgreSQL — password must match mealie_postgres below.
      - DB_ENGINE=postgres
      - POSTGRES_USER=mealie
      - POSTGRES_PASSWORD=${MEALIE_DB_PASSWORD}
      - POSTGRES_SERVER=mealie_postgres
      - POSTGRES_PORT=5432
      - POSTGRES_DB=mealie
      # SMTP
      - SMTP_HOST=smtp.gmail.com
      - SMTP_PORT=587
      - SMTP_AUTH_STRATEGY=TLS
      - SMTP_FROM_NAME=Mealie
      - SMTP_FROM_EMAIL=xoppaw@gmail.com
      - SMTP_USER=xoppaw@gmail.com
      - SMTP_PASSWORD=${SMTP_PASSWORD}
      # AI (Ollama)
      - OPENAI_BASE_URL=http://192.168.1.245:11434/v1
      - OPENAI_API_KEY=56
      - OPENAI_MODEL=tinyllama
      - OPENAI_SEND_DATABASE_DATA=true
      # OIDC (Authentik)
      - OIDC_AUTH_ENABLED=true
      - OIDC_SIGNUP_ENABLED=true
      - OIDC_CONFIGURATION_URL=https://id.3ddbrewery.com/application/o/mealie/.well-known/openid-configuration
      - OIDC_CLIENT_ID=${OIDC_CLIENT_ID}
      - OIDC_CLIENT_SECRET=${OIDC_CLIENT_SECRET}
      - OIDC_PROVIDER_NAME=Authentik
      - OIDC_AUTO_REDIRECT=false
      - OIDC_REMEMBER_ME=true
      - FORWARDED_ALLOW_IPS=*
    volumes:
      - ./:/app/data
    networks:
      - proxy
      - database
    deploy:
      resources:
        limits:
          cpus: '2'
          memory: 1G
        reservations:
          memory: 256M
    labels:
      # Homepage
      - "homepage.group=Services"
      - "homepage.name=Mealie"
      - "homepage.icon=mealie.png"
      - "homepage.href=https://food.3ddbrewery.com"
      - "homepage.description=Recipe manager"

  mealie_postgres:
    image: postgres:15
    container_name: mealie_postgres
    restart: unless-stopped
    environment:
      - POSTGRES_USER=mealie
      # Must match POSTGRES_PASSWORD used by the mealie service above.
      - POSTGRES_PASSWORD=${MEALIE_DB_PASSWORD}
      - POSTGRES_DB=mealie
      - TZ=America/New_York
    volumes:
      - ./postgres:/var/lib/postgresql/data
    networks:
      - database
    deploy:
      resources:
        limits:
          cpus: '1'
          memory: 512M
        reservations:
          memory: 128M
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U mealie -d mealie"]
      interval: 10s
      timeout: 5s
      retries: 5
    command: postgres -c listen_addresses='*'

networks:
  proxy:
    external: true
  database:
    external: true

View file

@ -0,0 +1,45 @@
services:
  navidrome:
    image: deluan/navidrome:latest
    container_name: navidrome
    user: 1000:1000
    restart: unless-stopped
    networks:
      - proxy
    ports:
      - "4533:4533"
    environment:
      - TZ=America/New_York
      - ND_LOGLEVEL=info
      # Last.fm credentials come from .env (SECURITY: regenerate the key/secret
      # pair previously committed here in plain text).
      - ND_LASTFM_APIKEY=${ND_LASTFM_APIKEY}
      - ND_LASTFM_SECRET=${ND_LASTFM_SECRET}
    volumes:
      - ./data:/data
      - /volume1/Media/Music:/music:ro
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: '0.5'
    labels:
      # Homepage dashboard
      - "homepage.group=Media"
      - "homepage.name=Navidrome"
      - "homepage.icon=navidrome.png"
      - "homepage.href=http://192.168.1.245:4533"
      - "homepage.description=Music streaming"
      # - "homepage.widget.type=navidrome"
      # - "homepage.widget.url=http://192.168.1.245:4533"
      # - "homepage.widget.user=YOUR_USERNAME"
      # - "homepage.widget.token=YOUR_NAVIDROME_TOKEN"
      # Traefik (uncomment when ready)
      # - "traefik.enable=true"
      # - "traefik.http.routers.navidrome.entrypoints=websecure"
      # - "traefik.http.routers.navidrome.rule=Host(`navidrome.yourdomain.com`)"
      # - "traefik.http.routers.navidrome.tls=true"
      # - "traefik.http.routers.navidrome.tls.certresolver=myresolver"
      # - "traefik.http.services.navidrome.loadbalancer.server.port=4533"

networks:
  proxy:
    external: true

View file

@ -0,0 +1,11 @@
services:
  portainer:
    # Portainer CE management UI; talks to remote hosts via their
    # docker-socket-proxy services on port 2376.
    image: portainer/portainer-ce:lts
    container_name: portainer
    restart: always
    ports:
      - "8000:8000"
      - "9443:9443"
    volumes:
      - ./:/data
      - /var/run/docker.sock:/var/run/docker.sock

View file

@ -0,0 +1,19 @@
services:
  prowlarr:
    # Standalone Prowlarr indexer manager (legacy traefik_proxy network).
    image: lscr.io/linuxserver/prowlarr:latest
    container_name: prowlarr
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/New_York
    volumes:
      - /home/maddox/docker/appdata/prowlarr:/config
    ports:
      - "9696:9696"
    networks:
      - traefik_proxy
    restart: unless-stopped

networks:
  traefik_proxy:
    external: true

View file

@ -0,0 +1,35 @@
services:
  radarr:
    image: ghcr.io/linuxserver/radarr:latest
    container_name: radarr
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/New_York
    volumes:
      - /home/maddox/docker/appdata/radarr:/config
      - /home/maddox/docker/appdata/radarr/custom-services.d:/custom-services.d
      - /home/maddox/docker/appdata/radarr/custom-cont-init.d:/custom-cont-init.d
      - /volume1/Downloads/rutorrent/incoming:/incoming
      - /volume1/archive/movies:/archive/movies
      - /volume1/archive/tv:/archive/tv
      - /volume1/Media:/media
      - /volume1/Downloads/nzbget:/downloads
    ports:
      - "7878:7878"
    networks:
      - traefik_proxy
    restart: unless-stopped
    labels:
      # API key comes from .env (SECURITY: regenerate the key previously
      # committed here in plain text).
      - "homepage.group=Media"
      - "homepage.href=https://radarr.3ddbrewery.com"
      - "homepage.icon=radarr.png"
      - "homepage.name=Radarr"
      - "homepage.widget.enableQueue=true"
      - "homepage.widget.key=${RADARR_API_KEY}"
      - "homepage.widget.type=radarr"
      - "homepage.widget.url=https://movies.3ddbrewery.com"

networks:
  traefik_proxy:
    external: true

View file

@ -0,0 +1,38 @@
services:
  readarr:
    image: lscr.io/linuxserver/readarr:develop
    container_name: readarr
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/New_York
    volumes:
      - /home/maddox/docker/appdata/readarr:/config
      - /home/maddox/docker/appdata/readarr/custom-services.d:/custom-services.d
      - /home/maddox/docker/appdata/readarr/custom-cont-init.d:/custom-cont-init.d
      - /volume1/Media:/media
      - /volume1/archive:/archive
      - /volume1/Downloads/nzbget:/downloads
    ports:
      - "8787:8787"
    networks:
      - traefik_proxy
    restart: unless-stopped
    deploy:
      resources:
        limits:
          memory: 1G
          cpus: '1.0'
    labels:
      # API key comes from .env (SECURITY: regenerate the key previously
      # committed here in plain text).
      - "homepage.group=Media"
      - "homepage.href=https://readarr.3ddbrewery.com"
      - "homepage.icon=readarr.png"
      - "homepage.name=Readarr"
      - "homepage.widget.enableQueue=true"
      - "homepage.widget.key=${READARR_API_KEY}"
      - "homepage.widget.type=readarr"
      - "homepage.widget.url=https://readarr.3ddbrewery.com"

networks:
  traefik_proxy:
    external: true

View file

@ -0,0 +1,36 @@
services:
  sonarr:
    image: ghcr.io/linuxserver/sonarr:latest
    container_name: sonarr
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/New_York
    volumes:
      - /home/maddox/docker/appdata/sonarr:/config
      - /home/maddox/docker/appdata/sonarr/custom-services.d:/custom-services.d
      - /home/maddox/docker/appdata/sonarr/custom-cont-init.d:/custom-cont-init.d
      - /volume1/archive/movies:/archive/movies
      - /volume1/archive/tv:/archive/tv
      - /volume1/Downloads/nzbget/completed/tv:/downloads/completed/tv
      - /volume1/Downloads/nzbget:/downloads
      - /volume1/Downloads/rutorrent/incoming:/incoming
      - /volume1/Media:/media
    ports:
      - "8989:8989"
    networks:
      - traefik_proxy
    restart: unless-stopped
    labels:
      # API key comes from .env (SECURITY: regenerate the key previously
      # committed here in plain text).
      - "homepage.group=Media"
      - "homepage.href=https://sonarr.3ddbrewery.com"
      - "homepage.icon=sonarr.png"
      - "homepage.name=Sonarr"
      - "homepage.widget.enableQueue=true"
      - "homepage.widget.key=${SONARR_API_KEY}"
      - "homepage.widget.type=sonarr"
      - "homepage.widget.url=https://sonarr.3ddbrewery.com"

networks:
  traefik_proxy:
    external: true

View file

@ -0,0 +1,60 @@
# Baseline "utils" stack deployed to every docker host: self-healing,
# automatic image updates, and a filtered Docker API endpoint for Portainer.
services:
  # Restarts any container labelled autoheal=true whose healthcheck is unhealthy.
  autoheal:
    image: willfarrell/autoheal:latest
    container_name: autoheal
    restart: unless-stopped
    environment:
      - AUTOHEAL_CONTAINER_LABEL=autoheal
      - AUTOHEAL_INTERVAL=5
      - AUTOHEAL_START_PERIOD=0
      - AUTOHEAL_DEFAULT_STOP_TIMEOUT=10
      # Per-host ntfy notification; HOST_NAME is expected from the stack's .env.
      - WEBHOOK_URL=https://ntfy.3ddbrewery.com/autoheal-proxmox?title=${HOST_NAME}
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    labels:
      - "autoheal=true"
  # Polls hourly and updates only containers opted in via the watchtower label.
  watchtower:
    image: containrrr/watchtower:latest
    container_name: watchtower
    restart: unless-stopped
    environment:
      - WATCHTOWER_LABEL_ENABLE=true
      - WATCHTOWER_CLEANUP=true
      - WATCHTOWER_INCLUDE_RESTARTING=true
      - WATCHTOWER_INCLUDE_STOPPED=false
      - WATCHTOWER_POLL_INTERVAL=3600
      - WATCHTOWER_TIMEOUT=30s
      - WATCHTOWER_NO_RESTART=false
      - WATCHTOWER_NOTIFICATIONS=shoutrrr
      - WATCHTOWER_NOTIFICATION_URL=ntfy://ntfy.3ddbrewery.com/watchtower-proxmox?title=${HOST_NAME}
      - DOCKER_API_VERSION=1.44
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    labels:
      - "autoheal=true"
      # Watchtower updates itself as well.
      - "com.centurylinklabs.watchtower.enable=true"
  # Filtered proxy in front of the Docker socket, exposed for Portainer.
  docker-proxy-portainer:
    image: ghcr.io/tecnativa/docker-socket-proxy:latest
    container_name: docker-proxy-portainer
    restart: unless-stopped
    ports:
      # Bound to a specific host IP (from .env), not 0.0.0.0.
      - "${HOST_IP}:2376:2375"
    environment:
      - CONTAINERS=1
      - IMAGES=1
      - NETWORKS=1
      - VOLUMES=1
      - INFO=1
      - EVENTS=1
      - PING=1
      - VERSION=1
      # SECURITY(review): POST=1 together with EXEC=1 allows creating/starting
      # containers and exec-ing into them — effectively full Docker control
      # despite the read-only socket mount below. Confirm Portainer needs both.
      - POST=1
      - EXEC=1
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
    cap_drop:
      - ALL
    security_opt:
      - no-new-privileges:true

View file

@ -0,0 +1,64 @@
---
# Pull every docker-compose file found under each host's appdata directory
# back into this repository as compose-files/<hostname>/<stack>/.
- name: Collect docker-compose files from all hosts
  hosts: docker_hosts
  become: true
  gather_facts: false
  tasks:
    - name: Set appdata path
      ansible.builtin.set_fact:
        appdata_path: "{{ docker_appdata | default('/home/docker/appdata') }}"

    - name: Find all docker-compose files
      ansible.builtin.find:
        paths: "{{ appdata_path }}"
        patterns: "docker-compose.yml,docker-compose.yaml"
        recurse: true
        depth: 3
      register: compose_files

    - name: Show found compose files
      ansible.builtin.debug:
        msg: "Found {{ compose_files.files | length }} compose files on {{ inventory_hostname }}"

    - name: Create local directory for host
      delegate_to: localhost
      become: false
      ansible.builtin.file:
        path: "{{ playbook_dir }}/../compose-files/{{ inventory_hostname }}"
        state: directory
        mode: '0755'

    - name: Fetch compose files
      # flat=true with a dest ending in "/" keeps the original filename while
      # dropping fetch's default <hostname>/<full-remote-path>/ layout.
      ansible.builtin.fetch:
        src: "{{ item.path }}"
        dest: "{{ playbook_dir }}/../compose-files/{{ inventory_hostname }}/{{ item.path | dirname | basename }}/"
        flat: true
      loop: "{{ compose_files.files }}"
      loop_control:
        label: "{{ item.path | dirname | basename }}/docker-compose.yml"

    - name: Also fetch .env.example if exists
      ansible.builtin.fetch:
        src: "{{ item.path | dirname }}/.env.example"
        dest: "{{ playbook_dir }}/../compose-files/{{ inventory_hostname }}/{{ item.path | dirname | basename }}/"
        flat: true
        fail_on_missing: false
      loop: "{{ compose_files.files }}"
      loop_control:
        label: "{{ item.path | dirname | basename }}/.env.example"

# Runs once on the controller after all hosts are done.
- name: Summary
  hosts: localhost
  gather_facts: false
  tasks:
    - name: Count collected files
      ansible.builtin.find:
        paths: "{{ playbook_dir }}/../compose-files"
        patterns: "docker-compose.yml,docker-compose.yaml"
        recurse: true
      register: total_files

    - name: Show summary
      ansible.builtin.debug:
        msg: "Total compose files collected: {{ total_files.files | length }}"

View file

@ -0,0 +1,40 @@
---
# Copy each host's .env files into the repo as .env.example, with secret-looking
# values (passwords, tokens, keys) replaced by REDACTED so they are safe to commit.
- name: Collect .env files as templates (with secrets redacted)
  hosts: docker_hosts
  become: true
  gather_facts: false
  tasks:
    - name: Set appdata path
      ansible.builtin.set_fact:
        appdata_path: "{{ docker_appdata | default('/home/docker/appdata') }}"

    - name: Find all .env files
      ansible.builtin.find:
        paths: "{{ appdata_path }}"
        patterns: ".env"
        recurse: true
        depth: 3
        hidden: true
      register: env_files

    - name: Read and redact .env files
      # Redact any variable whose name ends with a secret-looking suffix.
      # PASS/PASSWD are included so keys like FORGEJO_DB_PASS are not leaked
      # (the previous pattern only matched PASSWORD); KEY already covers
      # API_KEY. GNU sed's `i` flag makes the match case-insensitive.
      ansible.builtin.shell: |
        sed -E 's/(PASSWORD|PASSWD|PASS|SECRET|TOKEN|KEY)=.*/\1=REDACTED/i' "{{ item.path }}"
      loop: "{{ env_files.files }}"
      loop_control:
        label: "{{ item.path | dirname | basename }}/.env"
      register: redacted_envs
      changed_when: false

    - name: Save as .env.example
      # Assumes compose-files/<host>/<stack>/ already exists (created by
      # collect-compose.yml) — ansible.builtin.copy does not create parents.
      # The explicit "\n" restores the trailing newline Ansible strips from
      # stdout, so the written file is a valid POSIX text file.
      delegate_to: localhost
      become: false
      ansible.builtin.copy:
        content: "{{ item.stdout }}\n"
        dest: "{{ playbook_dir }}/../compose-files/{{ inventory_hostname }}/{{ item.item.path | dirname | basename }}/.env.example"
        mode: '0644'
      loop: "{{ redacted_envs.results }}"
      loop_control:
        label: "{{ item.item.path | dirname | basename }}/.env.example"
      when: item.stdout | length > 0

View file

@ -0,0 +1,80 @@
---
# Push compose files from this repository out to the docker hosts.
#   restart_stacks=true  -> pull + `docker compose up -d` stacks whose file changed
#   stack_filter=<name>  -> deploy only stacks whose path matches the substring
- name: Deploy docker-compose files to hosts
  hosts: docker_hosts
  become: true
  gather_facts: false
  vars:
    restart_stacks: false
    stack_filter: ""  # Empty = all stacks, or specify stack name
  tasks:
    - name: Set appdata path
      ansible.builtin.set_fact:
        appdata_path: "{{ docker_appdata | default('/home/docker/appdata') }}"

    - name: Find compose files for this host in repo
      delegate_to: localhost
      become: false
      ansible.builtin.find:
        paths: "{{ playbook_dir }}/../compose-files/{{ inventory_hostname }}"
        patterns: "docker-compose.yml,docker-compose.yaml"
        recurse: true
      register: repo_compose_files

    - name: Select stacks to deploy
      # Single task replacing the previous filter/no-filter pair: the
      # substring filter is applied only when stack_filter is non-empty.
      ansible.builtin.set_fact:
        deploy_files: >-
          {{ (repo_compose_files.files | selectattr('path', 'search', stack_filter) | list)
             if stack_filter | length > 0
             else repo_compose_files.files }}

    - name: Show files to deploy
      ansible.builtin.debug:
        msg: "Deploying {{ deploy_files | length }} compose files to {{ inventory_hostname }}"

    - name: Ensure stack directories exist
      ansible.builtin.file:
        path: "{{ appdata_path }}/{{ item.path | dirname | basename }}"
        state: directory
        mode: '0755'
      loop: "{{ deploy_files }}"
      loop_control:
        label: "{{ item.path | dirname | basename }}"

    - name: Deploy compose files
      # backup=true keeps a timestamped copy of the previous file on the host
      # so a bad deploy can be rolled back by hand.
      ansible.builtin.copy:
        src: "{{ item.path }}"
        dest: "{{ appdata_path }}/{{ item.path | dirname | basename }}/docker-compose.yml"
        mode: '0644'
        backup: true
      loop: "{{ deploy_files }}"
      loop_control:
        label: "{{ item.path | dirname | basename }}/docker-compose.yml"
      register: deployed_files

    - name: Restart changed stacks
      # Deliberately best-effort: `up -d` still runs even if `pull` fails
      # (e.g. registry rate limit) so cached images can be used, and
      # ignore_errors keeps one broken stack from aborting the rest.
      ansible.builtin.shell: |
        cd {{ appdata_path }}/{{ item.item.path | dirname | basename }}
        docker compose pull
        docker compose up -d
      loop: "{{ deployed_files.results }}"
      loop_control:
        label: "{{ item.item.path | dirname | basename }}"
      when:
        - restart_stacks | bool
        - item.changed
      register: restart_results
      ignore_errors: true

    - name: Show restart results
      ansible.builtin.debug:
        msg: "Restarted: {{ item.item.item.path | dirname | basename }}"
      loop: "{{ restart_results.results | default([]) }}"
      loop_control:
        label: "{{ item.item.item.path | dirname | basename | default('N/A') }}"
      when:
        - restart_stacks | bool
        - item.changed | default(false)

View file

@ -0,0 +1,44 @@
---
# Preview-only companion to deploy-compose.yml: print a unified diff between
# each compose file in the repo and the copy currently deployed on the host.
# Makes no changes on the host (changed_when: false throughout).
- name: Show differences between repo and deployed compose files
  hosts: docker_hosts
  become: true
  gather_facts: false
  tasks:
    - name: Set appdata path
      ansible.builtin.set_fact:
        appdata_path: "{{ docker_appdata | default('/home/docker/appdata') }}"

    - name: Find compose files for this host in repo
      delegate_to: localhost
      become: false
      ansible.builtin.find:
        paths: "{{ playbook_dir }}/../compose-files/{{ inventory_hostname }}"
        patterns: "docker-compose.yml,docker-compose.yaml"
        recurse: true
      register: repo_compose_files

    - name: Compare each file
      # The repo copy arrives on stdin and diff reads it via "-" (the previous
      # `< /dev/stdin` redirect was redundant). `|| true` keeps diff's exit
      # status 1 (= differences found) from failing the task.
      ansible.builtin.shell: |
        if [ -f "{{ appdata_path }}/{{ item.path | dirname | basename }}/docker-compose.yml" ]; then
          diff -u "{{ appdata_path }}/{{ item.path | dirname | basename }}/docker-compose.yml" - || true
        else
          echo "NEW FILE - does not exist on host yet"
        fi
      args:
        # lookup('file') strips trailing newlines; the shell module's default
        # stdin_add_newline=true restores the final one, keeping diffs clean.
        stdin: "{{ lookup('file', item.path) }}"
      loop: "{{ repo_compose_files.files }}"
      loop_control:
        label: "{{ item.path | dirname | basename }}"
      register: diff_results
      changed_when: false

    - name: Show differences
      ansible.builtin.debug:
        msg: |
          === {{ item.item.path | dirname | basename }} ===
          {{ item.stdout if item.stdout else 'No differences' }}
      loop: "{{ diff_results.results }}"
      loop_control:
        label: "{{ item.item.path | dirname | basename }}"
      when: item.stdout | length > 0