Clean up containers that are no longer used

This commit is contained in:
Mike Wilson
2026-02-13 09:58:13 -05:00
parent 26ecfcf0cb
commit 568e2240bf
47 changed files with 0 additions and 6015 deletions

View File

@@ -1,5 +0,0 @@
# Handler: restart the SearXNG compose project after configuration changes
# (notified by the tasks that template settings.yml).
- name: restart searxng
  community.docker.docker_compose_v2:
    project_src: "{{ install_directory }}/{{ role_name }}"
    restarted: true

View File

@@ -1,30 +0,0 @@
# Deploy SearXNG: render compose + settings files, then bring the stack up.
- name: Create install directory
  file:
    path: "{{ install_directory }}/{{ role_name }}"
    state: directory
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_directory_mask }}"
  become: true
- name: Copy docker-compose file to destination
  template:
    src: docker-compose.yml
    dest: "{{ install_directory }}/{{ role_name }}/docker-compose.yml"
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_file_mask }}"
    # Reject the rendered file if docker compose cannot parse it.
    validate: docker compose -f %s config
  become: true
# Fix: task name typo "destionation" -> "destination".
- name: Copy settings file to destination
  template:
    src: settings.yml
    dest: "{{ data_dir }}/{{ role_name }}/settings.yml"
    owner: "{{ docker_user }}"
    # NOTE(review): no mode set here (unlike the other file tasks) — the file
    # keeps default permissions; confirm whether that is intentional.
  become: true
  notify: restart searxng
- name: Start docker container
  community.docker.docker_compose_v2:
    project_src: "{{ install_directory }}/{{ role_name }}"
    pull: always
    # Canonical boolean instead of YAML 1.1 "yes".
    remove_orphans: true

View File

@@ -1,37 +0,0 @@
# SearXNG metasearch engine behind traefik, with a throwaway Redis cache.
networks:
  traefik:
    external: true
services:
  searxng:
    container_name: searxng
    image: searxng/searxng
    restart: unless-stopped
    networks:
      - traefik
    volumes:
      - "{{ data_dir }}/{{ role_name }}:/etc/searxng"
    # Drop every capability and grant back only a minimal set.
    cap_drop:
      - ALL
    cap_add:
      - CHOWN
      - SETGID
      - SETUID
    labels:
      traefik.enable: true
      traefik.http.routers.searxng.rule: "Host(`search.{{ personal_domain }}`)"
      traefik.http.routers.searxng.middlewares: lan-whitelist@file
  redis:
    image: redis:alpine
    restart: unless-stopped
    # Persistence disabled (no RDB save, no AOF) and the data dir is tmpfs,
    # so the cache is wiped on every restart.
    command: redis-server --save "" --appendonly "no"
    tmpfs:
      - /var/lib/redis
    cap_drop:
      - ALL
    cap_add:
      - SETGID
      - SETUID
      - DAC_OVERRIDE

File diff suppressed because it is too large Load Diff

View File

@@ -1,10 +0,0 @@
# Vault-encrypted variable for the searxng role; edit with `ansible-vault edit`.
searxng_secret_key: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  33656138666464373665663339363665346566613637626131363335336535313131333265646539
  3037373439643964343139383764386364623961383737610a313063613736316437366239663238
  65333735633661316463336665353138623264396534383865363134613165636164303765356265
  3865626366613966660a313738353339313133393765643136306361373061366132373130656531
  61396230346333346636356562353733623332333662653164373630626339376433353663313862
  61303230613135336662313531313836363466623162666233646231616333643536303233616231
  62353866333465646162633738383866363338383932623335353038393130323932343363653233
  62663465386661663262

View File

@@ -1,22 +0,0 @@
# Reverse proxy fronting the bar-assistant stack: /bar/ -> API container,
# /search/ -> Meilisearch, everything else -> salt-rim web UI.
server {
    listen 3000 default_server;
    listen [::]:3000 default_server;
    server_name _;
    # Don't log favicon/robots noise.
    location = /favicon.ico { access_log off; log_not_found off; }
    location = /robots.txt { access_log off; log_not_found off; }
    # Allow large uploads (e.g. images).
    client_max_body_size 100M;
    location /bar/ {
        proxy_pass http://bar-assistant:3000/;
    }
    location /search/ {
        proxy_pass http://meilisearch:7700/;
    }
    location / {
        proxy_pass http://salt-rim:8080/;
    }
}

View File

@@ -1,44 +0,0 @@
# Deploy the bar-assistant stack: service user, directories, compose file,
# nginx front-end config, then bring the stack up.
- name: Create service user
  user:
    name: "{{ role_name }}"
    system: true
  register: service_user
  become: true
- name: Create install directory
  file:
    path: "{{ install_directory }}/{{ role_name }}"
    state: directory
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_directory_mask }}"
  become: true
- name: Create data directory
  file:
    # NOTE(review): hardcodes "barassistant" where other tasks use role_name.
    path: "{{ data_dir }}/barassistant/barassistant"
    state: directory
    # UID 33 — presumably the web user inside the container (www-data on
    # Debian-based images); TODO confirm against the image.
    owner: 33
    mode: "{{ docker_compose_directory_mask }}"
  become: true
- name: Copy docker-compose file to destination
  template:
    src: docker-compose.yml
    dest: "{{ install_directory }}/{{ role_name }}/docker-compose.yml"
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_file_mask }}"
    # Reject the rendered file if docker compose cannot parse it.
    validate: docker compose -f %s config
  become: true
- name: Copy nginx.conf to destination
  copy:
    src: nginx.conf
    dest: "{{ install_directory }}/{{ role_name }}/nginx.conf"
    mode: "{{ docker_compose_file_mask }}"
  become: true
- name: Start docker container
  community.docker.docker_compose_v2:
    project_src: "{{ install_directory }}/{{ role_name }}"
    pull: always
    # Canonical boolean instead of YAML 1.1 "yes".
    remove_orphans: true

View File

@@ -1,71 +0,0 @@
# Bar Assistant stack: Meilisearch + Redis + API + salt-rim UI, fronted by an
# nginx container that is the only service exposed through traefik.
networks:
  traefik:
    external: true
services:
  meilisearch:
    image: getmeili/meilisearch:v1.12
    restart: unless-stopped
    networks:
      - default
    environment:
      MEILI_MASTER_KEY: "{{ meili_master_key }}"
      MEILI_ENV: production
    volumes:
      - "{{ data_dir }}/barassistant/meilisearch:/meili_data"
  redis:
    image: redis
    restart: unless-stopped
    networks:
      - default
    environment:
      ALLOW_EMPTY_PASSWORD: "True"
  bar-assistant:
    container_name: bar-assistant
    image: barassistant/server:v5
    restart: unless-stopped
    networks:
      - default
    depends_on:
      - meilisearch
      - redis
    environment:
      APP_URL: "{{ base_url }}/bar"
      LOG_CHANNEL: stderr
      MEILISEARCH_KEY: "{{ meili_master_key }}"
      MEILISEARCH_HOST: http://meilisearch:7700
      REDIS_HOST: redis
      ALLOW_REGISTRATION: "True"
    volumes:
      - "{{ data_dir }}/barassistant/barassistant:/var/www/cocktails/storage/bar-assistant"
  salt-rim:
    image: barassistant/salt-rim:v4
    restart: unless-stopped
    networks:
      - default
    depends_on:
      - bar-assistant
    environment:
      API_URL: "{{ base_url }}/bar"
      MEILISEARCH_URL: "{{ base_url }}/search"
      BAR_NAME: "Cocktails"
      DESCRIPTION: Why is the rum always gone?
      DEFAULT_LOCALE: "en-US"
  webserver:
    image: nginx:alpine
    restart: unless-stopped
    networks:
      - traefik
      - default
    volumes:
      - "./nginx.conf:/etc/nginx/conf.d/default.conf"
    labels:
      traefik.enable: true
      traefik.http.routers.barassistant.rule: "Host(`cocktails.{{ personal_domain }}`)"
      traefik.http.services.barassistant.loadbalancer.server.port: 3000
      # Fix: router name was misspelled "bariassistant", which attached the
      # lan-whitelist middleware to a non-existent router and left the
      # "barassistant" router exposed without the LAN restriction.
      traefik.http.routers.barassistant.middlewares: lan-whitelist@file

View File

@@ -1,15 +0,0 @@
# Vault-encrypted Meilisearch master key plus the public base URL used by the
# bar-assistant compose template. Edit with `ansible-vault edit`.
meili_master_key: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  61306335316339383330323264646132363837376264646235353562666239386463613431366361
  3333666463623564393061616339393164353465633866320a303530613862386466383161623532
  61653861653032303232666530623739303231363536316530386566313466333236613331303833
  3734656334333366650a366261323139363635316264383966626237396338663030393931313532
  33343737316535336539363135333834333462393330663038376132393661323866656132356566
  39653732366333306134393965383339336330326566303230613362393366383561303939363937
  64396230323664393236303939643337393034646637643766323938663961636639326466653332
  30343132636534613835646163643832373835663030326635323236386361346133633964303137
  36623631353931343861383232373231613837393936316635393838323466656330653835343932
  64333432386133313363626630623837643237616132336664303963323062386365623266623333
  343233663635306361333065313334313361
base_url: "https://cocktails.{{ personal_domain }}"

View File

@@ -1,31 +0,0 @@
# Deploy deemix: service user in the media group, compose file, stack up.
- name: Create service user
  user:
    name: "{{ role_name }}"
    groups: "{{ media_group }}"
    # append so existing supplementary groups are preserved
    append: true
    system: true
  register: service_user
  become: true
- name: Create install directory
  file:
    path: "{{ install_directory }}/{{ role_name }}"
    state: directory
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_directory_mask }}"
  become: true
- name: Copy docker-compose file to destination
  template:
    src: docker-compose.yml
    dest: "{{ install_directory }}/{{ role_name }}/docker-compose.yml"
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_file_mask }}"
    # Reject the rendered file if docker compose cannot parse it.
    validate: docker compose -f %s config
  become: true
- name: Start docker container
  community.docker.docker_compose_v2:
    project_src: "{{ install_directory }}/{{ role_name }}"
    pull: always
    # Canonical booleans instead of YAML 1.1 "yes".
    remove_orphans: true

View File

@@ -1,25 +0,0 @@
# Deemix downloader; service name is templated from role_name.
networks:
  traefik:
    external: true
services:
  {{ role_name }}:
    container_name: "{{ role_name }}"
    image: registry.gitlab.com/bockiii/deemix-docker
    restart: unless-stopped
    networks:
      - traefik
    environment:
      # Run as the dedicated service user with the shared media group so
      # downloads stay group-writable (umask 002).
      - "PUID={{ service_user.uid }}"
      - "PGID={{ media_gid }}"
      - "TZ={{ timezone }}"
      - "UMASK_SET=002"
      - "DEEMIX_SINGLE_USER=true"
    volumes:
      - "{{ data_dir }}/{{ role_name }}:/config"
      - "{{ media_storage_mnt }}/data/import/music/deemix:/downloads"
    labels:
      traefik.enable: true
      traefik.http.routers.{{ role_name }}.rule: "Host(`{{ role_name }}.local.{{ personal_domain }}`)"
      traefik.http.routers.{{ role_name }}.middlewares: lan-whitelist@file

View File

@@ -1,22 +0,0 @@
# Deploy Firefly III: install directory, compose file, stack up.
- name: Create install directory
  file:
    path: "{{ install_directory }}/{{ role_name }}"
    state: directory
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_directory_mask }}"
  become: true
- name: Copy docker-compose file to destination
  template:
    src: docker-compose.yml
    dest: "{{ install_directory }}/{{ role_name }}/docker-compose.yml"
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_file_mask }}"
    # Reject the rendered file if docker compose cannot parse it.
    validate: docker compose -f %s config
  become: true
- name: Start docker container
  community.docker.docker_compose_v2:
    project_src: "{{ install_directory }}/{{ role_name }}"
    pull: always
    # Canonical boolean instead of YAML 1.1 "yes".
    remove_orphans: true

View File

@@ -1,65 +0,0 @@
# Firefly III stack: core app + MariaDB + an alpine cron sidecar that calls
# the recurring-jobs endpoint once a day.
networks:
  firefly_iii:
    driver: bridge
  traefik:
    external: true
services:
  app:
    image: fireflyiii/core:latest
    container_name: firefly_iii_core
    restart: unless-stopped
    volumes:
      - "{{ data_dir }}/firefly3/upload:/var/www/html/storage/upload"
    depends_on:
      - db
    networks:
      - traefik
      - default
    environment:
      APP_ENV: local
      APP_DEBUG: "false"
      APP_KEY: "{{ app_key }}"
      # Quote templated values so the rendered file stays valid YAML
      # regardless of what the variables expand to.
      SITE_OWNER: "{{ owner_email_address }}"
      DEFAULT_LANGUAGE: "en_US"
      DEFAULT_LOCALE: equal
      TZ: "{{ timezone }}"
      TRUSTED_PROXIES: "*"
      APP_LOG_LEVEL: notice
      AUDIT_LOG_LEVEL: info
      DB_CONNECTION: mysql
      DB_HOST: db
      DB_PORT: 3306
      DB_DATABASE: firefly
      DB_USERNAME: firefly
      # NOTE(review): DB credentials and STATIC_CRON_TOKEN are hardcoded in
      # the repo; consider moving them into vaulted variables.
      DB_PASSWORD: firefly
      APP_URL: "https://firefly.local.{{ personal_domain }}"
      STATIC_CRON_TOKEN: "Y5uNSbJoK4FKUC9gVE5hq8YFEbFmc6BK"
    labels:
      traefik.enable: true
      traefik.http.routers.firefly.rule: "Host(`firefly.local.{{ personal_domain }}`)"
      traefik.http.routers.firefly.middlewares: lan-whitelist@file
  db:
    image: mariadb
    restart: always
    volumes:
      - "{{ data_dir }}/mariadb/firefly3:/var/lib/mysql"
    environment:
      MYSQL_RANDOM_ROOT_PASSWORD: "yes"
      MYSQL_USER: firefly
      MYSQL_PASSWORD: firefly
      MYSQL_DATABASE: firefly
  cron:
    # The STATIC_CRON_TOKEN above (exactly 32 characters) authenticates this
    # daily wget against the Firefly III cron endpoint.
    image: alpine
    restart: always
    container_name: firefly_iii_cron
    command: sh -c "echo \"0 3 * * * wget -qO- https://firefly.local.{{ personal_domain }}/api/v1/cron/Y5uNSbJoK4FKUC9gVE5hq8YFEbFmc6BK\" | crontab - && crond -f -L /dev/stdout"
    networks:
      - firefly_iii
      - default

View File

@@ -1,17 +0,0 @@
# Vault-encrypted variables for the firefly role; edit with `ansible-vault edit`.
owner_email_address: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  61373861363433363938396137653461363830323539316438323861326437663364383032363064
  6438303462336466373233663366303263313139333830330a623465633166653530633961643162
  65303032386661393063393134643436653737666163373833383036316234393563313536353036
  3839663034393730340a626361646463636137636535653632343064353461656532656236633865
  66636634323434356436313737336635363832333262383331333034313530663463
app_key: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  31333563616230396135363562313038346138633631613832646463343666643733333366303561
  3461316337303862616662333031306231613532383534340a643236306232383466333531626466
  33313830646365333935313237663134343033396166623730303030636438656435313462633762
  3134643738616365330a636366343736306539666565663866626537303431366633646638663563
  32616439336338393663373466323630323733393031633564383737383465313434313230323038
  6534636266653166633539326632623165663436323936643031

View File

@@ -1,22 +0,0 @@
# Deploy LubeLogger: install directory, compose file, stack up.
- name: Create install directory
  file:
    path: "{{ install_directory }}/{{ role_name }}"
    state: directory
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_directory_mask }}"
  become: true
- name: Copy docker-compose file to destination
  template:
    src: docker-compose.yml
    dest: "{{ install_directory }}/{{ role_name }}/docker-compose.yml"
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_file_mask }}"
    # Reject the rendered file if docker compose cannot parse it.
    validate: docker compose -f %s config
  become: true
- name: Start docker container
  community.docker.docker_compose_v2:
    project_src: "{{ install_directory }}/{{ role_name }}"
    pull: always
    # Canonical boolean instead of YAML 1.1 "yes".
    remove_orphans: true

View File

@@ -1,27 +0,0 @@
# LubeLogger vehicle-maintenance tracker behind traefik.
networks:
  traefik:
    external: true
services:
  lubelogger:
    container_name: lubelogger
    image: ghcr.io/hargata/lubelogger:latest
    restart: unless-stopped
    networks:
      - traefik
    environment:
      - LC_ALL=en_US.UTF-8
      - LANG=en_US.UTF-8
      # Mail settings are left blank, so no SMTP server is configured.
      - MailConfig__EmailServer=""
      - MailConfig__EmailFrom=""
      - MailConfig__Port=587
      - MailConfig__Username=""
      - MailConfig__Password=""
      - LOGGING__LOGLEVEL__DEFAULT=Error
    volumes:
      - "{{ data_dir }}/{{ role_name }}:/App"
    labels:
      traefik.enable: true
      traefik.http.routers.lubelogger.rule: "Host(`lubelogger.local.{{ personal_domain }}`)"
      traefik.http.routers.lubelogger.middlewares: lan-whitelist@file

View File

@@ -1,29 +0,0 @@
# Deploy Mealie: service user, install directory, compose file, stack up.
- name: Create service user
  user:
    name: "{{ role_name }}"
    system: true
  register: service_user
  become: true
- name: Create install directory
  file:
    path: "{{ install_directory }}/{{ role_name }}"
    state: directory
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_directory_mask }}"
  become: true
- name: Copy docker-compose file to destination
  template:
    src: docker-compose.yml
    dest: "{{ install_directory }}/{{ role_name }}/docker-compose.yml"
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_file_mask }}"
    # Reject the rendered file if docker compose cannot parse it.
    validate: docker compose -f %s config
  become: true
- name: Start docker container
  community.docker.docker_compose_v2:
    project_src: "{{ install_directory }}/{{ role_name }}"
    pull: always
    # Canonical boolean instead of YAML 1.1 "yes".
    remove_orphans: true

View File

@@ -1,66 +0,0 @@
# Mealie recipe manager (pinned to v1.0.0beta-5): frontend + API + Postgres.
# The compose "default" network is internal, so only the frontend (also on
# traefik) is reachable from outside.
networks:
  traefik:
    external: true
  default:
    internal: true
services:
  mealie-frontend:
    image: hkotel/mealie:frontend-v1.0.0beta-5
    container_name: mealie-frontend
    restart: unless-stopped
    depends_on:
      - mealie-api
    networks:
      - default
      - traefik
    environment:
      - API_URL=http://mealie-api:9000 #
    volumes:
      - "{{ data_dir }}/mealie:/app/data"
    labels:
      traefik.enable: true
      traefik.http.routers.mealie.rule: "Host(`mealie.{{ personal_domain }}`)"
      traefik.http.services.mealie.loadbalancer.server.port: 3000
  mealie-api:
    image: hkotel/mealie:api-v1.0.0beta-5
    container_name: mealie-api
    restart: unless-stopped
    depends_on:
      - postgres
    networks:
      - default
    volumes:
      - "{{ data_dir }}/mealie:/app/data"
    environment:
      - ALLOW_SIGNUP=false
      - "PUID={{ service_user.uid }}"
      # NOTE(review): PGID is set to the service user's UID, not a GID —
      # confirm this is intentional.
      - "PGID={{ service_user.uid }}"
      - "TZ={{ timezone }}"
      - MAX_WORKERS=1
      - WEB_CONCURRENCY=1
      - "BASE_URL=https://mealie.{{ personal_domain }}"
      - DB_ENGINE=postgres
      - POSTGRES_USER=mealie
      - POSTGRES_PASSWORD=mealie
      - POSTGRES_SERVER=postgres
      - POSTGRES_PORT=5432
      - POSTGRES_DB=mealie
      - "DEFAULT_EMAIL={{ email }}"
      - TOKEN_TIME=168
    # NOTE(review): the default network is internal — confirm this DNS
    # setting has any effect from inside it.
    dns:
      - 10.0.0.1
  postgres:
    container_name: postgres
    image: postgres
    restart: always
    networks:
      - default
    volumes:
      - "{{ data_dir }}/postgres/mealie:/var/lib/postgresql/data"
    environment:
      # NOTE(review): hardcoded credential; only reachable on the internal
      # network, but consider vaulting it anyway.
      POSTGRES_PASSWORD: mealie
      POSTGRES_USER: mealie

View File

@@ -1,7 +0,0 @@
# Vault-encrypted variable for the mealie role; edit with `ansible-vault edit`.
email: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  30343965383433393930313337303637353362616563313863396433323939393864393436376534
  6438663537386464623830316136643461356631316436360a636664323436303464376630616639
  62653263633531343733313137303863623562616632313236376466313132636234633438616164
  3030303934343761390a663734333566323234613434633636353665623530643262353162383237
  66633863376332663064346132356238333561663438643232646463646632656361

View File

@@ -1,4 +0,0 @@
# Handler: restart the ntfy compose project after server.yml changes.
# Note: hardcodes "ntfy" rather than using role_name.
- name: restart ntfy
  community.docker.docker_compose_v2:
    project_src: "{{ install_directory }}/ntfy"
    restarted: true

View File

@@ -1,31 +0,0 @@
# Deploy ntfy: install directory, compose file, server config, stack up.
- name: Create install directory
  file:
    path: "{{ install_directory }}/{{ role_name }}"
    state: directory
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_directory_mask }}"
  become: true
- name: Copy docker-compose file to destination
  template:
    src: docker-compose.yml
    dest: "{{ install_directory }}/{{ role_name }}/docker-compose.yml"
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_file_mask }}"
    # Reject the rendered file if docker compose cannot parse it.
    validate: docker compose -f %s config
  become: true
- name: Install configuration file
  template:
    src: server.yml
    # NOTE(review): hardcodes "ntfy" where the other tasks use role_name.
    dest: "{{ data_dir }}/ntfy/server.yml"
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_file_mask }}"
  notify: restart ntfy
  become: true
- name: Start docker container
  community.docker.docker_compose_v2:
    project_src: "{{ install_directory }}/{{ role_name }}"
    pull: always
    # Canonical boolean instead of YAML 1.1 "yes".
    remove_orphans: true

View File

@@ -1,24 +0,0 @@
# ntfy push-notification server, exposed through traefik at push.<domain>.
networks:
  traefik:
    external: true
services:
  ntfy:
    container_name: ntfy
    image: binwiederhier/ntfy:latest
    restart: unless-stopped
    networks:
      - traefik
    # Run as the primary host user so files written under the bind mounts
    # keep that user's ownership.
    user: "{{ primary_uid }}:{{ primary_gid }}"
    environment:
      - TZ={{ timezone }}
    command:
      - serve
    volumes:
      - "{{ data_dir }}/ntfy:/etc/ntfy"
      # NOTE(review): /var/cache/ntfy must exist on the host and be writable
      # by primary_uid — confirm it is provisioned elsewhere.
      - /var/cache/ntfy:/var/cache/ntfy
    labels:
      traefik.enable: true
      traefik.http.routers.ntfy.rule: "Host(`push.{{ personal_domain }}`)"
      traefik.http.routers.ntfy.middlewares: lan-whitelist@file

View File

@@ -1,279 +0,0 @@
# ntfy server config file
#
# Please refer to the documentation at https://ntfy.sh/docs/config/ for details.
# All options also support underscores (_) instead of dashes (-) to comply with the YAML spec.
# Public facing base URL of the service (e.g. https://ntfy.sh or https://ntfy.example.com)
#
# This setting is required for any of the following features:
# - attachments (to return a download URL)
# - e-mail sending (for the topic URL in the email footer)
# - iOS push notifications for self-hosted servers (to calculate the Firebase poll_request topic)
# - Matrix Push Gateway (to validate that the pushkey is correct)
#
base-url: "https://push.{{ personal_domain }}"
# Listen address for the HTTP & HTTPS web server. If "listen-https" is set, you must also
# set "key-file" and "cert-file". Format: [<ip>]:<port>, e.g. "1.2.3.4:8080".
#
# To listen on all interfaces, you may omit the IP address, e.g. ":443".
# To disable HTTP, set "listen-http" to "-".
#
# listen-http: ":80"
# listen-https:
# Listen on a Unix socket, e.g. /var/lib/ntfy/ntfy.sock
# This can be useful to avoid port issues on local systems, and to simplify permissions.
#
# listen-unix: <socket-path>
# listen-unix-mode: <linux permissions, e.g. 0700>
# Path to the private key & cert file for the HTTPS web server. Not used if "listen-https" is not set.
#
# key-file: <filename>
# cert-file: <filename>
# If set, also publish messages to a Firebase Cloud Messaging (FCM) topic for your app.
# This is optional and only required to save battery when using the Android app.
#
# firebase-key-file: <filename>
# If "cache-file" is set, messages are cached in a local SQLite database instead of only in-memory.
# This allows for service restarts without losing messages in support of the since= parameter.
#
# The "cache-duration" parameter defines the duration for which messages will be buffered
# before they are deleted. This is required to support the "since=..." and "poll=1" parameter.
# To disable the cache entirely (on-disk/in-memory), set "cache-duration" to 0.
# The cache file is created automatically, provided that the correct permissions are set.
#
# The "cache-startup-queries" parameter allows you to run commands when the database is initialized,
# e.g. to enable WAL mode (see https://phiresky.github.io/blog/2020/sqlite-performance-tuning/)).
# Example:
# cache-startup-queries: |
# pragma journal_mode = WAL;
# pragma synchronous = normal;
# pragma temp_store = memory;
# pragma busy_timeout = 15000;
# vacuum;
#
# The "cache-batch-size" and "cache-batch-timeout" parameter allow enabling async batch writing
# of messages. If set, messages will be queued and written to the database in batches of the given
# size, or after the given timeout. This is only required for high volume servers.
#
# Debian/RPM package users:
# Use /var/cache/ntfy/cache.db as cache file to avoid permission issues. The package
# creates this folder for you.
#
# Check your permissions:
# If you are running ntfy with systemd, make sure this cache file is owned by the
# ntfy user and group by running: chown ntfy.ntfy <filename>.
#
# cache-file: <filename>
# cache-duration: "12h"
# cache-startup-queries:
# cache-batch-size: 0
# cache-batch-timeout: "0ms"
# If set, access to the ntfy server and API can be controlled on a granular level using
# the 'ntfy user' and 'ntfy access' commands. See the --help pages for details, or check the docs.
#
# - auth-file is the SQLite user/access database; it is created automatically if it doesn't already exist
# - auth-default-access defines the default/fallback access if no access control entry is found; it can be
# set to "read-write" (default), "read-only", "write-only" or "deny-all".
# - auth-startup-queries allows you to run commands when the database is initialized, e.g. to enable
# WAL mode. This is similar to cache-startup-queries. See above for details.
#
# Debian/RPM package users:
# Use /var/lib/ntfy/user.db as user database to avoid permission issues. The package
# creates this folder for you.
#
# Check your permissions:
# If you are running ntfy with systemd, make sure this user database file is owned by the
# ntfy user and group by running: chown ntfy.ntfy <filename>.
#
# auth-file: <filename>
# auth-default-access: "read-write"
# auth-startup-queries:
# If set, the X-Forwarded-For header is used to determine the visitor IP address
# instead of the remote address of the connection.
#
# WARNING: If you are behind a proxy, you must set this, otherwise all visitors are rate limited
# as if they are one.
#
behind-proxy: true
# If enabled, clients can attach files to notifications as attachments. Minimum settings to enable attachments
# are "attachment-cache-dir" and "base-url".
#
# - attachment-cache-dir is the cache directory for attached files
# - attachment-total-size-limit is the limit of the on-disk attachment cache directory (total size)
# - attachment-file-size-limit is the per-file attachment size limit (e.g. 300k, 2M, 100M)
# - attachment-expiry-duration is the duration after which uploaded attachments will be deleted (e.g. 3h, 20h)
#
attachment-cache-dir: "/var/cache/ntfy/attachments"
# attachment-total-size-limit: "5G"
# attachment-file-size-limit: "15M"
# attachment-expiry-duration: "3h"
# If enabled, allow outgoing e-mail notifications via the 'X-Email' header. If this header is set,
# messages will additionally be sent out as e-mail using an external SMTP server. As of today, only
# SMTP servers with plain text auth and STARTTLS are supported. Please also refer to the rate limiting settings
# below (visitor-email-limit-burst & visitor-email-limit-replenish).
#
# - smtp-sender-addr is the hostname:port of the SMTP server
# - smtp-sender-user/smtp-sender-pass are the username and password of the SMTP user
# - smtp-sender-from is the e-mail address of the sender
#
# smtp-sender-addr:
# smtp-sender-user:
# smtp-sender-pass:
# smtp-sender-from:
# If enabled, ntfy will launch a lightweight SMTP server for incoming messages. Once configured, users can send
# emails to a topic e-mail address to publish messages to a topic.
#
# - smtp-server-listen defines the IP address and port the SMTP server will listen on, e.g. :25 or 1.2.3.4:25
# - smtp-server-domain is the e-mail domain, e.g. ntfy.sh
# - smtp-server-addr-prefix is an optional prefix for the e-mail addresses to prevent spam. If set to "ntfy-",
# for instance, only e-mails to ntfy-$topic@ntfy.sh will be accepted. If this is not set, all emails to
# $topic@ntfy.sh will be accepted (which may obviously be a spam problem).
#
# smtp-server-listen:
# smtp-server-domain:
# smtp-server-addr-prefix:
# Interval in which keepalive messages are sent to the client. This is to prevent
# intermediaries closing the connection for inactivity.
#
# Note that the Android app has a hardcoded timeout at 77s, so it should be less than that.
#
# keepalive-interval: "45s"
# Interval in which the manager prunes old messages, deletes topics
# and prints the stats.
#
# manager-interval: "1m"
# Defines topic names that are not allowed, because they are otherwise used. There are a few default topics
# that cannot be used (e.g. app, account, settings, ...). To extend the default list, define them here.
#
# Example:
# disallowed-topics:
# - about
# - pricing
# - contact
#
# disallowed-topics:
# Defines if the root route (/) is pointing to the landing page (as on ntfy.sh) or the
# web app. If you self-host, you don't want to change this.
# Can be "app" (default), "home" or "disable" to disable the web app entirely.
#
# web-root: app
# Various feature flags used to control the web app, and API access, mainly around user and
# account management.
#
# - enable-signup allows users to sign up via the web app, or API
# - enable-login allows users to log in via the web app, or API
# - enable-reservations allows users to reserve topics (if their tier allows it)
#
# enable-signup: false
# enable-login: false
# enable-reservations: false
# Server URL of a Firebase/APNS-connected ntfy server (likely "https://ntfy.sh").
#
# iOS users:
# If you use the iOS ntfy app, you MUST configure this to receive timely notifications. You'll likely want this:
# upstream-base-url: "https://ntfy.sh"
#
# If set, all incoming messages will publish a "poll_request" message to the configured upstream server, containing
# the message ID of the original message, instructing the iOS app to poll this server for the actual message contents.
# This is to prevent the upstream server and Firebase/APNS from being able to read the message.
#
# upstream-base-url:
# Rate limiting: Total number of topics before the server rejects new topics.
#
# global-topic-limit: 15000
# Rate limiting: Number of subscriptions per visitor (IP address)
#
# visitor-subscription-limit: 30
# Rate limiting: Allowed GET/PUT/POST requests per second, per visitor:
# - visitor-request-limit-burst is the initial bucket of requests each visitor has
# - visitor-request-limit-replenish is the rate at which the bucket is refilled
# - visitor-request-limit-exempt-hosts is a comma-separated list of hostnames, IPs or CIDRs to be
# exempt from request rate limiting. Hostnames are resolved at the time the server is started.
# Example: "1.2.3.4,ntfy.example.com,8.7.6.0/24"
#
# visitor-request-limit-burst: 60
# visitor-request-limit-replenish: "5s"
# visitor-request-limit-exempt-hosts: ""
# Rate limiting: Hard daily limit of messages per visitor and day. The limit is reset
# every day at midnight UTC. If the limit is not set (or set to zero), the request
# limit (see above) governs the upper limit.
#
# visitor-message-daily-limit: 0
# Rate limiting: Allowed emails per visitor:
# - visitor-email-limit-burst is the initial bucket of emails each visitor has
# - visitor-email-limit-replenish is the rate at which the bucket is refilled
#
# visitor-email-limit-burst: 16
# visitor-email-limit-replenish: "1h"
# Rate limiting: Attachment size and bandwidth limits per visitor:
# - visitor-attachment-total-size-limit is the total storage limit used for attachments per visitor
# - visitor-attachment-daily-bandwidth-limit is the total daily attachment download/upload traffic limit per visitor
#
# visitor-attachment-total-size-limit: "100M"
# visitor-attachment-daily-bandwidth-limit: "500M"
# Payments integration via Stripe
#
# - stripe-secret-key is the key used for the Stripe API communication. Setting this values
# enables payments in the ntfy web app (e.g. Upgrade dialog). See https://dashboard.stripe.com/apikeys.
# - stripe-webhook-key is the key required to validate the authenticity of incoming webhooks from Stripe.
# Webhooks are essential to keep the local database in sync with the payment provider. See https://dashboard.stripe.com/webhooks.
#
# stripe-secret-key:
# stripe-webhook-key:
# Logging options
#
# By default, ntfy logs to the console (stderr), with an "info" log level, and in a human-readable text format.
# ntfy supports five different log levels, can also write to a file, log as JSON, and even supports granular
# log level overrides for easier debugging. Some options (log-level and log-level-overrides) can be hot reloaded
# by calling "kill -HUP $pid" or "systemctl reload ntfy".
#
# - log-format defines the output format, can be "text" (default) or "json"
# - log-file is a filename to write logs to. If this is not set, ntfy logs to stderr.
# - log-level defines the default log level, can be one of "trace", "debug", "info" (default), "warn" or "error".
# Be aware that "debug" (and particularly "trace") can be VERY CHATTY. Only turn them on briefly for debugging purposes.
# - log-level-overrides lets you override the log level if certain fields match. This is incredibly powerful
# for debugging certain parts of the system (e.g. only the account management, or only a certain visitor).
# This is an array of strings in the format:
# - "field=value -> level" to match a value exactly, e.g. "tag=manager -> trace"
# - "field -> level" to match any value, e.g. "time_taken_ms -> debug"
# Warning: Using log-level-overrides has a performance penalty. Only use it for temporary debugging.
#
# Example (good for production):
# log-level: info
# log-format: json
# log-file: /var/log/ntfy.log
#
# Example level overrides (for debugging, only use temporarily):
# log-level-overrides:
# - "tag=manager -> trace"
# - "visitor_ip=1.2.3.4 -> debug"
# - "time_taken_ms -> debug"
#
# log-level: info
# log-level-overrides:
# log-format: text
# log-file:

View File

@@ -1,31 +0,0 @@
# Deploy readarr: service user in the media group, compose file, stack up.
- name: Create service user
  user:
    name: "{{ role_name }}"
    groups: "{{ media_group }}"
    # append so existing supplementary groups are preserved
    append: true
    system: true
  register: service_user
  become: true
- name: Create install directory
  file:
    path: "{{ install_directory }}/{{ role_name }}"
    state: directory
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_directory_mask }}"
  become: true
- name: Copy docker-compose file to destination
  template:
    src: docker-compose.yml
    dest: "{{ install_directory }}/{{ role_name }}/docker-compose.yml"
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_file_mask }}"
    # Reject the rendered file if docker compose cannot parse it.
    validate: docker compose -f %s config
  become: true
- name: Start docker container
  community.docker.docker_compose_v2:
    project_src: "{{ install_directory }}/{{ role_name }}"
    pull: always
    # Canonical booleans instead of YAML 1.1 "yes".
    remove_orphans: true

View File

@@ -1,24 +0,0 @@
# Readarr ebook/audiobook manager; service name templated from role_name.
networks:
  traefik:
    external: true
services:
  {{ role_name }}:
    container_name: "{{ role_name }}"
    image: cr.hotio.dev/hotio/readarr
    restart: unless-stopped
    networks:
      - traefik
    environment:
      # Dedicated service user plus the shared media group (umask 002) so
      # imports stay group-writable.
      - "PUID={{ service_user.uid }}"
      - "PGID={{ media_gid }}"
      - "TZ={{ timezone }}"
      - "UMASK=002"
    volumes:
      - "{{ data_dir }}/{{ role_name }}:/config"
      - "{{ media_storage_mnt }}/data:/data"
    labels:
      traefik.enable: true
      traefik.http.routers.{{ role_name }}.rule: "Host(`{{ role_name }}.local.{{ personal_domain }}`)"
      traefik.http.routers.{{ role_name }}.middlewares: lan-whitelist@file

View File

@@ -1,29 +0,0 @@
# Deploy Renovate: install directory, compose file, config.js, stack up.
- name: Create install directory
  file:
    path: "{{ install_directory }}/{{ role_name }}"
    state: directory
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_directory_mask }}"
  become: true
- name: Copy docker-compose file to destination
  template:
    src: docker-compose.yml
    dest: "{{ install_directory }}/{{ role_name }}/docker-compose.yml"
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_file_mask }}"
    # Reject the rendered file if docker compose cannot parse it.
    validate: docker compose -f %s config
  become: true
- name: Copy config.js to destination
  template:
    src: config.js
    # NOTE(review): hardcodes "renovate" where the other tasks use role_name.
    dest: "{{ install_directory }}/renovate/config.js"
    mode: "{{ docker_compose_file_mask }}"
  become: true
- name: Start docker container
  community.docker.docker_compose_v2:
    project_src: "{{ install_directory }}/{{ role_name }}"
    pull: always
    # Canonical boolean instead of YAML 1.1 "yes".
    remove_orphans: true

View File

@@ -1,14 +0,0 @@
module.exports = {
endpoint: "git.{{ personal_domain }}/api/v2",
token: '{{ renovate_gitea_token }}',
platform: 'gitea',
dryRun: true,
autodiscover: true,
onboarding: false,
redisUrl: 'redis://redis',
repositoryCache: 'enabled',
persistRepoData: true,
binarySource: "docker",
dockerUser: "{{ primary_uid }}",
baseDir: "{{ data_dir }}/renovate",
};

View File

@@ -1,26 +0,0 @@
# Renovate bot with a redis cache; talks to the docker socket via the
# external docker-socket-proxy network.
networks:
  docker-socket-proxy:
    external: true
services:
  renovate:
    container_name: renovate
    image: renovate/renovate:slim
    restart: unless-stopped
    depends_on:
      - redis
    networks:
      - docker-socket-proxy
      # Fix: redis (below) is only attached to the implicit "default"
      # network, so renovate must join it too — otherwise redis://redis
      # (config.js redisUrl) is unresolvable from this container.
      - default
    user: "{{ service_user.uid }}:{{ service_user.group }}"
    environment:
      - TZ={{ timezone }}
      # NOTE(review): hostname "docker_socket_proxy" must match the proxy
      # container's name on the docker-socket-proxy network — confirm.
      - DOCKER_HOST=tcp://docker_socket_proxy:2375
      - "RENOVATE_TOKEN={{ renovate_gitea_token }}"
    volumes:
      - "{{ data_dir }}/renovate:/{{ data_dir }}/renovate" # These must be the same
      - ./config.js:/usr/src/app/config.js:ro
  redis:
    image: redis:7-alpine
    restart: unless-stopped

View File

@@ -1,8 +0,0 @@
renovate_gitea_token: !vault |
$ANSIBLE_VAULT;1.1;AES256
61383064643566343633633962376238346137633933643634353564316266656338333665613235
3230613339633561313064393163393537623763393336300a383332626538376335613763313439
64326566393761666266303438313435346535626231376661653863663664623839663431363632
6434306532613065650a636562663030363162396435346262353839653736343530393365633331
65366534333234353239376566326234666566303038396661343137316265306433313235366530
6164656437346131376165613136363161646437343038393266

View File

@@ -1,31 +0,0 @@
# Deploy SABnzbd: dedicated system user (added to the shared media group
# so downloads are accessible to the other media services), compose
# project directory, then the container stack.
- name: Create service user
  user:
    name: "{{ role_name }}"
    groups: "{{ media_group }}"
    append: yes
    system: true
  # Registered so the compose template can use service_user.uid.
  register: service_user
  become: true

- name: Create install directory
  file:
    path: "{{ install_directory }}/{{ role_name }}"
    state: directory
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_directory_mask }}"
  become: true

- name: Copy docker-compose file to destination
  template:
    src: docker-compose.yml
    dest: "{{ install_directory }}/{{ role_name }}/docker-compose.yml"
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_file_mask }}"
    # Reject the rendered file if docker compose cannot parse it.
    validate: docker compose -f %s config
  become: true

- name: Start docker containers
  community.docker.docker_compose_v2:
    project_src: "{{ install_directory }}/{{ role_name }}"
    pull: always
    remove_orphans: yes

View File

@@ -1,25 +0,0 @@
# SABnzbd usenet downloader, exposed LAN-only through Traefik.
networks:
  traefik:
    external: true

services:
  sabnzbd:
    container_name: sabnzbd
    image: lscr.io/linuxserver/sabnzbd:latest
    restart: unless-stopped
    networks:
      - traefik
    environment:
      - "PUID={{ service_user.uid }}"
      # Run with the shared media group so downloads stay group-accessible.
      - "PGID={{ media_gid }}"
      - "TZ={{ timezone }}"
      # Group-writable output for the other media services.
      - "UMASK=002"
    volumes:
      - "{{ data_dir }}/{{ role_name }}:/config"
      - "{{ media_storage_mnt }}/data/usenet:/data/usenet"
    labels:
      traefik.enable: true
      traefik.http.routers.{{ role_name }}.rule: "Host(`{{ role_name }}.local.{{ personal_domain }}`)"
      # Restrict access to the LAN.
      traefik.http.routers.{{ role_name }}.middlewares: lan-whitelist@file
      # SABnzbd's web UI listens on 8080 inside the container.
      traefik.http.services.sabnzbd.loadbalancer.server.port: 8080

View File

@@ -1,4 +0,0 @@
# Handler: restart the synapse compose stack when notified
# (triggered by homeserver.yaml changes in tasks/main.yml).
- name: restart synapse
  community.docker.docker_compose_v2:
    project_src: "{{ install_directory }}/{{ role_name }}"
    restarted: true

View File

@@ -1,77 +0,0 @@
# Deploy the Synapse (Matrix homeserver) stack: dedicated system user,
# compose project, homeserver config, and the nginx well-known
# delegation files for Matrix server/client discovery.
- name: Create service user
  user:
    name: "{{ role_name }}"
    system: true
  # Registered so later tasks and the compose template can use the uid.
  register: service_user
  become: true

- name: Create install directory
  file:
    path: "{{ install_directory }}/{{ role_name }}"
    state: directory
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_directory_mask }}"
  become: true

- name: Copy docker-compose file to destination
  template:
    src: docker-compose.yml
    dest: "{{ install_directory }}/{{ role_name }}/docker-compose.yml"
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_file_mask }}"
    # Reject the rendered file if docker compose cannot parse it.
    validate: docker compose -f %s config
  become: true

# NOTE(review): paths below hardcode "synapse" where siblings use
# {{ role_name }} — equivalent for this role, but worth unifying.
- name: Copy homeserver.yaml to destination
  template:
    src: homeserver.yaml
    dest: "{{ install_directory }}/synapse/homeserver.yaml"
    owner: "{{ service_user.uid }}"
    mode: "{{ docker_compose_file_mask }}"
  # Synapse only re-reads its configuration on restart.
  notify: restart synapse
  become: true

- name: Create config directory and set synapse user to owner
  file:
    path: "{{ data_dir }}/synapse"
    state: directory
    owner: "{{ service_user.uid }}"
    mode: "{{ docker_compose_directory_mask }}"
  become: true

# The nginx sidecar serves /.well-known/matrix/* for federation and
# client discovery on the apex domain.
- name: Create nginx config directory
  file:
    path: "{{ data_dir }}/nginx/synapse/www/.well-known/matrix/"
    state: directory
    mode: "{{ docker_compose_directory_mask }}"
  become: true

- name: Install nginx config file
  template:
    src: nginx/matrix.conf
    dest: "{{ data_dir }}/nginx/synapse/matrix.conf"
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_file_mask }}"
  become: true

- name: Install well known client file
  template:
    src: nginx/client.json
    dest: "{{ data_dir }}/nginx/synapse/www/.well-known/matrix/client"
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_file_mask }}"
  become: true

- name: Install well known server file
  template:
    src: nginx/server.json
    dest: "{{ data_dir }}/nginx/synapse/www/.well-known/matrix/server"
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_file_mask }}"
  become: true

- name: Start docker container
  community.docker.docker_compose_v2:
    project_src: "{{ install_directory }}/{{ role_name }}"
    pull: always
    remove_orphans: yes

View File

@@ -1,67 +0,0 @@
# Synapse homeserver with postgres, redis, an admin UI, and an nginx
# sidecar that serves the .well-known delegation files. Only the
# internet-facing services join the external traefik network; the
# datastores stay on the project-default network.
networks:
  traefik:
    external: true

services:
  synapse:
    container_name: "synapse"
    image: matrixdotorg/synapse
    restart: unless-stopped
    depends_on:
      - db
    networks:
      - traefik
      - default
    environment:
      - "UID={{ service_user.uid }}"
      # Fix: use the service account's primary group for GID instead of
      # reusing its uid (matches the uid:group convention used by the
      # renovate role; ansible.builtin.user registers the gid as .group).
      - "GID={{ service_user.group }}"
      - "TZ={{ timezone }}"
    volumes:
      - "{{ data_dir }}/{{ role_name }}:/data"
      - ./homeserver.yaml:/data/homeserver.yaml
    labels:
      traefik.enable: true
      # Serve matrix.<domain> plus /_matrix/ on the apex domain.
      traefik.http.routers.synapse.rule: "Host(`matrix.{{ personal_domain }}`) || (Host(`{{ personal_domain }}`) && PathPrefix(`/_matrix/`))"
  db:
    image: postgres:14-alpine
    restart: unless-stopped
    networks:
      - default
    environment:
      - POSTGRES_USER=synapse
      - POSTGRES_PASSWORD=synapse
      # Synapse requires a C-locale, UTF-8 database.
      - POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
    volumes:
      - "{{ data_dir }}/postgres/synapse:/var/lib/postgresql/data"
  redis:
    networks:
      - default
    image: redis:7-alpine
    restart: unless-stopped
    volumes:
      - "{{ data_dir }}/redis/synapse:/data"
  admin:
    image: awesometechnologies/synapse-admin:latest
    restart: unless-stopped
    networks:
      - traefik
    labels:
      traefik.enable: true
      traefik.http.routers.synapse-admin.rule: "Host(`synapse-admin.local.{{ personal_domain }}`)"
      # Admin UI is LAN-only.
      traefik.http.routers.synapse-admin.middlewares: lan-whitelist@file
  nginx:
    image: nginx:latest
    restart: unless-stopped
    networks:
      - traefik
    volumes:
      - "{{ data_dir }}/nginx/synapse/matrix.conf:/etc/nginx/conf.d/matrix.conf"
      - "{{ data_dir }}/nginx/synapse/www:/var/www"
    labels:
      traefik.enable: true
      # Apex-domain catch-all for /.well-known/matrix/* delegation.
      traefik.http.routers.matrix.rule: "Host(`{{ personal_domain }}`)"

View File

@@ -1,42 +0,0 @@
# Synapse homeserver configuration (Jinja-templated by Ansible).
# TLS is terminated by Traefik; Synapse listens on plain HTTP.
server_name: "{{ personal_domain }}"
pid_file: /data/homeserver.pid
public_baseurl: "https://matrix.{{ personal_domain }}"

# Exempt the LAN range from rate limiting / blacklisting.
ip_range_whitelist:
  - 10.0.0.0/24

# NOTE(review): the top-level `acme` section was removed from modern
# Synapse releases (TLS is handled by Traefik here anyway) — confirm the
# running version still tolerates it.
acme:
  enabled: false

# Postgres sidecar from docker-compose (service name "db").
database:
  name: psycopg2
  args:
    user: synapse
    password: synapse
    database: synapse
    host: db

# Redis sidecar (service name "redis"), used by Synapse for replication.
redis:
  enabled: true
  host: redis
  port: 6379

listeners:
  - port: 8008
    tls: false
    type: http
    # Trust X-Forwarded-For from the reverse proxy chain.
    x_forwarded: true
    resources:
      - names: [client, federation]
        compress: false

registration_shared_secret: "{{ synapse_registration_shared_secret }}"
report_stats: true
media_store_path: /data/media_store
uploads_path: /data/uploads
trusted_key_servers:
  - server_name: matrix.org
suppress_key_server_warning: true

View File

@@ -1,5 +0,0 @@
{
"m.homeserver": {
"base_url": "https://matrix.{{ personal_domain }}"
}
}

View File

@@ -1,17 +0,0 @@
# Synapse reverse-proxy vhost: Traefik -> nginx -> synapse.
# Also serves the /.well-known/matrix/* delegation files from /var/www.
server {
    listen 80 default_server;
    server_name {{ personal_domain }};

    # Traefik -> nginx -> synapse
    location /_matrix {
        proxy_pass http://synapse:8008;
        # Per the Synapse reverse-proxy docs: forward the original Host
        # header and use HTTP/1.1 towards the backend.
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-For $remote_addr;
        # Allow large media uploads.
        client_max_body_size 128m;
    }

    location /.well-known/matrix/ {
        root /var/www/;
        default_type application/json;
        # Matrix clients fetch these cross-origin.
        add_header Access-Control-Allow-Origin *;
    }
}

View File

@@ -1,4 +0,0 @@
{
"m.server": "matrix.{{ personal_domain }}:443"
}

View File

@@ -1,13 +0,0 @@
synapse_registration_shared_secret: !vault |
$ANSIBLE_VAULT;1.1;AES256
33393864663831636636616361333635343366613633353234363761303235333361376230333863
6432326363656232323531636139356663623734313161350a653262636439363334353266393563
61646265303161396630653839346266336335613030623237363537663839306331333064626436
6262623236353061380a306335356365633164313639633031353663373633306539343464376639
37396535386631363866343030653835636437303230333430303033616364383734626563336265
37643164393334316534386266613930383136663934613233313939316533643164623163626334
31396163383132333365383364323866626264323234353939653236386231636536666261616534
37353930663863343533636536356363373432383437643965663636323234303730623434386264
31653131653964376164623039616166376162323235363164303163353363643733643761353264
63393632366139313538656566393239393465653536356131333430323165356263323839666636
353466373866616536383761343036666561

View File

@@ -1,29 +0,0 @@
# Deploy the UniFi controller: dedicated system user, compose project
# directory, then the container stack.
- name: Create service user
  user:
    name: "{{ role_name }}"
    system: true
  # Registered so the compose template can use service_user.uid.
  register: service_user
  become: true

- name: Create install directory
  file:
    path: "{{ install_directory }}/{{ role_name }}"
    state: directory
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_directory_mask }}"
  become: true

- name: Copy docker-compose file to destination
  template:
    src: docker-compose.yml
    dest: "{{ install_directory }}/{{ role_name }}/docker-compose.yml"
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_file_mask }}"
    # Reject the rendered file if docker compose cannot parse it.
    validate: docker compose -f %s config
  become: true

- name: Start docker container
  community.docker.docker_compose_v2:
    project_src: "{{ install_directory }}/{{ role_name }}"
    pull: always
    remove_orphans: yes

View File

@@ -1,29 +0,0 @@
# UniFi controller. The web UI goes through Traefik (LAN-only); the
# device-facing ports are published directly so APs can reach them.
networks:
  traefik:
    external: true

services:
  unifi-controller:
    container_name: unifi-controller
    # NOTE(review): linuxserver deprecated unifi-controller in favour of
    # unifi-network-application — confirm before this image goes stale.
    image: lscr.io/linuxserver/unifi-controller:latest
    restart: unless-stopped
    networks:
      - traefik
    # Quoted so YAML can never mis-type a host:container mapping.
    ports:
      - "8443:8443" # WebUI
      - "3478:3478/udp" # STUN
      - "10001:10001/udp" # AP discovery
      - "8080:8080" # Device communication
    environment:
      - "PUID={{ service_user.uid }}"
      # Fix: use the service account's primary group for PGID instead of
      # reusing its uid (ansible.builtin.user registers the gid as .group;
      # matches the uid:group convention used elsewhere in this repo).
      - "PGID={{ service_user.group }}"
      - "TZ={{ timezone }}"
    volumes:
      - "{{ data_dir }}/{{ role_name }}:/config"
    labels:
      traefik.enable: true
      traefik.http.routers.unifi.rule: "Host(`unifi.local.{{ personal_domain }}`)"
      # LAN-only access.
      traefik.http.routers.unifi.middlewares: lan-whitelist@file
      # The controller's own endpoint is HTTPS on 8443.
      traefik.http.services.unifi.loadbalancer.server.scheme: https
      traefik.http.services.unifi.loadbalancer.server.port: 8443
View File

@@ -1,28 +0,0 @@
# wger vhost: proxy the Django app and serve static/media directly.
# Upstream is the "web" service from docker-compose (gunicorn on 8000).
upstream wger {
    server web:8000;
}

server {
    listen 80;

    location / {
        proxy_pass http://wger;
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        # Pass through the scheme set by the upstream proxy (Traefik).
        proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
        proxy_set_header X-Forwarded-Host $host:$server_port;
        proxy_redirect off;
    }

    # Static assets and media are served straight from the shared volumes.
    location /static/ {
        alias /wger/static/;
    }

    location /media/ {
        alias /wger/media/;
    }

    # Increase max body size to allow for video uploads
    client_max_body_size 100M;
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,60 +0,0 @@
# Deploy the wger stack: compose project dir, data dirs for postgres /
# redis / app state, the rendered prod.env, static config files, then
# the container stack.
- name: Create install directory
  file:
    path: "{{ install_directory }}/{{ role_name }}"
    state: directory
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_directory_mask }}"
  become: true

- name: Create database data directory
  file:
    path: "{{ data_dir }}/postgres/{{ role_name }}"
    state: directory
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_directory_mask }}"
  become: true

- name: Create redis data directory
  file:
    path: "{{ data_dir }}/redis/{{ role_name }}"
    state: directory
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_directory_mask }}"
  become: true

- name: Create data and config directories
  file:
    path: "{{ data_dir }}/{{ role_name }}/{{ item }}"
    state: directory
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_directory_mask }}"
  loop:
    - config
    - static
    - media
    - celery-beat
  # Fix: was missing become — chown to docker_user needs privilege,
  # matching every sibling task in this file.
  become: true

- name: Install wger config file (templatized)
  template:
    src: prod.env
    dest: "{{ data_dir }}/{{ role_name }}/config/prod.env"
    owner: "{{ docker_user }}"
  # Fix: was missing become — the config dir is docker_user-owned.
  become: true

- name: Install config files
  copy:
    src: ./
    dest: "{{ data_dir }}/{{ role_name }}/config"
    owner: "{{ docker_user }}"
  # Fix: was missing become — same reason as above.
  become: true

- name: Copy docker-compose file to destination
  template:
    src: docker-compose.yml
    dest: "{{ install_directory }}/{{ role_name }}/docker-compose.yml"
    owner: "{{ docker_user }}"
    mode: "{{ docker_compose_file_mask }}"
    # Reject the rendered file if docker compose cannot parse it.
    validate: docker compose -f %s config
  become: true

- name: Start docker container
  community.docker.docker_compose_v2:
    project_src: "{{ install_directory }}/{{ role_name }}"
    pull: always
    remove_orphans: true

View File

@@ -1,132 +0,0 @@
# wger stack: Django app (gunicorn), nginx front, postgres, redis cache,
# and celery worker/beat. Only nginx joins the external traefik network.
# Healthchecks gate the depends_on ordering: db/cache -> web -> workers.
networks:
  traefik:
    external: true

services:
  web:
    image: wger/server:latest
    depends_on:
      db:
        condition: service_healthy
      cache:
        condition: service_healthy
    networks:
      - default
    # Paths are expanded by Ansible's template step before compose parses
    # this file, so the bare Jinja expressions here are safe.
    env_file:
      - {{ data_dir }}/{{ role_name }}/config/prod.env
    volumes:
      - {{ data_dir }}/{{ role_name }}/static:/home/wger/static
      - {{ data_dir }}/{{ role_name }}/media:/home/wger/media
    expose:
      - 8000
    healthcheck:
      test: wget --no-verbose --tries=1 --spider http://localhost:8000
      interval: 10s
      timeout: 5s
      # Long grace period: first boot runs migrations and static sync.
      start_period: 300s
      retries: 5
    restart: unless-stopped
  nginx:
    image: nginx:stable
    depends_on:
      - web
    networks:
      - traefik
      - default
    volumes:
      - {{ data_dir }}/{{ role_name }}/config/nginx.conf:/etc/nginx/conf.d/default.conf
      # Static/media served read-only straight from the shared volumes.
      - {{ data_dir }}/{{ role_name }}/static:/wger/static:ro
      - {{ data_dir }}/{{ role_name }}/media:/wger/media:ro
    healthcheck:
      test: service nginx status
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s
    restart: unless-stopped
    labels:
      traefik.enable: true
      traefik.http.routers.wger.rule: "Host(`wger.{{ personal_domain }}`)"
      #traefik.http.services.wger.loadbalancer.server.port: 8000
      #traefik.http.routers.wger.middlewares: lan-whitelist@file
  db:
    image: postgres:15-alpine
    environment:
      - POSTGRES_USER=wger
      - POSTGRES_PASSWORD=wger
      - POSTGRES_DB=wger
      - "TZ={{ timezone }}"
    networks:
      - default
    volumes:
      - {{ data_dir }}/postgres/{{ role_name }}:/var/lib/postgresql/data/
    expose:
      - 5432
    healthcheck:
      test: pg_isready -U wger
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s
    restart: unless-stopped
  cache:
    image: redis
    expose:
      - 6379
    networks:
      - default
    volumes:
      - {{ data_dir }}/{{ role_name }}/config/redis.conf:/usr/local/etc/redis/redis.conf
      - {{ data_dir }}/redis/{{ role_name }}/data:/data
    command: [ "redis-server", "/usr/local/etc/redis/redis.conf"]
    healthcheck:
      test: redis-cli ping
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s
    restart: unless-stopped
    # You probably want to limit the memory usage of the cache, otherwise it might
    # hog all the available memory. Remove or change according to your needs.
    #mem_limit: 2gb
  celery_worker:
    image: wger/server:latest
    command: /start-worker
    networks:
      - default
    env_file:
      - {{ data_dir }}/{{ role_name }}/config/prod.env
    volumes:
      - {{ data_dir }}/{{ role_name }}/media:/home/wger/media
    depends_on:
      web:
        condition: service_healthy
    healthcheck:
      test: celery -A wger inspect ping
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s
  celery_beat:
    image: wger/server:latest
    command: /start-beat
    networks:
      - default
    volumes:
      - {{ data_dir }}/{{ role_name }}/celery-beat:/home/wger/beat/
    env_file:
      - {{ data_dir }}/{{ role_name }}/config/prod.env
    depends_on:
      celery_worker:
        condition: service_healthy

# Heads up, if you remove these volumes and use folders directly you need to chown them
# to the UID and GID 1000 even if it doesn't exist on your system. Also, they should
# be readable by everyone.

View File

@@ -1,172 +0,0 @@
SECRET_KEY="{{ DJANGO_SECRET }}"
SIGNING_KEY="{{ JWT_SECRET }}"
TIME_ZONE=America/New_York
TZ=America/New_York
CSRF_TRUSTED_ORIGINS="https://wger.{{ personal_domain }}"
X_FORWARDED_PROTO_HEADER_SET=True
MEDIA_URL="https://wger.{{ personal_domain }}/media/"
STATIC_URL="https://wger.{{ personal_domain }}/static/"
#
# These settings usually don't need changing
#
#
# Application
WGER_INSTANCE=https://wger.de # Wger instance from which to sync exercises, images, etc.
ALLOW_REGISTRATION=True
ALLOW_GUEST_USERS=True
ALLOW_UPLOAD_VIDEOS=True
# Users won't be able to contribute to exercises if their account age is
# lower than this amount in days.
MIN_ACCOUNT_AGE_TO_TRUST=21
# Synchronizing exercises
# It is recommended to keep the local database synchronized with the wger
# instance specified in WGER_INSTANCE since there are new added or translations
# improved. For this you have different possibilities:
# - Sync exercises on startup:
# SYNC_EXERCISES_ON_STARTUP=True
# DOWNLOAD_EXERCISE_IMAGES_ON_STARTUP=True
# - Sync them in the background with celery. This will setup a job that will run
# once a week at a random time (this time is selected once when starting the server)
SYNC_EXERCISES_CELERY=True
SYNC_EXERCISE_IMAGES_CELERY=True
SYNC_EXERCISE_VIDEOS_CELERY=True
# - Manually trigger the process as needed:
# docker compose exec web python3 manage.py sync-exercises
# docker compose exec web python3 manage.py download-exercise-images
# docker compose exec web python3 manage.py download-exercise-videos
# Synchronizing ingredients
# You can also synchronize the ingredients from a remote wger instance, and have
# basically the same options as for the exercises:
# - Sync them in the background with celery. This will setup a job that will run
# once a week at a random time (this time is selected once when starting the server)
SYNC_INGREDIENTS_CELERY=True
# - Manually trigger the process as needed:
# docker compose exec web python3 manage.py sync-ingredients
# This option controls whether to download ingredients and their images from the
# configured wger instance. When scanning products with the barcode scanner, it is
# possible to dynamically fetch the ingredient if it is not known in the local database.
# Possible values: WGER or None. Requires USE_CELERY to be set to true.
DOWNLOAD_INGREDIENTS_FROM=WGER
# Whether celery is configured and should be used. Can be left to true with
# this setup but can be deactivated if you are using the app in some other way
USE_CELERY=True
#
# Celery
CELERY_BROKER=redis://cache:6379/2
CELERY_BACKEND=redis://cache:6379/2
CELERY_FLOWER_PASSWORD=adminadmin
#
# Database
DJANGO_DB_ENGINE=django.db.backends.postgresql
DJANGO_DB_DATABASE=wger
DJANGO_DB_USER=wger
DJANGO_DB_PASSWORD=wger
DJANGO_DB_HOST=db
DJANGO_DB_PORT=5432
DJANGO_PERFORM_MIGRATIONS=True # Perform any new database migrations on startup
#
# Cache
DJANGO_CACHE_BACKEND=django_redis.cache.RedisCache
DJANGO_CACHE_LOCATION=redis://cache:6379/1
DJANGO_CACHE_TIMEOUT=1296000 # in seconds - 60*60*24*15, 15 Days
DJANGO_CACHE_CLIENT_CLASS=django_redis.client.DefaultClient
# DJANGO_CACHE_CLIENT_PASSWORD=abcde... # Only if you changed the redis config
# DJANGO_CACHE_CLIENT_SSL_KEYFILE=/path/to/ssl_keyfile # Path to an ssl private key.
# DJANGO_CACHE_CLIENT_SSL_CERTFILE=/path/to/ssl_certfile # Path to an ssl certificate.
# DJANGO_CACHE_CLIENT_SSL_CERT_REQS=<none | optional | required> # The string value for the verify_mode.
# DJANGO_CACHE_CLIENT_SSL_CHECK_HOSTNAME=False # If set, match the hostname during the SSL handshake.
#
# Brute force login attacks
# https://django-axes.readthedocs.io/en/latest/index.html
AXES_ENABLED=True
AXES_FAILURE_LIMIT=10
AXES_COOLOFF_TIME=30 # in minutes
AXES_HANDLER=axes.handlers.cache.AxesCacheHandler
AXES_LOCKOUT_PARAMETERS=ip_address
AXES_IPWARE_PROXY_COUNT=1
AXES_IPWARE_META_PRECEDENCE_ORDER=HTTP_X_FORWARDED_FOR,REMOTE_ADDR
#
# Others
DJANGO_DEBUG=False
WGER_USE_GUNICORN=True
EXERCISE_CACHE_TTL=18000 # in seconds - 5*60*60, 5 hours
SITE_URL=http://localhost
#
# JWT auth
ACCESS_TOKEN_LIFETIME=10 # The lifetime duration of the access token, in minutes
REFRESH_TOKEN_LIFETIME=24 # The lifetime duration of the refresh token, in hours
#
# Auth Proxy Authentication
#
# Please read the documentation before enabling this feature:
# https://wger.readthedocs.io/en/latest/administration/auth_proxy.html
AUTH_PROXY_HEADER=''
AUTH_PROXY_TRUSTED_IPS=''
AUTH_PROXY_CREATE_UNKNOWN_USER=False
AUTH_PROXY_USER_EMAIL_HEADER=''
AUTH_PROXY_USER_NAME_HEADER=''
#
# Other possible settings
# Recaptcha keys. You will need to create an account and register your domain
# https://www.google.com/recaptcha/
# RECAPTCHA_PUBLIC_KEY=abcde...
# RECAPTCHA_PRIVATE_KEY=abcde...
USE_RECAPTCHA=False
# Clears the static files before copying the new ones (i.e. just calls collectstatic
# with the appropriate flag: "manage.py collectstatic --no-input --clear"). Usually
# This can be left like this but if you have problems and new static files are not
# being copied correctly, clearing everything might help
DJANGO_CLEAR_STATIC_FIRST=False
#
# Email
# https://docs.djangoproject.com/en/4.1/topics/email/#smtp-backend
# ENABLE_EMAIL=False
# EMAIL_HOST=email.example.com
# EMAIL_PORT=587
# EMAIL_HOST_USER=username
# EMAIL_HOST_PASSWORD=password
# EMAIL_USE_TLS=True
# EMAIL_USE_SSL=False
FROM_EMAIL='wger Workout Manager <wger@example.com>'
# Set your name and email to be notified if an internal server error occurs.
# Needs a working email configuration
# DJANGO_ADMINS=your name,email@example.com
# Whether to compress css and js files into one (of each)
# COMPRESS_ENABLED=True
#
# Django Rest Framework
# The number of proxies in front of the application. In the default configuration
# only nginx is. Change as appropriate if your setup differs. Also note that this
# is only used when throttling API requests.
NUMBER_OF_PROXIES=1
#
# Gunicorn
#
# Additional gunicorn options, change as needed.
# For the number of workers to spawn, a usually recommended value is (2 x $num_cores) + 1
# see:
# - https://docs.gunicorn.org/en/stable/settings.html
# - https://github.com/wger-project/wger/blob/master/extras/docker/production/entrypoint.sh#L95
GUNICORN_CMD_ARGS="--workers 3 --threads 2 --worker-class gthread --proxy-protocol True --timeout 240"

View File

@@ -1,16 +0,0 @@
DJANGO_SECRET: !vault |
$ANSIBLE_VAULT;1.1;AES256
64326466343139613339363438386534363564626662366266353732383831613735613130666663
6464623832646233653332313434303939666633613261640a393132616662326637356263373966
30623465363333306430636462653738353737376635393366623162383437343430336163373832
3931363133376466330a373565353636353932653436306165303664366539333263626566393436
35386366633735373137616238303462616162636362306563343064383764383136
JWT_SECRET: !vault |
$ANSIBLE_VAULT;1.1;AES256
36306265373261313533313237653432663230666162373062373166323061373932366434616532
6538393830396535633434373530626566316538313732620a636439363632666430613938326164
36363432363361653665303965353566623861323331306630316265633430616266363462636362
6132636138306335620a393662663431623566663165383362663138356237343063363239353063
61336633373963356533396132316432356534373930613434326235346639326634