-
Notifications
You must be signed in to change notification settings - Fork 59
/
docker-compose.yml
616 lines (616 loc) · 17.9 KB
/
docker-compose.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
## UPDATED: 2023-04-12
# changelog: https://github.com/zilexa/Homeserver/blob/master/docker/changelog.txt
##
## COMMON COMMANDS:
# Check for typos: docker-compose config
# Run: docker-compose up -d
# Stop a container: docker-compose stop containername
# Remove a stopped container: docker-compose rm containername
# Stop all containers: docker kill $(docker ps -q)
# Remove stopped containers: docker container prune
# Stop & remove all containers: docker rm -f $(docker ps -a -q)
# Remove everything related to stopped containers: sudo docker system prune --all --volumes --force
#
## Docker-Compose 2.4 the final non-swarm version
#
version: "2.4"
services:
##_______SYSTEM_____
##____________________ Portainer [SYSTEM/Docker]
portainer:
container_name: portainer
image: portainer/portainer-ce
restart: always
networks:
- web-proxy
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- $DOCKERDIR/portainer/data:/data
ports:
- 9000:9000
labels:
caddy: http://docker.o
caddy.reverse_proxy: "{{upstreams 9000}}"
plugsy.name: Docker
plugsy.link: http://docker.o/
plugsy.category: System
##_____________________ Caddy [SYSTEM/web-proxy]
caddy:
container_name: web-proxy
image: lucaslorentz/caddy-docker-proxy:ci-alpine
restart: always
networks:
- web-proxy
environment:
- CADDY_INGRESS_NETWORKS=web-proxy
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- $DOCKERDIR/web-proxy/caddy_data:/data
- $DOCKERDIR/web-proxy/config:/config
extra_hosts:
- host.docker.internal:host-gateway
ports:
- 443:443
- 80:80
labels:
caddy.email: $EMAIL
caddy_0: http://adguard.o
caddy_0.reverse_proxy: host.docker.internal:3000
caddy_1: http://vpn.o
caddy_1.reverse_proxy: host.docker.internal:5000
caddy_2: unifi.o
caddy_2.reverse_proxy: host.docker.internal:8080
plugsy.name: web-proxy [Caddy]
plugsy.category: Cloud
##
##____________________ Plugsy [SYSTEM/Homepage]
  plugsy:
    container_name: plugsy
    image: plugsy/core
    restart: always
    networks:
      - web-proxy
    volumes:
      # Docker socket mounted so Plugsy can discover containers via their plugsy.* labels
      - /var/run/docker.sock:/var/run/docker.sock
      # - $DOCKERDIR/dashboard/config.json:/config.json
    #ports:
    # - 8000:3000
    extra_hosts:
      - g.o:host-gateway
    labels:
      caddy: http://g.o
      caddy.reverse_proxy: "{{upstreams 3000}}"
##
##______PRIVACY_______
##______________________ AdGuard Home [PRIVACY/Blocker]
  adguard:
    container_name: adguard
    image: adguard/adguardhome
    restart: always
    # host networking so AdGuard can bind the host's DNS ports directly
    network_mode: host
    volumes:
      - $DOCKERDIR/adguardhome/conf:/opt/adguardhome/conf
    labels:
      plugsy.name: DNS Server [Adguard Home]
      plugsy.link: http://adguard.o/
      plugsy.category: Network
##____________________ Unbound [PRIVACY/dns]
  unbound:
    container_name: unbound
    image: klutchell/unbound
    restart: always
    networks:
      - unbound
    ports:
      # host port 5335 -> container DNS port 53 (TCP + UDP); presumably the upstream
      # resolver for AdGuard Home (see plugsy.parents below) — confirm in AdGuard's config
      - 5335:53/tcp
      - 5335:53/udp
    labels:
      plugsy.name: DNS Resolver [Unbound]
      plugsy.parents: DNS Server [Adguard Home]
##____________________ Castblock [PRIVACY/Chromecastblocker]
castblock:
container_name: castblock
image: erdnaxeli/castblock:latest
restart: always
network_mode: host
cap_add:
- NET_ADMIN
environment:
DEBUG: true
OFFSET: 1
CATEGORIES: sponsor,interaction
MUTE_ADS: true
labels:
plugsy.name: YT Casting [CastBlock]
plugsy.category: Network
##____________________ Unifi controller [NETWORK/Unifi-controller]
unifi-controller:
image: jacobalberty/unifi
container_name: unifi-controller
restart: "no" # " " is required otherwise this is interpreted as boolean instead of string
network_mode: host
volumes:
- $DOCKERDIR/unifi/data:/unifi/data # required to not loose your configuration
environment:
TZ: $TZ
LOTSOFDEVICES: true
JVM_EXTRA_OPTS: true
JVM_INIT_HEAP_SIZE: 256
JVM_MAX_HEAP_SIZE: 800
#ports:
#- 3478:3478/udp # STUN - Required
#- 8080:8080/tcp # Device/ controller comm. #- Required
#- 8443:8443/tcp # Controller GUI/API as seen in a web browser - Required
#- 10001:10001/udp # Access Point discovery - Recommended
labels:
plugsy.name: WiFi AP [UniFi Controller]
plugsy.category: Network
plugsy.link: http://unifi.o/
##
##_________ACCESS_______
##________________________ VPN-portal [ACCESS/vpn]
  VPN-portal:
    container_name: vpn-portal
    image: ngoduykhanh/wireguard-ui:latest
    restart: always
    # NET_ADMIN + host networking: the UI manages the host's WireGuard interface directly
    # via the /etc/wireguard bind mount below
    cap_add:
      - NET_ADMIN
    network_mode: host
    environment:
      SESSION_SECRET: $WGPORTALSECRET
      WGUI_USERNAME: $USER
      WGUI_PASSWORD: $PW
      WGUI_CONFIG_FILE_PATH: /etc/wireguard/wg0.conf
      WGUI_ENDPOINT_ADDRESS: $DOMAIN
      WGUI_DNS: $WGIP
      WGUI_PERSISTENT_KEEPALIVE: 25
      WGUI_SERVER_INTERFACE_ADDRESSES: $WGIP/24
      WGUI_SERVER_LISTEN_PORT: $WGPORT
      WGUI_SERVER_POST_UP_SCRIPT: $WGPOSTUP
      WGUI_SERVER_POST_DOWN_SCRIPT: $WGPOSTDOWN
      WGUI_DEFAULT_CLIENT_ALLOWED_IPS: $WGIP/24
      WGUI_DEFAULT_CLIENT_EXTRA_ALLOWED_IPS: $LAN_ADDRESS_RANGE
      # SMTP settings — presumably used to e-mail client configs; confirm against wireguard-ui docs
      SMTP_HOSTNAME: $SMTP
      SMTP_PORT: $SMTPPORT
      SMTP_USERNAME: $SMTPUSER
      SMTP_PASSWORD: $SMTPPASS
      SMTP_AUTH_TYPE: LOGIN
      EMAIL_FROM_ADDRESS: $RECIPIENT
      EMAIL_FROM_NAME: $SMTPUSER
    # cap json-file log size for this container
    logging:
      driver: json-file
      options:
        max-size: 15m
    volumes:
      - $DOCKERDIR/vpn-portal/db:/app/db
      - /etc/wireguard:/etc/wireguard
    labels:
      plugsy.name: VPN Portal [Wireguard UI]
      plugsy.link: http://vpn.o/
      plugsy.category: System
##
##____________________ Guacamole [CLOUD/remote-desktop]
  guacamole:
    container_name: guacamole
    image: maxwaldorf/guacamole
    restart: always
    networks:
      - web-proxy
    environment:
      EXTENSIONS: auth-quickconnect,auth-totp # add ,auth-totp if exposed to the internet, for 2FA
    volumes:
      - $DOCKERDIR/guacamole:/config
    labels:
      # published on the public domain (not a .o LAN alias), so reachable over HTTPS remotely
      caddy: remote.$DOMAIN
      caddy.reverse_proxy: "{{upstreams 8080}}"
      plugsy.name: Remote Desktop [Guacamole]
      plugsy.link: https://remote.$DOMAIN
      plugsy.category: Cloud
##
##________CLOUD______
##_____________________ Firefox Sync [CLOUD/Browser]
# generate secret.txt first see docker-config.sh
firefox-sync:
container_name: firefox-sync
image: crazymax/firefox-syncserver:latest
restart: always
networks:
- web-proxy
environment:
FF_SYNCSERVER_PUBLIC_URL: https://firefox.$DOMAIN
FF_SYNCSERVER_SECRET: $FFSYNCSECRET
FF_SYNCSERVER_FORWARDED_ALLOW_IPS: '*'
FF_SYNCSERVER_FORCE_WSGI_ENVIRON: true
FF_SYNCSERVER_ALLOW_NEW_USERS: false
FF_SYNCSERVER_LOGLEVEL: debug
FF_SYNCSERVER_ACCESSLOG: true
volumes:
- $DOCKERDIR/firefox-sync:/data
labels:
caddy: firefox.$DOMAIN
caddy.reverse_proxy: "{{upstreams 5000}}"
plugsy.name: Browser [Firefox Sync]
plugsy.category: Cloud
##
##_____________________ Bitwarden [CLOUD/Password-manager]
vaultwarden:
container_name: vaultwarden
image: vaultwarden/server
restart: always
healthcheck:
interval: 5m # to test the container, change to 10s. To prevent constant logfile activity, change to a few minutes
networks:
- web-proxy
volumes:
- $DOCKERDIR/vaultwarden:/data
environment:
WEBSOCKET_ENABLED: true
DOMAIN: vault.$DOMAIN
SIGNUPS_ALLOWED: false
ADMIN_TOKEN: $VAULTWARDENTOKEN
labels:
caddy: vault.$DOMAIN
caddy.reverse_proxy_0: "{{upstreams 80}}"
# Required extra headers
caddy.encode: gzip
caddy.header.X-XSS-Protection: '"1; mode=block;"'
caddy.header.X-Frame-Options: "DENY"
caddy.header.X-Content-Type-Options: "none"
caddy.reverse_proxy_1: "/notifications/hub/negotiate {{upstreams 80}}"
caddy.reverse_proxy_2: "/notifications/hub {{upstreams 3012}}"
plugsy.name: Password Manager [Vaultwarden]
plugsy.link: https://vault.$DOMAIN
plugsy.category: Cloud
##
##____________________ FileRun [CLOUD/FileRun]
  filerun:
    container_name: filerun
    image: filerun/filerun
    restart: always
    networks:
      - web-proxy
      - filerun
    environment:
      # database connection: points at the filerun-db service on the private filerun network
      FR_DB_HOST: filerun-db
      FR_DB_PORT: 3306
      FR_DB_NAME: filerun-db
      FR_DB_USER: $USER
      FR_DB_PASS: $PW_DB
      # run Apache as the host user so file ownership on /user-files matches the host
      APACHE_RUN_USER: $USER
      APACHE_RUN_USER_ID: $PUID
      APACHE_RUN_GROUP: $USER
      APACHE_RUN_GROUP_ID: $PGID
    depends_on:
      - filerun-db
    volumes:
      - $DOCKERDIR/filerun/html:/var/www/html
      - $DATAPOOL/users:/user-files
      - /user-files/snapshots/ # This excludes the $DATAPOOL/users/snapshots folder on your host. Otherwise Filerun will index all those snapshots.
    labels:
      caddy: drive.$DOMAIN
      caddy.reverse_proxy: "{{upstreams 80}}"
      caddy.reverse_proxy.header_up: "Host drive.$DOMAIN"
      # Required extra headers
      caddy.file_server: "" # required for fileservers
      caddy.encode: gzip # required for fileservers
      caddy.header.Strict-Transport-Security: '"max-age=15768000;"' # Recommended security hardening for fileservers
      caddy.header.X-XSS-Protection: '"1; mode=block;"' # Recommended security hardening for fileservers
      caddy.header.X-Content-Type-Options: "nosniff" # Seems required to open files in OnlyOffice
      caddy.header.X-Frame-Options: "SAMEORIGIN" # Seems required to open files in OnlyOffice
      plugsy.name: Drive [FileRun]
      plugsy.link: https://drive.$DOMAIN
      plugsy.category: Cloud
##____________________ Filerun database [CLOUD/FileRun/db]
  filerun-db:
    container_name: filerun-db
    # NOTE(review): pinned to old MariaDB 10.1 — presumably a FileRun compatibility
    # requirement; confirm before upgrading
    image: mariadb:10.1
    restart: always
    networks:
      - filerun
    environment:
      MYSQL_ROOT_PASSWORD: $PW_DB
      MYSQL_USER: $USER
      MYSQL_PASSWORD: $PW_DB
      MYSQL_DATABASE: filerun-db
    volumes:
      - $DOCKERDIR/filerun/db:/var/lib/mysql
    labels:
      plugsy.name: FileRun database
      plugsy.parents: Drive [FileRun]
##____________________ OnlyOffice for Filerun [CLOUD/FileRun/office]
office:
container_name: office
image: onlyoffice/documentserver
networks:
- web-proxy
- filerun
restart: always
environment:
JWT_ENABLED: true
JWT_SECRET: $ONLYOFFICEJWT
AMQP_URI: amqp://office-rabbitmq:5672
DB_HOST: office-db
DB_PORT: 3306
DB_TYPE: mariadb
DB_NAME: office-db
DB_USER: $USER
DB_PWD: $PW_DB
volumes:
- $DOCKERDIR/filerun/office/data:/var/www/onlyoffice/Data
- $DOCKERDIR/filerun/office/log:/var/log/onlyoffice
- /usr/share/fonts
labels:
caddy: office.$DOMAIN
caddy.reverse_proxy: "{{upstreams 80}}"
caddy.encode: gzip
plugsy.name: OnlyOffice
plugsy.parents: Drive [FileRun]
  # Message queue used by the OnlyOffice document server (see AMQP_URI above)
  office-rabbitmq:
    container_name: office-rabbitmq
    image: rabbitmq
    restart: always
    networks:
      - filerun
    volumes:
      - $DOCKERDIR/filerun/office/rabbitmq:/var/lib/rabbitmq
    labels:
      plugsy.name: OnlyOffice rabbitmq
      plugsy.parents: Drive [FileRun]
  # Database for the OnlyOffice document server
  office-db:
    container_name: office-db
    image: mariadb:latest
    networks:
      - filerun
    restart: always
    environment:
      MYSQL_ROOT_PASSWORD: $PW_DB
      MYSQL_USER: $USER
      MYSQL_PASSWORD: $PW_DB
      MYSQL_DATABASE: office-db
    volumes:
      - $DOCKERDIR/filerun/office/db:/var/lib/mysql
    labels:
      plugsy.name: OnlyOffice database
      plugsy.parents: Drive [FileRun]
##________MEDIA________
##_____________________ Jellyfin [MEDIA/Library]
jellyfin:
container_name: jellyfin
image: cr.hotio.dev/hotio/jellyfin
restart: always
networks:
- web-proxy
# Required for Intel QuickSync/VAAPI hardware accelerated video encoding/transcoding
devices:
- /dev/dri/renderD128:/dev/dri/renderD128
- /dev/dri/card0:/dev/dri/card0
environment:
PUID: $PUID
PGID: $PGID
TZ: $TZ
UMASK_SET: 002 #optional
volumes:
- $DOCKERDIR/jellyfin/config:/config
- $DATAPOOL/media:/data
ports:
- 8096:8096
labels:
caddy: http://jellyfin.o
caddy.reverse_proxy: "{{upstreams 8096}}"
plugsy.name: Mediaserver [Jellyfin]
plugsy.link: http://jellyfin.o/
plugsy.category: Media
##____________________ VPN-proxy [MEDIA/vpn-client-for-media]
VPN-proxy:
container_name: VPN-proxy
image: thrnz/docker-wireguard-pia
restart: always
networks:
- web-proxy
cap_add:
- NET_ADMIN
#- SYS_MODULE might not be needed with a 5.6+ kernel?
#- SYS_MODULE
# Mounting the tun device may be necessary for userspace implementations
#devices:
#- /dev/net/tun:/dev/net/tun
privileged: true
sysctls:
# wg-quick fails to set this without --privileged, so set it here instead if needed
- net.ipv4.conf.all.src_valid_mark=1
# May as well disable ipv6. Should be blocked anyway.
- net.ipv6.conf.default.disable_ipv6=1
- net.ipv6.conf.all.disable_ipv6=1
- net.ipv6.conf.lo.disable_ipv6=1
healthcheck:
test: ping -c 1 www.google.com || exit 1
interval: 5m # While testing container, change to 10s. To prevent constant logfile activity, change to a few minutes
timeout: 10s
start_period: 10s
retries: 3
environment:
LOCAL_NETWORK: $LAN_ADDRESS_RANGE,$WGIP/24
LOC: de-frankfurt
USER: $VPN_USER_PIA
PASS: $VPN_PW_PIA
#KEEPALIVE: 25
VPNDNS: 192.168.88.1
PORT_FORWARDING: 1
PORT_PERSIST: 0
PORT_SCRIPT: /pia-shared/updateport-qb.sh
#WG_USERSPACE: 1
volumes:
# Auth token is stored here
- $DOCKERDIR/vpn-proxy/pia:/pia
# If enabled, the forwarded port is dumped to /pia-shared/port.dat for potential use in other containers
- $DOCKERDIR/vpn-proxy/pia-shared:/pia-shared
# The container has no recovery logic. Use a healthcheck to catch disconnects.
ports:
- 9090:8080 #Qbittorrent webUI
labels:
caddy: http://downloads.o
caddy.reverse_proxy: "{{upstreams 8080}}"
plugsy.name: VPN-Proxy [PIA]
plugsy.parents: Downloads [QBittorrent]
##
##____________________ Transmission [MEDIA/download-client]
  qbittorrent:
    container_name: qbittorrent
    image: cr.hotio.dev/hotio/qbittorrent
    depends_on:
      - VPN-proxy
    # shares VPN-proxy's network stack, so all torrent traffic is routed through the VPN;
    # its web UI is published via VPN-proxy's 9090:8080 mapping
    network_mode: service:VPN-proxy
    restart: always
    environment:
      PUID: $PUID
      PGID: $PGID
      TZ: $TZ
    volumes:
      - $DOCKERDIR/qbittorrent:/config
      - $DATAPOOL/media/incoming:/Media/incoming
    labels:
      plugsy.name: Downloads [QBittorrent]
      plugsy.link: http://downloads.o/
      plugsy.category: Media
##
##____________________ Prowlarr [MEDIA/torrent-proxy for Sonarr&Radarr]
prowlarr:
container_name: prowlarr
image: cr.hotio.dev/hotio/prowlarr:testing
networks:
- web-proxy
depends_on:
- qbittorrent
restart: always
environment:
PUID: $PUID
PGID: $PGID
UMASK: 002
TZ: $TZ
volumes:
- $DOCKERDIR/prowlarr/config:/config
- $DATAPOOL/media/incoming:/Media/incoming
ports:
- 9696:9696
labels:
caddy: http://torrents.o
caddy.reverse_proxy: "{{upstreams 9696}}"
plugsy.name: Search [Prowlarr]
plugsy.link: http://torrents.o/
plugsy.category: Media
##
##____________________ Sonarr [MEDIA/PVR-TVshows]
sonarr:
container_name: sonarr
image: cr.hotio.dev/hotio/sonarr
networks:
- web-proxy
depends_on:
- prowlarr
- qbittorrent
restart: always
environment:
PUID: $PUID
PGID: $PGID
UMASK: 002
TZ: $TZ
volumes:
- $DOCKERDIR/sonarr/config:/config
- $DATAPOOL/media:/Media
ports:
- 8989:8989
labels:
caddy: sonarr.o
caddy.reverse_proxy: "{{upstreams 8989}}"
plugsy.name: Series [Sonarr]
plugsy.link: http://sonarr.o/
plugsy.category: Media
##
##____________________ Radarr [MEDIA/PVR-Movies]
radarr:
container_name: radarr
image: cr.hotio.dev/hotio/radarr
networks:
- web-proxy
depends_on:
- prowlarr
- qbittorrent
restart: always
environment:
PUID: $PUID
PGID: $PGID
UMASK: 002
TZ: $TZ
volumes:
- $DOCKERDIR/radarr/config:/config
- $DATAPOOL/media:/Media
ports:
- 7878:7878
labels:
caddy: radarr.o
caddy.reverse_proxy: "{{upstreams 7878}}"
plugsy.name: Movies [Radarr]
plugsy.link: http://radarr.o/
plugsy.category: Media
##
##____________________ Bazarr [MEDIA/subtitles]
bazarr:
container_name: bazarr
image: cr.hotio.dev/hotio/bazarr
networks:
- web-proxy
depends_on:
- sonarr
- radarr
restart: always
environment:
PUID: $PUID
PGID: $PGID
UMASK: 002
TZ: $TZ
volumes:
- $DOCKERDIR/bazarr/config:/config
- $DATAPOOL/media:/Media
ports:
- 6767:6767
labels:
caddy: http://bazarr.o
caddy.reverse_proxy: "{{upstreams 6767}}"
plugsy.name: Subs [Bazarr]
plugsy.link: http://bazarr.o/
plugsy.category: Media
##
##____________________ Lidarr [MEDIA/PVR-Music]
lidarr:
container_name: lidarr
image: cr.hotio.dev/hotio/lidarr
networks:
- web-proxy
depends_on:
- prowlarr
- qbittorrent
restart: always
environment:
PUID: $PUID
PGID: $PGID
UMASK: 002
TZ: $TZ
volumes:
- $DOCKERDIR/lidarr/config:/config
- $DATAPOOL/media:/Media
ports:
- 8686:8686
labels:
caddy: http://lidarr.o
caddy.reverse_proxy: "{{upstreams 8686}}"
plugsy.name: Music [Lidarr]
plugsy.link: http://lidarr.o/
plugsy.category: Media
#
networks:
  # external: created out-of-band (docker network create web-proxy); shared by Caddy
  # and every service it routes to via labels
  web-proxy:
    external: true
  # private bridge networks isolating the FileRun/OnlyOffice stack and Unbound
  filerun:
    driver: bridge
  unbound:
    driver: bridge