# This is a full-featured example for running bitmagnet through a gluetun VPN connection with Grafana/Prometheus observability services.
# See https://bitmagnet.io/setup/installation.html for a minimal docker-compose example.
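# Start the full stack with e.g. `docker compose up -d`, run from the directory containing this file.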
services:
  bitmagnet:
    container_name: bitmagnet
    image: ghcr.io/bitmagnet-io/bitmagnet:latest
    volumes:
      - ./config:/root/.config/bitmagnet
      # Mount data folder (currently only used for logs when file rotation is enabled):
      - ./data/bitmagnet:/root/.local/share/bitmagnet
    restart: unless-stopped
    environment:
      # Enable logging to rotating files for ingest to Loki:
      - LOG_FILE_ROTATOR_ENABLED=true
      - POSTGRES_HOST=postgres
      - POSTGRES_PASSWORD=postgres
      # - TMDB_API_KEY=your_api_key
    network_mode: service:gluetun
    depends_on:
      postgres:
        condition: service_healthy
    command:
      - worker
      - run
      # Run all workers:
      - --all
      # Or enable individual workers:
      # - --keys=http_server
      # - --keys=queue_server
      # - --keys=dht_crawler

  gluetun:
    container_name: bitmagnet-gluetun
    image: qmcgaw/gluetun:latest
    cap_add:
      - NET_ADMIN
    ports:
      # The bitmagnet ports must be exposed by the gluetun container:
      - "3333:3333"
      # BitTorrent ports:
      - "3334:3334/tcp"
      - "3334:3334/udp"
    environment:
      # Put your personal gluetun/VPN account config and credentials here:
      # (See https://github.com/qdm12/gluetun-wiki/tree/main/setup/providers)
      - VPN_SERVICE_PROVIDER=your_vpn_provider
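      # For example (a sketch only; the exact variables depend on your provider, see the gluetun wiki above):
      # - VPN_TYPE=wireguard
      # - WIREGUARD_PRIVATE_KEY=your_private_key
      # - SERVER_COUNTRIES=Netherlands
      # Or, for an OpenVPN-based provider:
      # - VPN_TYPE=openvpn
      # - OPENVPN_USER=your_username
      # - OPENVPN_PASSWORD=your_password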
    devices:
      # See https://github.com/qdm12/gluetun/issues/2538
      - /dev/net/tun:/dev/net/tun
    restart: always
    # Host names must be manually mapped here for bitmagnet to resolve them:
    extra_hosts:
      - "postgres:192.168.55.11"
    networks:
      bitmagnet:
        ipv4_address: 192.168.55.10

  postgres:
    image: postgres:16-alpine
    container_name: bitmagnet-postgres
    volumes:
      - ./data/postgres:/var/lib/postgresql/data
    networks:
      bitmagnet:
        ipv4_address: 192.168.55.11
    ports:
      - "5432:5432"
    shm_size: 1g
    restart: unless-stopped
    environment:
      - POSTGRES_PASSWORD=postgres
      - POSTGRES_DB=bitmagnet
      - PGUSER=postgres
    healthcheck:
      test: ["CMD-SHELL", "pg_isready"]
      interval: 10s
      start_period: 20s

  # Optional observability services:
  # (ensure you have the config files in the `observability` directory in your project root)
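  # Based on the volume mounts below, the expected files are:
  #   ./observability/grafana.datasources.yaml
  #   ./observability/grafana.dashboards.yaml
  #   ./observability/grafana-agent.config.river
  #   ./observability/prometheus.config.yaml
  #   ./observability/loki.config.yaml
  #   ./observability/pyroscope.config.yaml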
  # Visualization and dashboards:
  grafana:
    container_name: bitmagnet-grafana
    image: grafana/grafana:latest
    environment:
      - GF_PATHS_PROVISIONING=/etc/grafana/provisioning
      - GF_AUTH_ANONYMOUS_ENABLED=true
      - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
    volumes:
      - ./data/grafana:/var/lib/grafana
      # Provisioned data sources and dashboards:
      - ./observability/grafana.datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml
      - ./observability/grafana.dashboards.yaml:/etc/grafana/provisioning/dashboards/dashboards.yaml
      - ./observability/grafana-dashboards:/etc/dashboards
    ports:
      - "3000:3000"
    networks:
      - bitmagnet

  # Metrics and logs scraping:
  grafana-agent:
    container_name: bitmagnet-grafana-agent
    image: grafana/agent:latest
    volumes:
      - ./data/bitmagnet/logs:/var/log/bitmagnet
      # Mount the logs and metrics scraping configurations:
      - ./observability/grafana-agent.config.river:/etc/agent-config/config.river
    command:
      - run
      - /etc/agent-config/config.river
      - --server.http.listen-addr=0.0.0.0:12345
    environment:
      HOSTNAME: grafana-agent
      AGENT_MODE: flow
      LOKI_HOST: loki:3100
      POSTGRES_HOST: postgres:5432
      REDIS_HOST: redis:6379
    # The bitmagnet host must be mapped to the gluetun container:
    extra_hosts:
      - "bitmagnet:192.168.55.10"
    ports:
      - "12345:12345"
    depends_on:
      - gluetun
      - loki
    networks:
      - bitmagnet

  # Metrics storage and aggregation:
  prometheus:
    image: prom/prometheus:latest
    container_name: bitmagnet-prometheus
    command:
      - "--config.file=/etc/prometheus/prometheus.yml"
    ports:
      - "9090:9090"
    # The bitmagnet host must be mapped to the gluetun container:
    extra_hosts:
      - "bitmagnet:192.168.55.10"
    restart: unless-stopped
    volumes:
      - ./data/prometheus:/prometheus
      - ./observability/prometheus.config.yaml:/etc/prometheus/prometheus.yml
    networks:
      - bitmagnet

  # Log storage and aggregation:
  loki:
    container_name: bitmagnet-loki
    image: grafana/loki:latest
    ports:
      - "3100:3100"
    volumes:
      - ./data/loki:/loki
      - ./observability/loki.config.yaml:/etc/loki/local-config.yaml
    command: -config.file=/etc/loki/local-config.yaml
    networks:
      - bitmagnet

  # Runtime profiling:
  pyroscope:
    container_name: bitmagnet-pyroscope
    image: grafana/pyroscope:latest
    deploy:
      restart_policy:
        condition: on-failure
    ports:
      - "4040:4040"
    command:
      - "server"
    volumes:
      - ./observability/pyroscope.config.yaml:/etc/pyroscope/server.yaml
      - ./data/pyroscope:/var/lib/pyroscope
    networks:
      - bitmagnet

  # Prometheus metrics for Postgres:
  postgres-exporter:
    image: prometheuscommunity/postgres-exporter:latest
    container_name: bitmagnet-postgres-exporter
    ports:
      - "9187:9187"
    environment:
      DATA_SOURCE_NAME: "postgresql://postgres:postgres@postgres:5432/bitmagnet?sslmode=disable"
    links:
      - postgres
      - prometheus
    networks:
      - bitmagnet

networks:
  bitmagnet:
    driver: bridge
    ipam:
      driver: default
      config:
        - subnet: 192.168.55.0/24
          gateway: 192.168.55.1
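# The fixed subnet makes the static addresses used in the extra_hosts entries above predictable:
# 192.168.55.10 (bitmagnet, via gluetun) and 192.168.55.11 (postgres).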