
Commit

Add production Docker configuration
Sébastien Dunne Fulmer authored and philipbelesky committed Dec 16, 2020
1 parent 75e12c6 commit 3570957
Showing 9 changed files with 266 additions and 14 deletions.
4 changes: 2 additions & 2 deletions Dockerfile
@@ -9,7 +9,7 @@ ENV PYTHONUNBUFFERED 1

# Setup Node/NPM
RUN apt-get update
RUN apt-get install -y curl
RUN apt-get install -y curl nginx
RUN curl -sL https://deb.nodesource.com/setup_12.x | bash -
RUN apt-get install -y nodejs

@@ -24,7 +24,7 @@ RUN git config --global url."https://".insteadOf git://

# Install our node/python requirements
RUN npm install -g [email protected]
RUN pip install -r ./config/requirements_core.txt
RUN pip install -r ./config/requirements_docker.txt
RUN npm install --only=production

# Compile all the static files
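The docker-compose.prod.yml added later in this commit expects this image to be tagged tabbycat:2.6.0-dev. As a rough sketch (docker-compose up --build does the same thing automatically), building it by hand from the repository root would be:

    # Build the production image by hand; the tag matches the one referenced
    # in docker-compose.prod.yml below
    docker build -t tabbycat:2.6.0-dev .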
12 changes: 12 additions & 0 deletions ProcfileMulti.docker
@@ -0,0 +1,12 @@
# This is the 'actual' Procfile run inside the Docker container (via Honcho)

# Nginx is the reverse proxy
# This allows static files to be served directly and to split traffic between
# the WSGI and ASGI servers
nginx: nginx -p . -c ./config/nginx.conf

# WSGI server handles the standard django routes; optimised for high performance
wsgi: gunicorn wsgi:application --config './config/gunicorn.conf'

# ASGI server handles the asynchronous routes (websockets)
asgi: python ./tabbycat/run-asgi.py
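The wsgi entry reads its settings from ./config/gunicorn.conf, which is not part of this diff. Judging from the wsgi_server upstream in config/nginx.conf below, an equivalent command-line invocation would look roughly like this sketch (the socket path is taken from nginx.conf; the worker count is an assumption, not the actual config):

    # Hypothetical equivalent of the gunicorn.conf referenced above: bind to the
    # unix socket that the wsgi_server upstream in config/nginx.conf expects.
    # The worker count is assumed; the real value lives in config/gunicorn.conf.
    gunicorn wsgi:application --bind unix:/tmp/wsgi.socket --workers 4 --log-file -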
14 changes: 14 additions & 0 deletions bin/docker-run-honcho.sh
@@ -0,0 +1,14 @@
#!/usr/bin/env bash
# Shorthand to migrate and run honcho in docker

cd tabbycat

# Migrate (can't do it during build; no db connection)
python ./manage.py migrate --no-input

# Needed to ensure daphne works properly
rm -f /tmp/asgi.socket /tmp/asgi.socket.lock

# Run honcho
cd ..
honcho -f ./ProcfileMulti.docker start
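Because migrations only run when this script starts the container, a one-off migration (or any other manage.py command) can also be run through the compose file added below; a sketch, assuming the db service from docker-compose.prod.yml is reachable:

    # Run a one-off migration against the prod stack (service names from
    # docker-compose.prod.yml; the container's working dir is /tcd)
    docker-compose -f docker-compose.prod.yml run --rm web \
        bash -c "cd tabbycat && python manage.py migrate --no-input"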
7 changes: 7 additions & 0 deletions bin/docker-run-worker.sh
@@ -0,0 +1,7 @@
#!/usr/bin/env bash
# Shorthand to run django worker in docker

cd tabbycat

# Run worker
python ./manage.py runworker notifications adjallocation venues
120 changes: 120 additions & 0 deletions config/nginx.conf
@@ -0,0 +1,120 @@
# This is customised from https://github.com/heroku/heroku-buildpack-nginx.git
# Done so in order to properly proxy to both the ASGI and WSGI servers
# Super useful template:
# https://github.com/CLClark/fcc-stock-trading-app/blob/9017f001255718c2e0fd24eb8267df02267d6cd8/config/nginx.conf.erb

daemon off;
# Heroku dynos have at least 4 cores.
worker_processes 4;

events {
use epoll;
accept_mutex on;
worker_connections 1024;
}

http {
gzip on;
gzip_comp_level 4;
gzip_min_length 1100;
gzip_proxied any;
gzip_types
text/css
text/javascript
text/xml
text/plain
application/javascript
application/x-javascript
application/json;

server_tokens off;

log_format l2met 'measure#nginx.service=$request_time request_id=$http_x_request_id';
access_log /dev/stdout;
error_log /dev/stdout info;

include /etc/nginx/mime.types;
default_type application/octet-stream;
sendfile on;

# Must read the body in 5 seconds.
client_body_timeout 5;

upstream wsgi_server {
server unix:/tmp/wsgi.socket fail_timeout=0;
}

upstream asgi_server {
server unix:/tmp/asgi.socket;
}

# As per https://www.nginx.com/blog/websocket-nginx/
map $http_upgrade $connection_upgrade {
default upgrade;
'' close; # TODO: try deleting this
}

# As per https://github.com/varspool/Wrench/issues/100
# This is needed to maintain the websocket connection; otherwise
# it closes in the limit defined by keepalive_timeout
# DOCS: Defines a timeout for reading a response from the proxied server. The timeout is set only between two successive read operations, not for the transmission of the whole response. If the proxied server does not transmit anything within this time, the connection is closed.
# In practice this seems to be essentially the maximum time a socket will live for
# Sending data seems to reset this timer. It is unclear how this interacts
# with the keepalive_timeout and the keep-alive settings enabled below
proxy_read_timeout 1800s; # 30m

server {
listen 8000;
server_name _;
# As set, this stops websocket connections from lasting longer than specified. TODO: return to default once other settings compensate?
# DOCS: A timeout during which a keep-alive client connection will stay open on the server side
keepalive_timeout 6000; # 100m

# Actual websocket paths to listen on
location /ws/ {
try_files $uri @proxy_to_ws;
}

location @proxy_to_ws {
# Repeating earlier Uvicorn config
# Don't set X-Forwarded-Port; it causes infinite redirects
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

proxy_redirect off;
proxy_buffering off;
proxy_pass http://asgi_server;

# Websocket specific changes
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;

# Enabling keep-alive
# As per https://ma.ttias.be/enable-keepalive-connections-in-nginx-upstream-proxy-configurations/
# Set at 100 minutes; may mean that passive pages which haven't
# received any socket data need to be refreshed after that time.
proxy_read_timeout 6000; # 100m
proxy_connect_timeout 6000; # 100m
proxy_set_header Connection "";
}

# Serve static files directly from the directory
location /static/ {
alias /tcd/tabbycat/staticfiles/;
autoindex on;
access_log off;
add_header Cache-Control "public";
expires 7d;
etag on;
}

# Pass to wsgi server
location / {
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_redirect off;
proxy_pass http://wsgi_server;
}
}
}
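Since this file is loaded with nginx -p . -c ./config/nginx.conf, its syntax can be sanity-checked inside the image without starting the whole stack; a sketch, assuming the image from the Dockerfile above has been built:

    # Test the nginx config from the container's working dir (/tcd), mirroring
    # the flags used in ProcfileMulti.docker plus -t to check syntax and exit
    docker-compose -f docker-compose.prod.yml run --rm web \
        nginx -p . -c ./config/nginx.conf -t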
8 changes: 8 additions & 0 deletions config/requirements_docker.txt
@@ -0,0 +1,8 @@
-r requirements_core.txt

# Serving
honcho==1.0.1 # Allows a Procfile to run multiple processes
gunicorn==20.0.4 # Primary server for wsgi

# Cache
django-redis==4.12.1 # Use redis for cache (on heroku; local optional)
65 changes: 65 additions & 0 deletions docker-compose.prod.yml
@@ -0,0 +1,65 @@
# Docker-compose is a way to run multiple containers at once and connect them
# This sets up and runs postgres, redis, honcho and the django worker as services
# Reference: https://docs.docker.com/compose/compose-file/

# Initial setup with
# $ docker-compose -f docker-compose.prod.yml up

# Can run management commands with
# $ docker-compose -f docker-compose.prod.yml run web python ./tabbycat/manage.py whatever

version: '3'

services:
db:
image: postgres:12
environment:
- POSTGRES_PASSWORD=tabbycat
- POSTGRES_USER=tabbycat
- POSTGRES_DB=tabbycat
volumes:
- pgdata:/var/lib/postgresql/data

redis:
image: redis:6
volumes:
- redis_data:/data

web:
build: .
image: tabbycat:2.6.0-dev
# Hack to wait until Postgres is up before running things
command: ["./bin/docker-wait.sh", "--timeout=0", "db:5432", "--", "./bin/docker-run-honcho.sh"]
depends_on:
- db
- redis
expose:
- "8000"
environment:
- DISABLE_SENTRY=1
- DOCKER_REDIS=1
- IN_DOCKER=1
ports:
- "127.0.0.1:8000:8000"
volumes:
- ./tabbycat/settings:/tcd/tabbycat/settings
working_dir: /tcd

worker:
image: tabbycat:2.6.0-dev
# Hack to wait until migration is done before running things
command: ["./bin/docker-wait.sh", "--timeout=0", "web:8000", "--", "./bin/docker-run-worker.sh"]
depends_on:
- db
- redis
environment:
- DISABLE_SENTRY=1
- DOCKER_REDIS=1
- IN_DOCKER=1
volumes:
- ./tabbycat/settings:/tcd/tabbycat/settings
working_dir: /tcd

volumes:
pgdata:
redis_data:
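Unlike the development compose file, this one has to be selected explicitly with -f. A usage sketch (these are standard docker-compose options, not anything defined in this commit):

    # Build the image and start db, redis, web (the honcho-managed
    # nginx/WSGI/ASGI processes) and the channels worker in the background.
    # The site is then served on http://127.0.0.1:8000 per the ports mapping above.
    docker-compose -f docker-compose.prod.yml up --build -d

    # Follow the honcho output from the web service
    docker-compose -f docker-compose.prod.yml logs -f web

    # Stop everything; the named pgdata and redis_data volumes are kept
    docker-compose -f docker-compose.prod.yml down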
12 changes: 7 additions & 5 deletions tabbycat/asgi.py
@@ -7,19 +7,21 @@
import os

import django
import sentry_sdk

from channels.routing import get_default_application
from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
from sentry_sdk.integrations.redis import RedisIntegration

from settings.core import TABBYCAT_VERSION

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")

if not os.environ.get('DISABLE_SENTRY'):
import sentry_sdk

from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
from sentry_sdk.integrations.redis import RedisIntegration

sentry_sdk.init(
dsn="https://[email protected]/185382",
integrations=[
38 changes: 31 additions & 7 deletions tabbycat/settings/docker.py
@@ -2,16 +2,40 @@
# Docker
# ==============================================================================

DEBUG = True # Just to be sure
import os

ALLOWED_HOSTS = ["*"]

DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'tabbycat',
'USER': 'tabbycat',
'PASSWORD': 'tabbycat',
'HOST': 'db',
'PORT': 5432, # Non-standard to prevent collisions,
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'tabbycat',
'USER': 'tabbycat',
'PASSWORD': 'tabbycat',
'HOST': 'db',
'PORT': 5432, # Non-standard to prevent collisions,
}
}

if bool(int(os.environ['DOCKER_REDIS'])) if 'DOCKER_REDIS' in os.environ else False:
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://redis:6379/1",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"SOCKET_CONNECT_TIMEOUT": 5,
"SOCKET_TIMEOUT": 60,
},
},
}

CHANNEL_LAYERS = {
"default": {
"BACKEND": "channels_redis.core.RedisChannelLayer",
"CONFIG": {
"hosts": [("redis", 6379)],
"group_expiry": 10800,
},
},
}
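Since DOCKER_REDIS gates both the cache and the channel layer, one way to confirm which cache backend was actually picked up is to ask Django inside the running web container; a sketch, assuming the stack from docker-compose.prod.yml is up:

    # Print the resolved cache settings from inside the web container
    # (manage.py lives under /tcd/tabbycat, hence the cd)
    docker-compose -f docker-compose.prod.yml exec web bash -c \
        'cd tabbycat && python manage.py shell -c "from django.conf import settings; print(settings.CACHES)"'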
