From 6577b61ef51fef7ce3b0d0ea5e1a64f8939e96ab Mon Sep 17 00:00:00 2001 From: Josue Date: Wed, 22 Mar 2023 22:38:21 -0400 Subject: [PATCH] refactor: remove supabase, add postgres, fixed tests Co-authored-by: Josue --- .github/workflows/e2e-tests-ci.yml | 2 +- .gitignore | 3 + config/env.development | 6 +- config/env.production | 2 +- config/env.staging | 2 +- docker/development.yml | 14 +- docker/docker-compose.yml | 7 + docker/production.yml | 5 + docker/supabase/.gitignore | 5 - docker/supabase/backup/Dockerfile | 13 -- .../supabase/backup/cron-tasks/create-backup | 9 - docker/supabase/backup/restore-backup.sh | 27 --- docker/supabase/dev/data.sql | 48 ----- docker/supabase/dev/docker-compose.dev.yml | 20 --- docker/supabase/docker-compose.yml | 164 ------------------ docker/supabase/supabase-development.yml | 31 ---- docker/supabase/supabase-production.yml | 25 --- docker/supabase/volumes/api/kong.yml | 148 ---------------- .../volumes/db/init/00-initial-schema.sql | 48 ----- .../volumes/db/init/01-auth-schema.sql | 145 ---------------- .../volumes/db/init/02-storage-schema.sql | 116 ------------- .../volumes/db/init/03-post-setup.sql | 68 -------- pnpm-lock.yaml | 4 + src/api/parser/Dockerfile | 6 +- src/api/parser/env.local | 2 +- src/api/parser/package.json | 5 +- src/api/parser/schema.prisma | 81 +++++++++ src/api/parser/src/data/feed.js | 2 +- src/api/parser/src/parser.js | 2 +- .../utils/__mocks__/{supabase.js => db.js} | 12 +- src/api/parser/src/utils/db.js | 155 +++++++++++++++++ src/api/parser/src/utils/storage.js | 2 +- src/api/parser/src/utils/supabase.js | 123 ------------- src/api/parser/test/e2e/parser-flow.test.js | 2 +- src/api/parser/test/feed-processor.test.js | 4 +- src/api/parser/test/feed.test.js | 4 +- src/api/parser/test/post.test.js | 4 +- src/api/parser/test/storage.test.js | 4 +- 38 files changed, 299 insertions(+), 1021 deletions(-) delete mode 100644 docker/supabase/.gitignore delete mode 100644 docker/supabase/backup/Dockerfile delete mode 100755 docker/supabase/backup/cron-tasks/create-backup delete mode 100755 docker/supabase/backup/restore-backup.sh delete mode 100644 docker/supabase/dev/data.sql delete mode 100644 docker/supabase/dev/docker-compose.dev.yml delete mode 100644 docker/supabase/docker-compose.yml delete mode 100644 docker/supabase/supabase-development.yml delete mode 100644 docker/supabase/supabase-production.yml delete mode 100644 docker/supabase/volumes/api/kong.yml delete mode 100644 docker/supabase/volumes/db/init/00-initial-schema.sql delete mode 100644 docker/supabase/volumes/db/init/01-auth-schema.sql delete mode 100644 docker/supabase/volumes/db/init/02-storage-schema.sql delete mode 100644 docker/supabase/volumes/db/init/03-post-setup.sql create mode 100644 src/api/parser/schema.prisma rename src/api/parser/src/utils/__mocks__/{supabase.js => db.js} (95%) create mode 100644 src/api/parser/src/utils/db.js delete mode 100644 src/api/parser/src/utils/supabase.js diff --git a/.github/workflows/e2e-tests-ci.yml b/.github/workflows/e2e-tests-ci.yml index cc940502ee..223539fa5d 100644 --- a/.github/workflows/e2e-tests-ci.yml +++ b/.github/workflows/e2e-tests-ci.yml @@ -12,7 +12,7 @@ jobs: node-version: [16.x, 18.x] env: # Whichever Docker containers are needed to pass e2e tests, define them here - DOCKER_CONTAINERS: 'sso feed-discovery auth login kong rest meta elasticsearch redis posts traefik' + DOCKER_CONTAINERS: 'sso feed-discovery login elasticsearch redis posts traefik' # Postgres database connection string to run db migration 
DATABASE_URL: 'postgresql://postgres:your-super-secret-and-long-postgres-password@localhost/postgres' diff --git a/.gitignore b/.gitignore index 14839c0b22..902013fb9f 100644 --- a/.gitignore +++ b/.gitignore @@ -41,6 +41,9 @@ certs/ # Redis data storage redis-data/ +# Postgres +db + .pnpm-debug.log # Turborepo diff --git a/config/env.development b/config/env.development index 14a5187fff..1e478d4d15 100644 --- a/config/env.development +++ b/config/env.development @@ -15,7 +15,7 @@ DOCKER_BUILDKIT=1 # so it will work on Windows and Unix, see # https://docs.docker.com/compose/reference/envvars/#compose_file COMPOSE_PATH_SEPARATOR=; -COMPOSE_FILE=docker/docker-compose.yml;docker/development.yml;docker/supabase/docker-compose.yml;docker/supabase/supabase-development.yml +COMPOSE_FILE=docker/docker-compose.yml;docker/development.yml # The host where the Telescope 1.0 front-end and back-end are run. @@ -262,7 +262,7 @@ MAX_POSTS_PER_PAGE=5 # Supabase Services ################################################################################ -SUPABASE_URL=http://localhost/v1/supabase +SUPABASE_URL=postgresql://postgres:your-super-secret-and-long-postgres-password@db/postgres # Auth @@ -303,4 +303,4 @@ KONG_HTTPS_PORT=8912 POSTGRES_PORT=8913 ## DB connection string -DATABASE_URL=postgresql://postgres:your-super-secret-and-long-postgres-password@localhost/postgres +DATABASE_URL=postgresql://postgres:your-super-secret-and-long-postgres-password@db/postgres diff --git a/config/env.production b/config/env.production index 3f9b32f44e..af73bb78d8 100644 --- a/config/env.production +++ b/config/env.production @@ -15,7 +15,7 @@ DOCKER_BUILDKIT=1 # so it will work on Windows and Unix, see # https://docs.docker.com/compose/reference/envvars/#compose_file COMPOSE_PATH_SEPARATOR=; -COMPOSE_FILE=docker/docker-compose.yml;docker/production.yml;docker/supabase/docker-compose.yml;docker/supabase/supabase-production.yml +COMPOSE_FILE=docker/docker-compose.yml;docker/production.yml # The host where the Telescope 1.0 front-end and back-end are run. diff --git a/config/env.staging b/config/env.staging index 14212c9f0c..7f8aef7731 100644 --- a/config/env.staging +++ b/config/env.staging @@ -15,7 +15,7 @@ DOCKER_BUILDKIT=1 # so it will work on Windows and Unix, see # https://docs.docker.com/compose/reference/envvars/#compose_file COMPOSE_PATH_SEPARATOR=; -COMPOSE_FILE=docker/docker-compose.yml;docker/production.yml;docker/supabase/docker-compose.yml;docker/supabase/supabase-production.yml +COMPOSE_FILE=docker/docker-compose.yml;docker/production.yml # The host where the Telescope 1.0 front-end and back-end are run. diff --git a/docker/development.yml b/docker/development.yml index f7985ed65d..6255d09786 100644 --- a/docker/development.yml +++ b/docker/development.yml @@ -77,10 +77,6 @@ services: context: ../src/api/parser cache_from: - docker.cdot.systems/parser:buildcache - environment: - # In development and testing, the Parser service needs to contact the Supabase - # service directly via Docker vs through the http://localhost/v1/supabase domain. - - SUPABASE_URL=http://kong:8000 depends_on: - elasticsearch - traefik @@ -108,10 +104,6 @@ services: context: ../src/api/sso cache_from: - docker.cdot.systems/sso:buildcache - environment: - # In development and testing, the SSO service needs to contact the Supabase - # service directly via Docker vs through the http://localhost/v1/supabase domain. 
- - SUPABASE_URL=http://kong:8000 depends_on: - test-web-content - traefik @@ -140,3 +132,9 @@ services: context: ../src/api/dependency-discovery cache_from: - docker.cdot.systems/dependency-discovery:buildcache + + db: + ports: + - '5432:5432' + volumes: + - ../db:/var/lib/postgresql/data diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 5c6df3cf08..c0113ba800 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -331,3 +331,10 @@ services: - 'traefik.http.middlewares.rss_bridge_prefix.stripprefix.prefixes=/${API_VERSION}/rss-bridge' # Add our middleware to the router - 'traefik.http.routers.rss_bridge.middlewares=rss_bridge_redirect,rss_bridge_prefix' + + db: + image: postgres:15-alpine + container_name: 'db' + restart: unless-stopped + environment: + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} diff --git a/docker/production.yml b/docker/production.yml index a7cff8ed42..0c7549c1d9 100644 --- a/docker/production.yml +++ b/docker/production.yml @@ -169,3 +169,8 @@ services: ] depends_on: - studio + + db: + volumes: + # We keep the actual data in a volume outside of git, so it survives restarts + - ../../supabase/volumes/db/data:/var/lib/postgresql/data diff --git a/docker/supabase/.gitignore b/docker/supabase/.gitignore deleted file mode 100644 index e5c6762c07..0000000000 --- a/docker/supabase/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -volumes/db/data -volumes/db/init/data.sql -volumes/storage -.env -test.http diff --git a/docker/supabase/backup/Dockerfile b/docker/supabase/backup/Dockerfile deleted file mode 100644 index 7ffe6852aa..0000000000 --- a/docker/supabase/backup/Dockerfile +++ /dev/null @@ -1,13 +0,0 @@ -FROM alpine:3.15 - -# Install the Postgres client utilities -RUN apk add --no-cache postgresql14-client - -WORKDIR /backup-scripts - -COPY ./restore-backup.sh ./ - -# Create directory to hold the database dumps -RUN mkdir /var/opt/pg_dumps - -CMD ["crond", "-f", "-l", "8"] diff --git a/docker/supabase/backup/cron-tasks/create-backup b/docker/supabase/backup/cron-tasks/create-backup deleted file mode 100755 index a0edb76f9d..0000000000 --- a/docker/supabase/backup/cron-tasks/create-backup +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/sh - -TIMESTAMP="$(date +%Y-%m-%d-%H%M%S)" - -pg_dump --format=custom \ - --host=$PG_HOST_NAME \ - --port=$PG_HOST_PORT \ - --dbname="dbname=$PG_DB_NAME password=$PG_USER_PASSWORD" \ - --username=$PG_USER_NAME > "/var/opt/pg_dumps/$TIMESTAMP.dump" diff --git a/docker/supabase/backup/restore-backup.sh b/docker/supabase/backup/restore-backup.sh deleted file mode 100755 index 1d1cb70dd9..0000000000 --- a/docker/supabase/backup/restore-backup.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/sh - -# Usage of the script: -# -# ./restore-backup.sh -# e.g. -# ./restore-backup.sh /home/user/backups/Apr-02-2022-backup.dump - -BACKUP_FILE="$1" - -# The command may not drop and recreate the -# database if you do not close all connections -# available to it -# -# Most of the time, it is safe to get a few errors -# because the script will try to create tables that -# already exist. However, to avoid any weird -# complications, it is recommended that you drop and -# recreate the database. If you are using this, -# we already have backups, so you shouldn't worry -# too much if you drop the database. 
-pg_restore --format=custom \ - --clean \ - --if-exists \ - --enable-row-security \ - --disable-triggers \ - --dbname="host=$PG_HOST_NAME port=$PG_HOST_PORT dbname=$PG_DB_NAME user=$PG_USER_NAME password=$PG_USER_PASSWORD" < "$BACKUP_FILE" diff --git a/docker/supabase/dev/data.sql b/docker/supabase/dev/data.sql deleted file mode 100644 index 2328004184..0000000000 --- a/docker/supabase/dev/data.sql +++ /dev/null @@ -1,48 +0,0 @@ -create table profiles ( - id uuid references auth.users not null, - updated_at timestamp with time zone, - username text unique, - avatar_url text, - website text, - - primary key (id), - unique(username), - constraint username_length check (char_length(username) >= 3) -); - -alter table profiles enable row level security; - -create policy "Public profiles are viewable by the owner." - on profiles for select - using ( auth.uid() = id ); - -create policy "Users can insert their own profile." - on profiles for insert - with check ( auth.uid() = id ); - -create policy "Users can update own profile." - on profiles for update - using ( auth.uid() = id ); - --- Set up Realtime -begin; - drop publication if exists supabase_realtime; - create publication supabase_realtime; -commit; -alter publication supabase_realtime add table profiles; - --- Set up Storage -insert into storage.buckets (id, name) -values ('avatars', 'avatars'); - -create policy "Avatar images are publicly accessible." - on storage.objects for select - using ( bucket_id = 'avatars' ); - -create policy "Anyone can upload an avatar." - on storage.objects for insert - with check ( bucket_id = 'avatars' ); - -create policy "Anyone can update an avatar." - on storage.objects for update - with check ( bucket_id = 'avatars' ); diff --git a/docker/supabase/dev/docker-compose.dev.yml b/docker/supabase/dev/docker-compose.dev.yml deleted file mode 100644 index 8ea3772b4d..0000000000 --- a/docker/supabase/dev/docker-compose.dev.yml +++ /dev/null @@ -1,20 +0,0 @@ -version: '3.8' - -services: - mail: - container_name: supabase-mail - image: inbucket/inbucket:stable - ports: - - '2500:2500' # SMTP - - '9000:9000' # web interface - - '1100:1100' # POP3 - meta: - ports: - - 5555:8080 - db: - volumes: - - /var/lib/postgresql/data - - ./supabase/dev/data.sql:/docker-entrypoint-initdb.d/data.sql - storage: - volumes: - - /var/lib/storage diff --git a/docker/supabase/docker-compose.yml b/docker/supabase/docker-compose.yml deleted file mode 100644 index 86d8646b71..0000000000 --- a/docker/supabase/docker-compose.yml +++ /dev/null @@ -1,164 +0,0 @@ -# This is the main Supabase container definition, and is meant to -# be run with one of supabase-development.yml or supabase-production.yml. 
- -services: - studio: - container_name: supabase-studio - image: supabase/studio:latest - restart: unless-stopped - environment: - SUPABASE_URL: http://kong:8000 - STUDIO_PG_META_URL: http://meta:8080 - SUPABASE_REST_URL: ${SUPABASE_URL}/rest/v1/ - SUPABASE_ANON_KEY: ${ANON_KEY} - SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY} - - kong: - container_name: supabase-kong - image: kong:2.1 - restart: unless-stopped - environment: - KONG_DATABASE: 'off' - KONG_DECLARATIVE_CONFIG: /var/lib/kong/kong.yml - # https://github.com/supabase/cli/issues/14 - KONG_DNS_ORDER: LAST,A,CNAME - KONG_PLUGINS: request-transformer,cors,key-auth,acl - depends_on: - - traefik - labels: - # Enable Traefik - - 'traefik.enable=true' - # Traefik routing for the kong service at /v1/supabase - - 'traefik.http.routers.kong.rule=PathPrefix(`/${API_VERSION}/supabase`)' - # Specify the kong service port - - 'traefik.http.services.kong.loadbalancer.server.port=8000' - # Add middleware to this route to strip the /v1/supabase prefix - - 'traefik.http.middlewares.strip_kong_prefix.stripprefix.prefixes=/${API_VERSION}/supabase' - - 'traefik.http.middlewares.strip_kong_prefix.stripprefix.forceSlash=true' - - 'traefik.http.routers.kong.middlewares=strip_kong_prefix' - - auth: - container_name: supabase-auth - image: supabase/gotrue:v2.5.22 - depends_on: - - db - restart: unless-stopped - environment: - GOTRUE_API_HOST: 0.0.0.0 - GOTRUE_API_PORT: 9999 - - GOTRUE_DB_DRIVER: postgres - GOTRUE_DB_DATABASE_URL: postgres://postgres:${POSTGRES_PASSWORD}@db:5432/postgres?search_path=auth - - GOTRUE_SITE_URL: ${SITE_URL} - GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS} - GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP} - - GOTRUE_JWT_SECRET: ${JWT_SECRET} - GOTRUE_JWT_EXP: ${JWT_EXPIRY} - GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated - - GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP} - GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM} - GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL} - GOTRUE_SMTP_HOST: ${SMTP_HOST} - GOTRUE_SMTP_PORT: ${SMTP_PORT} - GOTRUE_SMTP_USER: ${SMTP_USER} - GOTRUE_SMTP_PASS: ${SMTP_PASS} - GOTRUE_SMTP_SENDER_NAME: ${SMTP_SENDER_NAME} - GOTRUE_MAILER_URLPATHS_INVITE: /auth/v1/verify - GOTRUE_MAILER_URLPATHS_CONFIRMATION: /auth/v1/verify - GOTRUE_MAILER_URLPATHS_RECOVERY: /auth/v1/verify - GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: /auth/v1/verify - - GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP} - GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM} - - rest: - container_name: supabase-rest - image: postgrest/postgrest:v9.0.0 - depends_on: - - db - restart: unless-stopped - environment: - PGRST_DB_URI: postgres://postgres:${POSTGRES_PASSWORD}@db:5432/postgres - PGRST_DB_SCHEMAS: public,storage - PGRST_DB_ANON_ROLE: anon - PGRST_JWT_SECRET: ${JWT_SECRET} - PGRST_DB_USE_LEGACY_GUCS: 'false' - - realtime: - container_name: supabase-realtime - image: supabase/realtime:v0.19.3 - depends_on: - - db - restart: unless-stopped - environment: - DB_HOST: db - DB_PORT: 5432 - DB_NAME: postgres - DB_USER: postgres - DB_PASSWORD: ${POSTGRES_PASSWORD} - DB_SSL: 'false' - PORT: 4000 - JWT_SECRET: ${JWT_SECRET} - REPLICATION_MODE: RLS - REPLICATION_POLL_INTERVAL: 100 - SECURE_CHANNELS: 'true' - SLOT_NAME: supabase_realtime_rls - TEMPORARY_SLOT: 'true' - command: > - bash -c "./prod/rel/realtime/bin/realtime eval Realtime.Release.migrate - && ./prod/rel/realtime/bin/realtime start" - - storage: - container_name: supabase-storage - image: supabase/storage-api:v0.10.0 - depends_on: - - db - - rest - restart: unless-stopped - 
environment: - ANON_KEY: ${ANON_KEY} - SERVICE_KEY: ${SERVICE_ROLE_KEY} - POSTGREST_URL: http://rest:3000 - PGRST_JWT_SECRET: ${JWT_SECRET} - DATABASE_URL: postgres://postgres:${POSTGRES_PASSWORD}@db:5432/postgres - PGOPTIONS: -c search_path=storage,public - FILE_SIZE_LIMIT: 52428800 - STORAGE_BACKEND: file - FILE_STORAGE_BACKEND_PATH: /var/lib/storage - TENANT_ID: stub - # TODO: https://github.com/supabase/storage-api/issues/55 - REGION: stub - GLOBAL_S3_BUCKET: stub - - meta: - container_name: supabase-meta - image: supabase/postgres-meta:v0.33.3 - depends_on: - - db - restart: unless-stopped - environment: - PG_META_PORT: 8080 - PG_META_DB_HOST: db - PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD} - - db: - container_name: supabase-db - image: supabase/postgres:14.1.0 - command: postgres -c config_file=/etc/postgresql/postgresql.conf - restart: unless-stopped - environment: - POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} - - pg_backup: - build: ./supabase/backup - container_name: 'pg-backup-cron-job' - depends_on: - - db - environment: - PG_HOST_NAME: db - PG_DB_NAME: postgres - PG_USER_NAME: postgres - PG_USER_PASSWORD: ${POSTGRES_PASSWORD} diff --git a/docker/supabase/supabase-development.yml b/docker/supabase/supabase-development.yml deleted file mode 100644 index fbeb051811..0000000000 --- a/docker/supabase/supabase-development.yml +++ /dev/null @@ -1,31 +0,0 @@ -# This is meant to be used with ./docker-compose.yml in development -services: - studio: - ports: - - ${STUDIO_PORT}:3000/tcp - - storage: - volumes: - - ./supabase/volumes/storage:/var/lib/storage - - db: - ports: - # XXX: we need access to the postgres port on localhost for our e2e tests in dev, - # since they need to wait on the postgres db to become ready before starting. - # See src/api/sso/jest.config.e2e.js - - '5432:5432' - volumes: - - ./supabase/volumes/db/init:/docker-entrypoint-initdb.d - - pg_backup: - environment: - PG_HOST_PORT: 5432 - volumes: - # Directory that will contain the database dumps - - ./supabase/volumes/backups/:/var/opt/pg_dumps/ - # Directory where all daily cron task scripts reside - - ./supabase/backup/cron-tasks:/etc/periodic/daily:ro - - kong: - volumes: - - ./supabase/volumes/api/kong.yml:/var/lib/kong/kong.yml diff --git a/docker/supabase/supabase-production.yml b/docker/supabase/supabase-production.yml deleted file mode 100644 index e86050b8e2..0000000000 --- a/docker/supabase/supabase-production.yml +++ /dev/null @@ -1,25 +0,0 @@ -# This is meant to be used with ./docker-compose.yml in production -services: - storage: - volumes: - - ../../supabase/volumes/storage:/var/lib/storage - - db: - volumes: - # We keep the actual data in a volume outside of git, so it survives restarts - - ../../supabase/volumes/db/data:/var/lib/postgresql/data - # We pull in .sql migrations, schema files from the git repo - - ./supabase/volumes/db/init:/docker-entrypoint-initdb.d - - pg_backup: - environment: - PG_HOST_PORT: 5432 - volumes: - # Directory that will contain the database dumps - - ../../supabase/volumes/backups/:/var/opt/pg_dumps/ - # Directory where all daily cron task scripts reside - - ./supabase/backup/cron-tasks:/etc/periodic/daily:ro - - kong: - volumes: - - ../../config/kong.yml:/var/lib/kong/kong.yml diff --git a/docker/supabase/volumes/api/kong.yml b/docker/supabase/volumes/api/kong.yml deleted file mode 100644 index 8cf7f3ceda..0000000000 --- a/docker/supabase/volumes/api/kong.yml +++ /dev/null @@ -1,148 +0,0 @@ -_format_version: '1.1' - -### -### Consumers / Users -### -consumers: - - username: 
anon - keyauth_credentials: - - key: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE - - username: service_role - keyauth_credentials: - - key: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q - -### -### Access Control List -### -acls: - - consumer: anon - group: anon - - consumer: service_role - group: admin - -### -### API Routes -### -services: - ## Open Auth routes - - name: auth-v1-open - url: http://auth:9999/verify - routes: - - name: auth-v1-open - strip_path: true - paths: - - /auth/v1/verify - plugins: - - name: cors - - name: auth-v1-open-callback - url: http://auth:9999/callback - routes: - - name: auth-v1-open-callback - strip_path: true - paths: - - /auth/v1/callback - plugins: - - name: cors - - name: auth-v1-open-authorize - url: http://auth:9999/authorize - routes: - - name: auth-v1-open-authorize - strip_path: true - paths: - - /auth/v1/authorize - plugins: - - name: cors - - ## Secure Auth routes - - name: auth-v1 - _comment: 'GoTrue: /auth/v1/* -> http://auth:9999/*' - url: http://auth:9999/ - routes: - - name: auth-v1-all - strip_path: true - paths: - - /auth/v1/ - plugins: - - name: cors - - name: key-auth - config: - hide_credentials: false - - name: acl - config: - hide_groups_header: true - allow: - - admin - - anon - - ## Secure REST routes - - name: rest-v1 - _comment: 'PostgREST: /rest/v1/* -> http://rest:3000/*' - url: http://rest:3000/ - routes: - - name: rest-v1-all - strip_path: true - paths: - - /rest/v1/ - plugins: - - name: cors - - name: key-auth - config: - hide_credentials: true - - name: acl - config: - hide_groups_header: true - allow: - - admin - - anon - - ## Secure Realtime routes - - name: realtime-v1 - _comment: 'Realtime: /realtime/v1/* -> ws://realtime:4000/socket/*' - url: http://realtime:4000/socket/ - routes: - - name: realtime-v1-all - strip_path: true - paths: - - /realtime/v1/ - plugins: - - name: cors - - name: key-auth - config: - hide_credentials: false - - name: acl - config: - hide_groups_header: true - allow: - - admin - - anon - - ## Storage routes: the storage server manages its own auth - - name: storage-v1 - _comment: 'Storage: /storage/v1/* -> http://storage:5000/*' - url: http://storage:5000/ - routes: - - name: storage-v1-all - strip_path: true - paths: - - /storage/v1/ - plugins: - - name: cors - - ## Secure Database routes - - name: meta - _comment: 'pg-meta: /pg/* -> http://pg-meta:8080/*' - url: http://meta:8080/ - routes: - - name: meta-all - strip_path: true - paths: - - /pg/ - plugins: - - name: key-auth - config: - hide_credentials: false - - name: acl - config: - hide_groups_header: true - allow: - - admin diff --git a/docker/supabase/volumes/db/init/00-initial-schema.sql b/docker/supabase/volumes/db/init/00-initial-schema.sql deleted file mode 100644 index 474b866773..0000000000 --- a/docker/supabase/volumes/db/init/00-initial-schema.sql +++ /dev/null @@ -1,48 +0,0 @@ --- Set up realtime -create schema if not exists realtime; --- create publication supabase_realtime; -- defaults to empty publication -create publication supabase_realtime; - --- Supabase super admin -create user supabase_admin; -alter user supabase_admin with superuser createdb createrole replication bypassrls; - 
--- Extension namespacing -create schema if not exists extensions; -create extension if not exists "uuid-ossp" with schema extensions; -create extension if not exists pgcrypto with schema extensions; -create extension if not exists pgjwt with schema extensions; - --- Set up auth roles for the developer -create role anon nologin noinherit; -create role authenticated nologin noinherit; -- "logged in" user: web_user, app_user, etc -create role service_role nologin noinherit bypassrls; -- allow developers to create JWT's that bypass their policies - -create user authenticator noinherit; -grant anon to authenticator; -grant authenticated to authenticator; -grant service_role to authenticator; -grant supabase_admin to authenticator; - -grant usage on schema public to postgres, anon, authenticated, service_role; -alter default privileges in schema public grant all on tables to postgres, anon, authenticated, service_role; -alter default privileges in schema public grant all on functions to postgres, anon, authenticated, service_role; -alter default privileges in schema public grant all on sequences to postgres, anon, authenticated, service_role; - --- Allow Extensions to be used in the API -grant usage on schema extensions to postgres, anon, authenticated, service_role; - --- Set up namespacing -alter user supabase_admin SET search_path TO public, extensions; -- don't include the "auth" schema - --- These are required so that the users receive grants whenever "supabase_admin" creates tables/function -alter default privileges for user supabase_admin in schema public grant all - on sequences to postgres, anon, authenticated, service_role; -alter default privileges for user supabase_admin in schema public grant all - on tables to postgres, anon, authenticated, service_role; -alter default privileges for user supabase_admin in schema public grant all - on functions to postgres, anon, authenticated, service_role; - --- Set short statement/query timeouts for API roles -alter role anon set statement_timeout = '3s'; -alter role authenticated set statement_timeout = '8s'; diff --git a/docker/supabase/volumes/db/init/01-auth-schema.sql b/docker/supabase/volumes/db/init/01-auth-schema.sql deleted file mode 100644 index 3544f9afb0..0000000000 --- a/docker/supabase/volumes/db/init/01-auth-schema.sql +++ /dev/null @@ -1,145 +0,0 @@ - -CREATE SCHEMA IF NOT EXISTS auth AUTHORIZATION supabase_admin; - --- auth.users definition - -CREATE TABLE auth.users ( - instance_id uuid NULL, - id uuid NOT NULL UNIQUE, - aud varchar(255) NULL, - "role" varchar(255) NULL, - email varchar(255) NULL UNIQUE, - encrypted_password varchar(255) NULL, - confirmed_at timestamptz NULL, - invited_at timestamptz NULL, - confirmation_token varchar(255) NULL, - confirmation_sent_at timestamptz NULL, - recovery_token varchar(255) NULL, - recovery_sent_at timestamptz NULL, - email_change_token varchar(255) NULL, - email_change varchar(255) NULL, - email_change_sent_at timestamptz NULL, - last_sign_in_at timestamptz NULL, - raw_app_meta_data jsonb NULL, - raw_user_meta_data jsonb NULL, - is_super_admin bool NULL, - created_at timestamptz NULL, - updated_at timestamptz NULL, - CONSTRAINT users_pkey PRIMARY KEY (id) -); -CREATE INDEX users_instance_id_email_idx ON auth.users USING btree (instance_id, email); -CREATE INDEX users_instance_id_idx ON auth.users USING btree (instance_id); -comment on table auth.users is 'Auth: Stores user login data within a secure schema.'; - --- auth.refresh_tokens definition - -CREATE TABLE auth.refresh_tokens ( - 
instance_id uuid NULL, - id bigserial NOT NULL, - "token" varchar(255) NULL, - user_id varchar(255) NULL, - revoked bool NULL, - created_at timestamptz NULL, - updated_at timestamptz NULL, - CONSTRAINT refresh_tokens_pkey PRIMARY KEY (id) -); -CREATE INDEX refresh_tokens_instance_id_idx ON auth.refresh_tokens USING btree (instance_id); -CREATE INDEX refresh_tokens_instance_id_user_id_idx ON auth.refresh_tokens USING btree (instance_id, user_id); -CREATE INDEX refresh_tokens_token_idx ON auth.refresh_tokens USING btree (token); -comment on table auth.refresh_tokens is 'Auth: Store of tokens used to refresh JWT tokens once they expire.'; - --- auth.instances definition - -CREATE TABLE auth.instances ( - id uuid NOT NULL, - uuid uuid NULL, - raw_base_config text NULL, - created_at timestamptz NULL, - updated_at timestamptz NULL, - CONSTRAINT instances_pkey PRIMARY KEY (id) -); -comment on table auth.instances is 'Auth: Manages users across multiple sites.'; - --- auth.audit_log_entries definition - -CREATE TABLE auth.audit_log_entries ( - instance_id uuid NULL, - id uuid NOT NULL, - payload json NULL, - created_at timestamptz NULL, - CONSTRAINT audit_log_entries_pkey PRIMARY KEY (id) -); -CREATE INDEX audit_logs_instance_id_idx ON auth.audit_log_entries USING btree (instance_id); -comment on table auth.audit_log_entries is 'Auth: Audit trail for user actions.'; - --- auth.schema_migrations definition - -CREATE TABLE auth.schema_migrations ( - "version" varchar(255) NOT NULL, - CONSTRAINT schema_migrations_pkey PRIMARY KEY ("version") -); -comment on table auth.schema_migrations is 'Auth: Manages updates to the auth system.'; - -INSERT INTO auth.schema_migrations (version) -VALUES ('20171026211738'), - ('20171026211808'), - ('20171026211834'), - ('20180103212743'), - ('20180108183307'), - ('20180119214651'), - ('20180125194653'); - -create or replace function auth.uid() -returns uuid -language sql stable -as $$ - select - coalesce( - current_setting('request.jwt.claim.sub', true), - (current_setting('request.jwt.claims', true)::jsonb ->> 'sub') - )::uuid -$$; - -create or replace function auth.role() -returns text -language sql stable -as $$ - select - coalesce( - current_setting('request.jwt.claim.role', true), - (current_setting('request.jwt.claims', true)::jsonb ->> 'role') - )::text -$$; - -create or replace function auth.email() -returns text -language sql stable -as $$ - select - coalesce( - current_setting('request.jwt.claim.email', true), - (current_setting('request.jwt.claims', true)::jsonb ->> 'email') - )::text -$$; - --- usage on auth functions to API roles -GRANT USAGE ON SCHEMA auth TO anon, authenticated, service_role; - --- Supabase super admin -CREATE USER supabase_auth_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; -GRANT ALL PRIVILEGES ON SCHEMA auth TO supabase_auth_admin; -GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA auth TO supabase_auth_admin; -GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA auth TO supabase_auth_admin; -ALTER USER supabase_auth_admin SET search_path = "auth"; -ALTER table "auth".users OWNER TO supabase_auth_admin; -ALTER table "auth".refresh_tokens OWNER TO supabase_auth_admin; -ALTER table "auth".audit_log_entries OWNER TO supabase_auth_admin; -ALTER table "auth".instances OWNER TO supabase_auth_admin; -ALTER table "auth".schema_migrations OWNER TO supabase_auth_admin; - -ALTER FUNCTION "auth"."uid" OWNER TO supabase_auth_admin; -ALTER FUNCTION "auth"."role" OWNER TO supabase_auth_admin; -ALTER FUNCTION "auth"."email" OWNER TO supabase_auth_admin; 
-GRANT EXECUTE ON FUNCTION "auth"."uid"() TO PUBLIC; -GRANT EXECUTE ON FUNCTION "auth"."role"() TO PUBLIC; -GRANT EXECUTE ON FUNCTION "auth"."email"() TO PUBLIC; diff --git a/docker/supabase/volumes/db/init/02-storage-schema.sql b/docker/supabase/volumes/db/init/02-storage-schema.sql deleted file mode 100644 index ba891b018b..0000000000 --- a/docker/supabase/volumes/db/init/02-storage-schema.sql +++ /dev/null @@ -1,116 +0,0 @@ -CREATE SCHEMA IF NOT EXISTS storage AUTHORIZATION supabase_admin; - -grant usage on schema storage to postgres, anon, authenticated, service_role; -alter default privileges in schema storage grant all on tables to postgres, anon, authenticated, service_role; -alter default privileges in schema storage grant all on functions to postgres, anon, authenticated, service_role; -alter default privileges in schema storage grant all on sequences to postgres, anon, authenticated, service_role; - -CREATE TABLE "storage"."buckets" ( - "id" text not NULL, - "name" text NOT NULL, - "owner" uuid, - "created_at" timestamptz DEFAULT now(), - "updated_at" timestamptz DEFAULT now(), - CONSTRAINT "buckets_owner_fkey" FOREIGN KEY ("owner") REFERENCES "auth"."users"("id"), - PRIMARY KEY ("id") -); -CREATE UNIQUE INDEX "bname" ON "storage"."buckets" USING BTREE ("name"); - -CREATE TABLE "storage"."objects" ( - "id" uuid NOT NULL DEFAULT extensions.uuid_generate_v4(), - "bucket_id" text, - "name" text, - "owner" uuid, - "created_at" timestamptz DEFAULT now(), - "updated_at" timestamptz DEFAULT now(), - "last_accessed_at" timestamptz DEFAULT now(), - "metadata" jsonb, - CONSTRAINT "objects_bucketId_fkey" FOREIGN KEY ("bucket_id") REFERENCES "storage"."buckets"("id"), - CONSTRAINT "objects_owner_fkey" FOREIGN KEY ("owner") REFERENCES "auth"."users"("id"), - PRIMARY KEY ("id") -); -CREATE UNIQUE INDEX "bucketid_objname" ON "storage"."objects" USING BTREE ("bucket_id","name"); -CREATE INDEX name_prefix_search ON storage.objects(name text_pattern_ops); - -ALTER TABLE storage.objects ENABLE ROW LEVEL SECURITY; - -CREATE FUNCTION storage.foldername(name text) - RETURNS text[] - LANGUAGE plpgsql -AS $function$ -DECLARE -_parts text[]; -BEGIN - select string_to_array(name, '/') into _parts; - return _parts[1:array_length(_parts,1)-1]; -END -$function$; - -CREATE FUNCTION storage.filename(name text) - RETURNS text - LANGUAGE plpgsql -AS $function$ -DECLARE -_parts text[]; -BEGIN - select string_to_array(name, '/') into _parts; - return _parts[array_length(_parts,1)]; -END -$function$; - -CREATE FUNCTION storage.extension(name text) - RETURNS text - LANGUAGE plpgsql -AS $function$ -DECLARE -_parts text[]; -_filename text; -BEGIN - select string_to_array(name, '/') into _parts; - select _parts[array_length(_parts,1)] into _filename; - -- @todo return the last part instead of 2 - return split_part(_filename, '.', 2); -END -$function$; - -CREATE FUNCTION storage.search(prefix text, bucketname text, limits int DEFAULT 100, levels int DEFAULT 1, offsets int DEFAULT 0) - RETURNS TABLE ( - name text, - id uuid, - updated_at TIMESTAMPTZ, - created_at TIMESTAMPTZ, - last_accessed_at TIMESTAMPTZ, - metadata jsonb - ) - LANGUAGE plpgsql -AS $function$ -DECLARE -_bucketId text; -BEGIN - -- will be replaced by migrations when server starts - -- saving space for cloud-init -END -$function$; - --- create migrations table --- https://github.com/ThomWright/postgres-migrations/blob/master/src/migrations/0_create-migrations-table.sql --- we add this table here and not let it be auto-created so that the permissions are 
properly applied to it -CREATE TABLE IF NOT EXISTS storage.migrations ( - id integer PRIMARY KEY, - name varchar(100) UNIQUE NOT NULL, - hash varchar(40) NOT NULL, -- sha1 hex encoded hash of the file name and contents, to ensure it hasn't been altered since applying the migration - executed_at timestamp DEFAULT current_timestamp -); - -CREATE USER supabase_storage_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; -GRANT ALL PRIVILEGES ON SCHEMA storage TO supabase_storage_admin; -GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA storage TO supabase_storage_admin; -GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA storage TO supabase_storage_admin; -ALTER USER supabase_storage_admin SET search_path = "storage"; -ALTER table "storage".objects owner to supabase_storage_admin; -ALTER table "storage".buckets owner to supabase_storage_admin; -ALTER table "storage".migrations OWNER TO supabase_storage_admin; -ALTER function "storage".foldername(text) owner to supabase_storage_admin; -ALTER function "storage".filename(text) owner to supabase_storage_admin; -ALTER function "storage".extension(text) owner to supabase_storage_admin; -ALTER function "storage".search(text,text,int,int,int) owner to supabase_storage_admin; diff --git a/docker/supabase/volumes/db/init/03-post-setup.sql b/docker/supabase/volumes/db/init/03-post-setup.sql deleted file mode 100644 index 64dc7399fd..0000000000 --- a/docker/supabase/volumes/db/init/03-post-setup.sql +++ /dev/null @@ -1,68 +0,0 @@ -ALTER ROLE postgres SET search_path TO "\$user",public,extensions; -CREATE OR REPLACE FUNCTION extensions.notify_api_restart() -RETURNS event_trigger -LANGUAGE plpgsql -AS $$ -BEGIN - NOTIFY pgrst, 'reload schema'; -END; -$$; -CREATE EVENT TRIGGER api_restart ON ddl_command_end -EXECUTE PROCEDURE extensions.notify_api_restart(); -COMMENT ON FUNCTION extensions.notify_api_restart IS 'Sends a notification to the API to restart. 
If your database schema has changed, this is required so that Supabase can rebuild the relationships.'; - --- Trigger for pg_cron -CREATE OR REPLACE FUNCTION extensions.grant_pg_cron_access() -RETURNS event_trigger -LANGUAGE plpgsql -AS $$ -DECLARE - schema_is_cron bool; -BEGIN - schema_is_cron = ( - SELECT n.nspname = 'cron' - FROM pg_event_trigger_ddl_commands() AS ev - LEFT JOIN pg_catalog.pg_namespace AS n - ON ev.objid = n.oid - ); - - IF schema_is_cron - THEN - grant usage on schema cron to postgres with grant option; - - alter default privileges in schema cron grant all on tables to postgres with grant option; - alter default privileges in schema cron grant all on functions to postgres with grant option; - alter default privileges in schema cron grant all on sequences to postgres with grant option; - - alter default privileges for user supabase_admin in schema cron grant all - on sequences to postgres with grant option; - alter default privileges for user supabase_admin in schema cron grant all - on tables to postgres with grant option; - alter default privileges for user supabase_admin in schema cron grant all - on functions to postgres with grant option; - - grant all privileges on all tables in schema cron to postgres with grant option; - - END IF; - -END; -$$; -CREATE EVENT TRIGGER issue_pg_cron_access ON ddl_command_end WHEN TAG in ('CREATE SCHEMA') -EXECUTE PROCEDURE extensions.grant_pg_cron_access(); -COMMENT ON FUNCTION extensions.grant_pg_cron_access IS 'Grants access to pg_cron'; - --- Supabase dashboard user -CREATE ROLE dashboard_user NOSUPERUSER CREATEDB CREATEROLE REPLICATION; -GRANT ALL ON DATABASE postgres TO dashboard_user; -GRANT ALL ON SCHEMA auth TO dashboard_user; -GRANT ALL ON SCHEMA extensions TO dashboard_user; -GRANT ALL ON SCHEMA storage TO dashboard_user; -GRANT ALL ON ALL TABLES IN SCHEMA auth TO dashboard_user; -GRANT ALL ON ALL TABLES IN SCHEMA extensions TO dashboard_user; --- GRANT ALL ON ALL TABLES IN SCHEMA storage TO dashboard_user; -GRANT ALL ON ALL SEQUENCES IN SCHEMA auth TO dashboard_user; -GRANT ALL ON ALL SEQUENCES IN SCHEMA storage TO dashboard_user; -GRANT ALL ON ALL SEQUENCES IN SCHEMA extensions TO dashboard_user; -GRANT ALL ON ALL ROUTINES IN SCHEMA auth TO dashboard_user; -GRANT ALL ON ALL ROUTINES IN SCHEMA storage TO dashboard_user; -GRANT ALL ON ALL ROUTINES IN SCHEMA extensions TO dashboard_user; diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index b29d573d43..0221710ba3 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -148,6 +148,7 @@ importers: '@babel/core': 7.17.10 '@bull-board/api': 3.10.7 '@bull-board/express': 3.10.4 + '@prisma/client': 4.9.0 '@senecacdot/eslint-config-telescope': 1.1.0 '@senecacdot/satellite': ^1.29.0 '@supabase/supabase-js': 1.29.4 @@ -164,6 +165,7 @@ importers: nock: 13.2.4 nodemon: 2.0.16 normalize-url: 6.1.0 + prisma: 4.9.0 rimraf: 3.0.2 rss-parser: 3.12.0 sanitize-html: 2.5.3 @@ -171,6 +173,7 @@ importers: '@babel/core': 7.17.10 '@bull-board/api': 3.10.7 '@bull-board/express': 3.10.4 + '@prisma/client': 4.9.0_prisma@4.9.0 '@senecacdot/satellite': 1.29.0 '@supabase/supabase-js': 1.29.4 bull: 3.29.3 @@ -180,6 +183,7 @@ importers: linkify-html: 3.0.5_linkifyjs@3.0.5 linkifyjs: 3.0.5 normalize-url: 6.1.0 + prisma: 4.9.0 rss-parser: 3.12.0 sanitize-html: 2.5.3 devDependencies: diff --git a/src/api/parser/Dockerfile b/src/api/parser/Dockerfile index 3693edbf46..d05d2983a5 100644 --- a/src/api/parser/Dockerfile +++ b/src/api/parser/Dockerfile @@ -1,6 +1,6 @@ ## Base 
########################################################################### # Set up the base layer -FROM node:16 as base +FROM node:16-alpine3.15 as base RUN npm i -g pnpm @@ -12,11 +12,13 @@ FROM base as dependencies COPY package.json ./ +COPY ./schema.prisma . + RUN pnpm install --prod ## Deploy ######################################################################## # Stage for running our app -FROM node:16-alpine3.15 as deploy +FROM base as deploy WORKDIR /app diff --git a/src/api/parser/env.local b/src/api/parser/env.local index 7edaa48c31..3b4555c4c2 100644 --- a/src/api/parser/env.local +++ b/src/api/parser/env.local @@ -38,6 +38,6 @@ PARSER_PORT = 10000 ################################################################################ # Supabase Secrets -SUPABASE_URL=http://localhost/v1/supabase +SUPABASE_URL="postgresql://postgres:your-super-secret-and-long-postgres-password@localhost/postgres" SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q diff --git a/src/api/parser/package.json b/src/api/parser/package.json index cef50562cf..07e2564103 100644 --- a/src/api/parser/package.json +++ b/src/api/parser/package.json @@ -12,7 +12,8 @@ "lint": "pnpm eslint", "lint-time": "pnpm eslint-time", "test": "jest -c jest.config.js", - "clean": "pnpm rimraf .turbo node_modules || pnpm rimraf node_modules" + "clean": "pnpm rimraf .turbo node_modules || pnpm rimraf node_modules", + "postinstall": "prisma generate --schema=./schema.prisma" }, "repository": "Seneca-CDOT/telescope", "license": "BSD-2-Clause", @@ -33,6 +34,8 @@ "linkify-html": "3.0.5", "linkifyjs": "3.0.5", "normalize-url": "6.1.0", + "prisma": "4.9.0", + "@prisma/client": "4.9.0", "rss-parser": "3.12.0", "sanitize-html": "2.5.3" }, diff --git a/src/api/parser/schema.prisma b/src/api/parser/schema.prisma new file mode 100644 index 0000000000..1683891226 --- /dev/null +++ b/src/api/parser/schema.prisma @@ -0,0 +1,81 @@ +generator client { + provider = "prisma-client-js" +} + +datasource db { + provider = "postgresql" + url = env("DATABASE_URL") +} + +enum FeedType { + blog + youtube + twitch +} + +// Planet CDOT Feed List, see https://wiki.cdot.senecacollege.ca/wiki/Planet_CDOT_Feed_List +model feeds { + url String @id + id String // A short hashed id generated from the URL by Satellite, + user_id String? // optional, a user can claim an existing feed when they register + wiki_author_name String? // wiki owner of a feed, maybe unused when the feed is linked with an actual user + html_url String? //actual URL the feed refers to, could be a blog URL, a YouTube or Twitch channel + type FeedType? @default(blog) + invalid Boolean? @default(false) + flagged Boolean? @default(false) + telescope_profiles telescope_profiles? @relation(fields: [user_id], references: [id], onDelete: Cascade, onUpdate: NoAction) + + @@unique([id]) +} + +// In the GitHub API, pull requests are considered issues, too. +// Therefore, we would group both pull requests and issues in a single +// table. 
For more information, read: +// https://docs.github.com/en/rest/reference/issues and +// https://docs.github.com/en/rest/reference/pulls +model github_issues { + id Int @id @default(autoincrement()) + number Int + repo Int + type String @db.Citext + github_repositories github_repositories @relation(fields: [repo], references: [id], onDelete: Cascade, onUpdate: NoAction) + + @@unique([repo, number]) +} + +model github_repositories { + id Int @id @default(autoincrement()) + name String + owner Int + github_users github_users @relation(fields: [owner], references: [id], onDelete: Cascade, onUpdate: NoAction) + github_issues github_issues[] + + @@unique([owner, name]) +} + +model github_users { + id Int @id @default(autoincrement()) + login String @db.Citext // the user name of the user, known as the `login` field in GitHub API responses + type String @db.Citext + github_repositories github_repositories[] +} + +model quotes { + quote_id Int @id @default(autoincrement()) + author_name String + blog_url String + quote String +} + +model telescope_profiles { + id String @id + registered_at DateTime @default(dbgenerated("timezone('utc'::text, now())")) @db.Timestamptz(6) + updated_at DateTime @default(dbgenerated("timezone('utc'::text, now())")) @db.Timestamptz(6) + display_name String @unique @db.Citext + first_name String + last_name String + email String @unique @db.Citext + github_username String @unique @db.Citext + github_avatar_url String + feeds feeds[] +} diff --git a/src/api/parser/src/data/feed.js b/src/api/parser/src/data/feed.js index 58a979a34d..1fb9ce5f23 100644 --- a/src/api/parser/src/data/feed.js +++ b/src/api/parser/src/data/feed.js @@ -19,7 +19,7 @@ const { getFlaggedFeeds, setFlaggedFeed, unsetFlaggedFeed, -} = require('../utils/supabase'); +} = require('../utils/db'); const { deletePost } = require('../utils/indexer'); diff --git a/src/api/parser/src/parser.js b/src/api/parser/src/parser.js index 503d625d6c..056a70b125 100644 --- a/src/api/parser/src/parser.js +++ b/src/api/parser/src/parser.js @@ -1,7 +1,7 @@ const { logger } = require('@senecacdot/satellite'); const { feedQueue } = require('./feed/queue'); const Feed = require('./data/feed'); -const { getAllFeeds } = require('./utils/supabase'); +const { getAllFeeds } = require('./utils/db'); /** * Adds the feed to the database if necessary, or gets a more complete diff --git a/src/api/parser/src/utils/__mocks__/supabase.js b/src/api/parser/src/utils/__mocks__/db.js similarity index 95% rename from src/api/parser/src/utils/__mocks__/supabase.js rename to src/api/parser/src/utils/__mocks__/db.js index e756dd326d..e8966397be 100644 --- a/src/api/parser/src/utils/__mocks__/supabase.js +++ b/src/api/parser/src/utils/__mocks__/db.js @@ -11,6 +11,7 @@ module.exports = { feeds = []; feedIds = new Set(); }, + /** * @param {Array} feedObjects */ @@ -21,6 +22,7 @@ module.exports = { feedIds.add(id); return uniqueFeeds.concat({ id, invalid: false, flagged: false }); } + return uniqueFeeds; }, []); feeds = feeds.concat(mockFeeds); @@ -35,16 +37,20 @@ module.exports = { }); return Promise.resolve(); }, + getInvalidFeeds: () => { const invalidFeedIds = feeds.filter((feed) => feed.flagged).map((feed) => ({ id: feed.id })); return Promise.resolve(invalidFeedIds); }, + isInvalid: (id) => { const targetFeed = feeds.find((feed) => feed.id === id); return Promise.resolve(!!targetFeed.invalid); }, + // Flagged feed related functions getAllFeeds: jest.fn().mockImplementation(() => Promise.resolve(feeds)), + setFlaggedFeed: 
jest.fn().mockImplementation((id) => {
     feeds.forEach((feed) => {
       if (feed.id === id) {
@@ -53,6 +59,7 @@ module.exports = {
     });
     return Promise.resolve();
   }),
+
   unsetFlaggedFeed: jest.fn().mockImplementation((id) => {
     feeds.forEach((feed) => {
       if (feed.id === id) {
@@ -66,8 +73,11 @@ module.exports = {
     const flaggedFeedIds = feeds.filter((feed) => feed.flagged).map((feed) => feed.id);
     return Promise.resolve(flaggedFeedIds);
   }),
+
   isFlagged: jest.fn().mockImplementation((id) => {
-    const targetFeed = feeds.find((feed) => feed.id === id);
+    const targetFeed = feeds.find((feed) => {
+      return feed.id === id;
+    });
     return Promise.resolve(!!targetFeed.flagged);
   }),
 };
diff --git a/src/api/parser/src/utils/db.js b/src/api/parser/src/utils/db.js
new file mode 100644
index 0000000000..44862a8efe
--- /dev/null
+++ b/src/api/parser/src/utils/db.js
@@ -0,0 +1,155 @@
+const { logger } = require('@senecacdot/satellite');
+const hash = require('@senecacdot/satellite/src/hash');
+const { PrismaClient } = require('@prisma/client');
+const normalizeUrl = require('normalize-url');
+
+const { SUPABASE_URL, SERVICE_ROLE_KEY } = process.env;
+
+if (!SUPABASE_URL || !SERVICE_ROLE_KEY) {
+  logger.error('SUPABASE_URL or SERVICE_ROLE_KEY is missing');
+  process.exit(1);
+}
+
+const prisma = new PrismaClient({
+  datasources: {
+    db: {
+      url: SUPABASE_URL,
+    },
+  },
+});
+
+module.exports = {
+  async getAllFeeds() {
+    try {
+      const data = await prisma.feeds.findMany({
+        select: {
+          wiki_author_name: true,
+          url: true,
+          telescope_profiles: { select: { display_name: true, github_username: true } },
+        },
+      });
+
+      return data.map((feed) => ({
+        // Prefer a user's display name if present, fall back to the wiki name otherwise
+        author: feed.telescope_profiles?.display_name || feed.wiki_author_name,
+        url: feed.url,
+        githubUsername: feed.telescope_profiles?.github_username,
+      }));
+    } catch (e) {
+      logger.error({ e });
+      throw new Error(`can't fetch feeds from db: ${e.message}`);
+    }
+  },
+
+  // Invalid feed related functions
+  async setInvalidFeed(id) {
+    try {
+      await prisma.feeds.update({
+        where: { id },
+        data: { invalid: true },
+      });
+    } catch (e) {
+      logger.error({ e });
+      throw new Error(`can't invalidate feed ${id} in db: ${e.message}`);
+    }
+  },
+
+  async getInvalidFeeds() {
+    try {
+      const invalidFeeds = await prisma.feeds.findMany({
+        where: { invalid: true },
+      });
+      return invalidFeeds;
+    } catch (e) {
+      logger.error({ e });
+      throw new Error(`can't fetch invalid feeds from db: ${e.message}`);
+    }
+  },
+
+  async isInvalid(id) {
+    try {
+      const invalidFeed = await prisma.feeds.findUnique({
+        select: { invalid: true },
+        where: { id },
+      });
+      return invalidFeed.invalid;
+    } catch (e) {
+      logger.error({ e });
+      throw new Error(`can't fetch feed ${id} from db: ${e.message}`);
+    }
+  },
+
+  // Flagged feed related functions
+  async setFlaggedFeed(id) {
+    try {
+      await prisma.feeds.update({
+        where: { id },
+        data: { flagged: true },
+      });
+    } catch (e) {
+      logger.error({ e });
+      throw new Error(`can't flag feed ${id} in db: ${e.message}`);
+    }
+  },
+
+  async unsetFlaggedFeed(id) {
+    try {
+      await prisma.feeds.update({
+        where: { id },
+        data: { flagged: false },
+      });
+    } catch (e) {
+      logger.error({ e });
+      throw new Error(`can't unflag feed ${id} in db: ${e.message}`);
+    }
+  },
+
+  async getFlaggedFeeds() {
+    try {
+      const flaggedFeeds = await prisma.feeds.findMany({
+        where: { flagged: true },
+      });
+      return flaggedFeeds.map((feed) => feed.id);
+    } catch (e) {
+      logger.error({ e });
+      throw new Error(`can't fetch flagged feeds from db: ${e.message}`);
+    }
+  },
+
+  async isFlagged(id) {
+    try {
+      const flaggedFeed = await prisma.feeds.findUnique({
+        select: { flagged: true },
+        where: { id },
+      });
+      return flaggedFeed.flagged;
+    } catch (e) {
+      logger.error({ e });
+      throw new Error(`can't fetch feed ${id} from db: ${e.message}`);
+    }
+  },
+
+  async addFeeds(feeds) {
+    try {
+      await prisma.feeds.createMany({
+        data: [
+          ...feeds.map((feed) => ({
+            url: feed.url,
+            id: hash(normalizeUrl(feed.url)),
+            wiki_author_name: feed.author,
+            invalid: false,
+            flagged: false,
+            type: 'blog',
+            html_url: null,
+            user_id: null,
+          })),
+        ],
+      });
+    } catch (e) {
+      if (e) {
+        logger.error({ e });
+        throw new Error(`can't insert feeds into db: ${e.message}`);
+      }
+    }
+  },
+};
diff --git a/src/api/parser/src/utils/storage.js b/src/api/parser/src/utils/storage.js
index cbaaab615e..6a73f00994 100644
--- a/src/api/parser/src/utils/storage.js
+++ b/src/api/parser/src/utils/storage.js
@@ -1,5 +1,5 @@
 const { logger, Redis } = require('@senecacdot/satellite');
-const { isFlagged } = require('./supabase');
+const { isFlagged } = require('./db');
 
 const redis = Redis();
 
diff --git a/src/api/parser/src/utils/supabase.js b/src/api/parser/src/utils/supabase.js
deleted file mode 100644
index 3a9dbd73d8..0000000000
--- a/src/api/parser/src/utils/supabase.js
+++ /dev/null
@@ -1,123 +0,0 @@
-const { logger } = require('@senecacdot/satellite');
-const hash = require('@senecacdot/satellite/src/hash');
-const { createClient } = require('@supabase/supabase-js');
-const normalizeUrl = require('normalize-url');
-
-const { SUPABASE_URL, SERVICE_ROLE_KEY } = process.env;
-
-const supabase = createClient(SUPABASE_URL, SERVICE_ROLE_KEY);
-
-if (!SUPABASE_URL || !SERVICE_ROLE_KEY) {
-  logger.error('SUPBASE_URL or SERVICE_ROLE_KEY is missing');
-  process.exit(1);
-}
-
-module.exports = {
-  async getAllFeeds() {
-    const { data, error } = await supabase
-      .from('feeds')
-      .select('wiki_author_name, url, telescope_profiles (display_name, github_username)');
-
-    if (error) {
-      logger.error({ error });
-      throw Error(error.message, "can't fetch feeds from supabase");
-    }
-
-    return data.map((feed) => ({
-      // Prefer the a user's display name if present, fallback to wiki name otherwise
-      author: feed.telescope_profiles?.display_name || feed.wiki_author_name,
-      url: feed.url,
-      githubUsername: feed.telescope_profiles?.github_username,
-    }));
-  },
-
-  // Invalid feed related functions
-  async setInvalidFeed(id) {
-    const { error } = await supabase.from('feeds').update({ invalid: true }).eq('id', id);
-
-    if (error) {
-      logger.error({ error });
-      throw Error(error.message, `can't invalidate feed ${id} in supabase`);
-    }
-  },
-
-  async getInvalidFeeds() {
-    const { data: invalidFeeds, error } = await supabase.from('feeds').select().is('invalid', true);
-    if (error) {
-      logger.error({ error });
-      throw Error(error.message, "can't fetch invalid feeds in supabase");
-    }
-    return invalidFeeds;
-  },
-  async isInvalid(id) {
-    const { data: invalidFeed, error } = await supabase
-      .from('feeds')
-      .select('invalid')
-      .eq('id', id)
-      .limit(1);
-
-    if (error) {
-      logger.error({ error });
-      throw Error(error.message, `can't fetch feed ${id} from supabase`);
-    }
-    return invalidFeed.invalid;
-  },
-
-  // Flagged feed related functions
-  async setFlaggedFeed(id) {
-    const { error } = await supabase.from('feeds').update({ flagged: true }).eq('id', id);
-
-    if (error) {
-      logger.error({ error });
-      throw Error(error.message, `can't flag feed ${id} in supabase`);
-    }
-  },
-  async unsetFlaggedFeed(id) {
-    const { error } = await
supabase.from('feeds').update({ flagged: false }).eq('id', id); - - if (error) { - logger.error({ error }); - throw Error(error.message, `can't unflag feed ${id} in supabase`); - } - }, - async getFlaggedFeeds() { - const { data: flaggedFeeds, error } = await supabase.from('feeds').select().eq('flagged', true); - - if (error) { - logger.error({ error }); - throw Error(error.message, `can't flagged feeds from supabase`); - } - return flaggedFeeds.map((feed) => feed.id); - }, - async isFlagged(id) { - const { data: flaggedFeed, error } = await supabase - .from('feeds') - .select('flagged') - .eq('id', id) - .limit(1); - - if (error) { - logger.error({ error }); - throw Error(error.message, `can't fetch feed ${id} from supabase`); - } - return flaggedFeed.flagged; - }, - async addFeeds(feeds) { - const { error } = await supabase.from('feeds').insert( - feeds.map((feed) => ({ - url: feed.url, - id: hash(normalizeUrl(feed.url)), - wiki_author_name: feed.author, - invalid: false, - flagged: false, - type: 'blog', - html_url: null, - user_id: null, - })) - ); - if (error) { - logger.error({ error }); - throw Error(error.message, "can't insert feeds to supabase"); - } - }, -}; diff --git a/src/api/parser/test/e2e/parser-flow.test.js b/src/api/parser/test/e2e/parser-flow.test.js index 0966176c5e..e4527530d3 100644 --- a/src/api/parser/test/e2e/parser-flow.test.js +++ b/src/api/parser/test/e2e/parser-flow.test.js @@ -4,7 +4,7 @@ const normalizeUrl = require('normalize-url'); const { loadFeedsIntoQueue, invalidateFeed } = require('../../src/parser'); const feedWorker = require('../../src/feed/worker'); const { feedQueue } = require('../../src/feed/queue'); -const { addFeeds, getInvalidFeeds, getAllFeeds } = require('../../src/utils/supabase'); +const { addFeeds, getInvalidFeeds, getAllFeeds } = require('../../src/utils/db'); const urlToId = (url) => hash(normalizeUrl(url)); diff --git a/src/api/parser/test/feed-processor.test.js b/src/api/parser/test/feed-processor.test.js index aca79fbeba..5547fd51cd 100644 --- a/src/api/parser/test/feed-processor.test.js +++ b/src/api/parser/test/feed-processor.test.js @@ -3,8 +3,8 @@ const processor = require('../src/feed/processor'); const Feed = require('../src/data/feed'); jest.mock('../src/utils/indexer'); -jest.mock('../src/utils/supabase'); -const { __setMockFeeds } = require('../src/utils/supabase'); +jest.mock('../src/utils/db'); +const { __setMockFeeds } = require('../src/utils/db'); describe('Feed Processor Tests', () => { const createFeed = (url) => diff --git a/src/api/parser/test/feed.test.js b/src/api/parser/test/feed.test.js index beaa4073f6..b0ac3ea321 100644 --- a/src/api/parser/test/feed.test.js +++ b/src/api/parser/test/feed.test.js @@ -6,8 +6,8 @@ const Post = require('../src/data/post'); const urlToId = (url) => hash(normalizeUrl(url)); -jest.mock('../src/utils/supabase'); -const { __setMockFeeds, __resetMockFeeds } = require('../src/utils/supabase'); +jest.mock('../src/utils/db'); +const { __setMockFeeds, __resetMockFeeds } = require('../src/utils/db'); describe('Post data class tests', () => { const data = { diff --git a/src/api/parser/test/post.test.js b/src/api/parser/test/post.test.js index 9c993d5583..7b0d717d2b 100644 --- a/src/api/parser/test/post.test.js +++ b/src/api/parser/test/post.test.js @@ -26,8 +26,8 @@ const Post = require('../src/data/post'); const Feed = require('../src/data/feed'); jest.mock('../src/utils/indexer'); -jest.mock('../src/utils/supabase'); -const { __setMockFeeds } = require('../src/utils/supabase'); 
+jest.mock('../src/utils/db'); +const { __setMockFeeds } = require('../src/utils/db'); describe('Post data class tests', () => { let feed; diff --git a/src/api/parser/test/storage.test.js b/src/api/parser/test/storage.test.js index 5afe07e98b..9fb313566a 100644 --- a/src/api/parser/test/storage.test.js +++ b/src/api/parser/test/storage.test.js @@ -13,8 +13,8 @@ const { } = require('../src/utils/storage'); const Feed = require('../src/data/feed'); -jest.mock('../src/utils/supabase'); -const { __setMockFeeds } = require('../src/utils/supabase'); +jest.mock('../src/utils/db'); +const { __setMockFeeds } = require('../src/utils/db'); describe('Storage tests for feeds', () => { const feed1 = new Feed('James Smith', 'http://seneca.co/jsmith', 'user');
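The new `src/api/parser/src/utils/db.js` keeps the same public surface the parser already consumes (`getAllFeeds`, `addFeeds`, and the invalid/flagged helpers), only swapping the Supabase client for Prisma. A minimal sketch of driving that surface end to end, assuming a reachable Postgres with the `schema.prisma` tables created; the feed URL and author are made up for illustration:

```js
// Sketch only: exercising the Prisma-backed db.js helpers.
// Assumes SUPABASE_URL/SERVICE_ROLE_KEY are set and Postgres is up;
// the feed URL and author are hypothetical.
const hash = require('@senecacdot/satellite/src/hash');
const normalizeUrl = require('normalize-url');
const { addFeeds, getAllFeeds, setFlaggedFeed, isFlagged } = require('./src/utils/db');

async function main() {
  const url = 'https://example.com/feed.xml'; // hypothetical feed

  // addFeeds() derives the id column by hashing the normalized URL
  await addFeeds([{ author: 'Jane Doe', url }]);

  // Re-derive the same id to flag the feed we just inserted
  const id = hash(normalizeUrl(url));
  await setFlaggedFeed(id);
  console.log(await isFlagged(id)); // true

  // getAllFeeds() maps rows to { author, url, githubUsername }
  console.log(await getAllFeeds());
}

main().catch((err) => {
  console.error(err);
  process.exit(1);
});
```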
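For reviewers new to Prisma, the `feeds.user_id -> telescope_profiles.id` relation declared in `schema.prisma` can also be traversed with `include` instead of the nested `select` that `getAllFeeds()` uses. A sketch under the same schema; `feedsWithOwners` is an illustrative name, not part of this patch:

```js
// Sketch of traversing the relation declared in schema.prisma.
// feedsWithOwners() is an illustrative name, not part of the patch.
const { PrismaClient } = require('@prisma/client');

// Without an explicit datasources override, the generated client reads
// the connection string from env("DATABASE_URL"), per the datasource block.
const prisma = new PrismaClient();

async function feedsWithOwners() {
  return prisma.feeds.findMany({
    where: { invalid: false, flagged: false },
    // Joins through feeds.user_id -> telescope_profiles.id; the relation
    // is null for wiki-only feeds that no user has claimed yet.
    include: { telescope_profiles: true },
  });
}

module.exports = { feedsWithOwners };
```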
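The renamed `__mocks__/db.js` keeps every test suite Postgres-free: `jest.mock('../src/utils/db')` swaps in the in-memory store, and `__setMockFeeds`/`__resetMockFeeds` seed and clear it. Roughly how a suite exercises the flag helpers against the mock, mirroring the `Feed` construction in `storage.test.js` above (a sketch, assuming `Feed` instances carry the hashed `id` the mock matches on):

```js
// Sketch: flag-state round trip against the in-memory mock (no Postgres).
const Feed = require('../src/data/feed');

// Jest substitutes src/utils/__mocks__/db.js for the real module
jest.mock('../src/utils/db');
const { __setMockFeeds, setFlaggedFeed, isFlagged } = require('../src/utils/db');

describe('flagged feeds via the mock db', () => {
  // Same constructor shape as storage.test.js: (author, url, user)
  const feed = new Feed('James Smith', 'http://seneca.co/jsmith', 'user');

  beforeAll(() => __setMockFeeds([feed]));

  test('a seeded feed starts unflagged and can be flagged', async () => {
    expect(await isFlagged(feed.id)).toBe(false);
    await setFlaggedFeed(feed.id);
    expect(await isFlagged(feed.id)).toBe(true);
  });
});
```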