From e2e930fd9d7b64565d30499929f73d178da24942 Mon Sep 17 00:00:00 2001 From: James Arthur Date: Wed, 1 Jun 2022 16:17:40 +0200 Subject: [PATCH] Initial commit. --- .formatter.exs | 4 + .gitignore | 26 + README.md | 87 +++ TODO.md | 26 + config/config.exs | 17 + config/dev.exs | 19 + config/prod.exs | 4 + config/runtime.exs | 12 + config/test.exs | 6 + databases.yaml | 37 ++ init-user-db.sh | 16 + lib/electric.ex | 3 + lib/electric/application.ex | 14 + lib/electric/replication.ex | 68 +++ lib/electric/replication/changes.ex | 7 + lib/electric/replication/client.ex | 37 ++ lib/electric/replication/config.ex | 29 + lib/electric/replication/producer.ex | 206 +++++++ mix.exs | 34 ++ mix.lock | 7 + postgres.conf | 798 +++++++++++++++++++++++++++ test/electric_test.exs | 8 + test/test_helper.exs | 1 + 23 files changed, 1466 insertions(+) create mode 100644 .formatter.exs create mode 100644 .gitignore create mode 100644 README.md create mode 100644 TODO.md create mode 100644 config/config.exs create mode 100644 config/dev.exs create mode 100644 config/prod.exs create mode 100644 config/runtime.exs create mode 100644 config/test.exs create mode 100644 databases.yaml create mode 100755 init-user-db.sh create mode 100644 lib/electric.ex create mode 100644 lib/electric/application.ex create mode 100644 lib/electric/replication.ex create mode 100644 lib/electric/replication/changes.ex create mode 100644 lib/electric/replication/client.ex create mode 100644 lib/electric/replication/config.ex create mode 100644 lib/electric/replication/producer.ex create mode 100644 mix.exs create mode 100644 mix.lock create mode 100644 postgres.conf create mode 100644 test/electric_test.exs create mode 100644 test/test_helper.exs diff --git a/.formatter.exs b/.formatter.exs new file mode 100644 index 00000000..d2cda26e --- /dev/null +++ b/.formatter.exs @@ -0,0 +1,4 @@ +# Used by "mix format" +[ + inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"] +] diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..c6a295ed --- /dev/null +++ b/.gitignore @@ -0,0 +1,26 @@ +# The directory Mix will write compiled artifacts to. +/_build/ + +# If you run "mix test --cover", coverage assets end up here. +/cover/ + +# The directory Mix downloads your dependencies sources to. +/deps/ + +# Where third-party dependencies like ExDoc output generated docs. +/doc/ + +# Ignore .fetch files in case you like to edit your project deps locally. +/.fetch + +# If the VM crashes, it generates a dump, let's ignore it too. +erl_crash.dump + +# Also ignore archive artifacts (built via "mix archive.build"). +*.ez + +# Ignore package tarball (built via "mix hex.build"). +electric-*.tar + +# Temporary files, for example, from tests. +/tmp/ diff --git a/README.md b/README.md new file mode 100644 index 00000000..5301eb09 --- /dev/null +++ b/README.md @@ -0,0 +1,87 @@ + +# Electric + +This is a POC of Postgres active-active replication using Vaxine. + +## Pre-reqs + +Docker and Elixir 1.13. + +## Run databases + +```sh +docker-compose -f databases.yaml up +``` + +## Run app + +```sh +mix run --no-halt +``` + +## Generate workload + +For now, manually issue some SQL statements, e.g.: + +``` +psql -h localhost -p 54321 -U electric -d electric +... 
+electric=# INSERT INTO entries (content) VALUES ('a');
+electric=# select * from entries;
+electric=# update entries set content = 'b';
+```
+
+Then view the app logs, which should look a bit like:
+
+```
+{:message,
+ %Broadway.Message{
+   acknowledger: {Electric.Replication, :ack_id, {#PID<0.218.0>, {0, 24336352}}},
+   batch_key: :default,
+   batch_mode: :bulk,
+   batcher: :default,
+   data: %Electric.Replication.Changes.Transaction{
+     changes: [
+       %Electric.Replication.Changes.NewRecord{
+         record: %{
+           "content" => "a",
+           "id" => "9be3b616-17e9-4264-9f33-5bdb36c48443"
+         },
+         relation: {"public", "entries"}
+       }
+     ],
+     commit_timestamp: ~U[2022-06-01 14:07:56Z]
+   },
+   metadata: %{},
+   status: :ok
+ }}
+{:ack, {0, 24336352}}
+{:message,
+ %Broadway.Message{
+   acknowledger: {Electric.Replication, :ack_id, {#PID<0.218.0>, {0, 24336568}}},
+   batch_key: :default,
+   batch_mode: :bulk,
+   batcher: :default,
+   data: %Electric.Replication.Changes.Transaction{
+     changes: [
+       %Electric.Replication.Changes.UpdatedRecord{
+         old_record: %{
+           "content" => "a",
+           "id" => "9be3b616-17e9-4264-9f33-5bdb36c48443"
+         },
+         record: %{
+           "content" => "b",
+           "id" => "9be3b616-17e9-4264-9f33-5bdb36c48443"
+         },
+         relation: {"public", "entries"}
+       }
+     ],
+     commit_timestamp: ~U[2022-06-01 14:08:39Z]
+   },
+   metadata: %{},
+   status: :ok
+ }}
+```
+
+Note that the `UpdatedRecord` carries the `old_record` as well as the new `record`.
+
diff --git a/TODO.md b/TODO.md
new file mode 100644
index 00000000..39e33450
--- /dev/null
+++ b/TODO.md
@@ -0,0 +1,26 @@
+# Todo
+
+Step 1:
+
+- [ ] two Postgres database nodes (same single table schema for now)
+- [ ] set up logical replication between them
+
+Step 2:
+
+- [ ] consume and decode logical replication stream from Postgres A
+- [ ] implement enough of the backend server logical replication protocol in order to replicate through Elixir
+- [ ] encode and produce logical replication stream for Postgres B
+
+Step 3:
+
+- [ ] write changes into Antidote
+- [ ] somehow handle `LSN` polling / pull from Postgres B
+- [ ] query relevant materialised values from Antidote
+- [ ] construct into encodable stream
+
+Step 4:
+
+- [ ] add a third Postgres
+- [ ] pair each Postgres with an Antidote
+- [ ] replicate between Antidotes
+- [ ] demonstrate Postgres replication working with TCC+
diff --git a/config/config.exs b/config/config.exs
new file mode 100644
index 00000000..8736de15
--- /dev/null
+++ b/config/config.exs
@@ -0,0 +1,17 @@
+# This file is responsible for configuring your application
+# and its dependencies with the aid of the Config module.
+#
+# This configuration file is loaded before any dependency and
+# is restricted to this project.
+
+# General application configuration
+import Config
+
+# Configures Elixir's Logger
+config :logger, :console,
+  format: "$time $metadata[$level] $message\n",
+  metadata: [:request_id]
+
+# Import environment specific config. This must remain at the bottom
+# of this file so it overrides the configuration defined above.
+import_config "#{Mix.env()}.exs" diff --git a/config/dev.exs b/config/dev.exs new file mode 100644 index 00000000..edbbc7c5 --- /dev/null +++ b/config/dev.exs @@ -0,0 +1,19 @@ +import Config + +# Configure your database +config :electric, Electric.Replication, + epgsql: %{ + host: 'localhost', + port: 54321, + database: 'electric', + username: 'electric', + password: 'password', + replication: 'database', + ssl: false + }, + producer: Electric.Replication.Producer, + publication: "all_tables", + slot: "all_changes" + +# Do not include metadata nor timestamps in development logs +config :logger, :console, format: "[$level] $message\n" diff --git a/config/prod.exs b/config/prod.exs new file mode 100644 index 00000000..3e655635 --- /dev/null +++ b/config/prod.exs @@ -0,0 +1,4 @@ +import Config + +# Do not print debug messages in production +config :logger, level: :info diff --git a/config/runtime.exs b/config/runtime.exs new file mode 100644 index 00000000..bfc331d4 --- /dev/null +++ b/config/runtime.exs @@ -0,0 +1,12 @@ +# This file is executed after the code compilation on all environments +# (dev, test, and prod) - for both Mix and releases. +# +# We use it for runtime configuration of releases in production -- +# because that allows us to read environment variables at runtime +# rather than compile time. + +import Config + +if config_env() == :prod do + throw(:NotImplemented) +end diff --git a/config/test.exs b/config/test.exs new file mode 100644 index 00000000..91d521dd --- /dev/null +++ b/config/test.exs @@ -0,0 +1,6 @@ +import Config + +config :electric, Electric.Replication, producer: Broadway.DummyProducer + +# Print only warnings and errors during test +config :logger, level: :warn diff --git a/databases.yaml b/databases.yaml new file mode 100644 index 00000000..a6709e17 --- /dev/null +++ b/databases.yaml @@ -0,0 +1,37 @@ +# Run using `docker-compose -f databases.yaml up`. 
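+#
+# Defines two identical Postgres instances: db_a on host port 54321 and
+# db_b on host port 54322. Both mount postgres.conf (which sets
+# wal_level = logical) and run init-user-db.sh on first start to create
+# the entries table, the all_tables publication and the all_changes
+# replication slot.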
+version: '3.1' + +services: + db_a: + image: postgres + restart: always + environment: + POSTGRES_DB: electric + POSTGRES_USER: electric + POSTGRES_PASSWORD: password + ports: + - "54321:5432" + volumes: + - ./init-user-db.sh:/docker-entrypoint-initdb.d/init-user-db.sh:ro + - ./postgres.conf:/etc/postgresql.conf:ro + entrypoint: + - docker-entrypoint.sh + - -c + - config_file=/etc/postgresql.conf + + db_b: + image: postgres + restart: always + environment: + POSTGRES_DB: electric + POSTGRES_USER: electric + POSTGRES_PASSWORD: password + ports: + - "54322:5432" + volumes: + - ./init-user-db.sh:/docker-entrypoint-initdb.d/init-user-db.sh:ro + - ./postgres.conf:/etc/postgresql.conf:ro + entrypoint: + - docker-entrypoint.sh + - -c + - config_file=/etc/postgresql.conf diff --git a/init-user-db.sh b/init-user-db.sh new file mode 100755 index 00000000..5d30908b --- /dev/null +++ b/init-user-db.sh @@ -0,0 +1,16 @@ +#!/bin/bash +set -e + +psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" "dbname=$POSTGRES_DB replication=database" <<-EOSQL + CREATE EXTENSION IF NOT EXISTS "uuid-ossp"; + + CREATE TABLE entries ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + content VARCHAR(64) NOT NULL + ); + + ALTER TABLE entries REPLICA IDENTITY FULL; + + CREATE PUBLICATION all_tables FOR ALL TABLES; + CREATE_REPLICATION_SLOT all_changes LOGICAL pgoutput NOEXPORT_SNAPSHOT; +EOSQL diff --git a/lib/electric.ex b/lib/electric.ex new file mode 100644 index 00000000..572dd892 --- /dev/null +++ b/lib/electric.ex @@ -0,0 +1,3 @@ +defmodule Electric do + @moduledoc false +end diff --git a/lib/electric/application.ex b/lib/electric/application.ex new file mode 100644 index 00000000..162b08a7 --- /dev/null +++ b/lib/electric/application.ex @@ -0,0 +1,14 @@ +defmodule Electric.Application do + @moduledoc false + + use Application + + def start(_type, _args) do + children = [ + Electric.Replication + ] + + opts = [strategy: :one_for_one, name: Electric.Supervisor] + Supervisor.start_link(children, opts) + end +end diff --git a/lib/electric/replication.ex b/lib/electric/replication.ex new file mode 100644 index 00000000..a0cab40e --- /dev/null +++ b/lib/electric/replication.ex @@ -0,0 +1,68 @@ +defmodule Electric.Replication do + use Broadway + + alias Broadway.Message + alias __MODULE__ + + alias Replication.Config + alias Replication.Changes.Transaction + + def start_link(_opts) do + Broadway.start_link( + Replication, + name: Replication, + producer: [ + module: {Config.producer(), []}, + transformer: {Replication, :transform, []}, + concurrency: 1 + ], + processors: [ + default: [concurrency: 1] + ] + ) + end + + def transform({txn, end_lsn, conn}, _opts) do + %Message{ + data: txn, + acknowledger: {__MODULE__, :ack_id, {conn, end_lsn}} + } + end + + @impl true + def handle_message(_, %Message{data: %Transaction{changes: changes}} = message, _) do + IO.inspect({:message, message}) + + errors = + changes + |> Enum.reduce([], &handle_change/2) + + message = + case errors do + [] -> + message + + reason -> + Message.failed(message, reason) + end + + message + end + + def handle_change(_, acc), do: acc + + def ack(:ack_id, [], []), do: nil + def ack(:ack_id, _, [_head | _tail]), do: throw("XXX ack failure handling not yet implemented") + + def ack(:ack_id, successful, []) do + last_message = + successful + |> Enum.reverse() + |> Enum.at(0) + + %{acknowledger: {_, _, {conn, end_lsn}}} = last_message + IO.inspect({:ack, end_lsn}) + + Replication.Client.acknowledge_lsn(conn, end_lsn) + end +end diff --git 
a/lib/electric/replication/changes.ex b/lib/electric/replication/changes.ex
new file mode 100644
index 00000000..e674fb75
--- /dev/null
+++ b/lib/electric/replication/changes.ex
@@ -0,0 +1,7 @@
+defmodule Electric.Replication.Changes do
+  defmodule(Transaction, do: defstruct([:changes, :commit_timestamp]))
+  defmodule(NewRecord, do: defstruct([:relation, :record]))
+  defmodule(UpdatedRecord, do: defstruct([:relation, :old_record, :record]))
+  defmodule(DeletedRecord, do: defstruct([:relation, :old_record]))
+  defmodule(TruncatedRelation, do: defstruct([:relation]))
+end
diff --git a/lib/electric/replication/client.ex b/lib/electric/replication/client.ex
new file mode 100644
index 00000000..ce68adef
--- /dev/null
+++ b/lib/electric/replication/client.ex
@@ -0,0 +1,37 @@
+defmodule Electric.Replication.Client do
+  @moduledoc """
+  Database replication client.
+
+  Uses `:epgsql` for its `start_replication` function. Note that epgsql
+  doesn't support connecting via a unix socket.
+  """
+
+  def connect(%{} = config) do
+    :epgsql.connect(config)
+  end
+
+  @doc """
+  Start consuming the logical replication feed using a given `publication` and `slot`.
+
+  The handler can be a pid or a module implementing the `handle_x_log_data` callback.
+
+  Returns `:ok` on success.
+  """
+  def start_replication(conn, publication, slot, handler) do
+    opts = 'proto_version \'1\', publication_names \'#{publication}\''
+
+    conn
+    |> :epgsql.start_replication(slot, handler, [], '0/0', opts)
+  end
+
+  @doc """
+  Confirm successful processing of a WAL segment.
+
+  Returns `:ok` on success.
+  """
+  def acknowledge_lsn(conn, {xlog, offset} = _lsn_tup) do
+    <<decimal_lsn::integer-64>> = <<xlog::integer-32, offset::integer-32>>
+
+    :epgsql.standby_status_update(conn, decimal_lsn, decimal_lsn)
+  end
+end
diff --git a/lib/electric/replication/config.ex b/lib/electric/replication/config.ex
new file mode 100644
index 00000000..5ef4bd9d
--- /dev/null
+++ b/lib/electric/replication/config.ex
@@ -0,0 +1,29 @@
+defmodule Electric.Replication.Config do
+  @moduledoc """
+  Replication config helpers.
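+
+  Reads the application environment configured under
+  `config :electric, Electric.Replication` (see `config/dev.exs`). For
+  example, `epgsql/0` returns the connection options that
+  `Electric.Replication.Producer` passes to `Electric.Replication.Client.connect/1`.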
+ """ + + def config do + Application.get_env(:electric, Electric.Replication) + end + + def epgsql do + config() + |> Keyword.get(:epgsql) + end + + def producer do + config() + |> Keyword.get(:producer) + end + + def publication do + config() + |> Keyword.get(:publication) + end + + def slot do + config() + |> Keyword.get(:slot) + end +end diff --git a/lib/electric/replication/producer.ex b/lib/electric/replication/producer.ex new file mode 100644 index 00000000..ea66fc6d --- /dev/null +++ b/lib/electric/replication/producer.ex @@ -0,0 +1,206 @@ +defmodule Electric.Replication.Producer do + use GenStage + + alias PgoutputDecoder.Messages.{ + Begin, + Commit, + Relation, + Insert, + Update, + Delete, + Truncate, + Type + } + + alias Electric.Replication.{ + Client, + Config + } + + alias Electric.Replication.Changes.{ + Transaction, + NewRecord, + UpdatedRecord, + DeletedRecord, + TruncatedRelation + } + + defmodule State do + defstruct conn: nil, + demand: 0, + queue: nil, + relations: %{}, + transaction: nil, + types: %{} + end + + def start_link(opts) do + GenStage.start_link(__MODULE__, opts) + end + + @impl true + def init(_) do + {:ok, conn} = Config.epgsql() |> Client.connect() + + slot = Config.slot() + publication = Config.publication() + + :ok = Client.start_replication(conn, publication, slot, self()) + + {:producer, %State{conn: conn, queue: :queue.new()}} + end + + @impl true + def handle_info({:epgsql, _pid, {:x_log_data, _start_lsn, _end_lsn, binary_msg}}, state) do + binary_msg + |> PgoutputDecoder.decode_message() + |> process_message(state) + end + + @impl true + def handle_info(_msg, state) do + {:noreply, [], state} + end + + defp process_message(%Begin{} = msg, state) do + tx = %Transaction{changes: [], commit_timestamp: msg.commit_timestamp} + + {:noreply, [], %{state | transaction: {msg.final_lsn, tx}}} + end + + defp process_message(%Type{}, state), do: {:noreply, [], state} + + defp process_message(%Relation{} = msg, state) do + {:noreply, [], %{state | relations: Map.put(state.relations, msg.id, msg)}} + end + + defp process_message(%Insert{} = msg, state) do + relation = Map.get(state.relations, msg.relation_id) + + data = data_tuple_to_map(relation.columns, msg.tuple_data) + + new_record = %NewRecord{relation: {relation.namespace, relation.name}, record: data} + + {lsn, txn} = state.transaction + txn = %{txn | changes: Enum.reverse([new_record | txn.changes])} + + {:noreply, [], %{state | transaction: {lsn, txn}}} + end + + defp process_message(%Update{} = msg, state) do + relation = Map.get(state.relations, msg.relation_id) + + old_data = data_tuple_to_map(relation.columns, msg.old_tuple_data) + data = data_tuple_to_map(relation.columns, msg.tuple_data) + + updated_record = %UpdatedRecord{ + relation: {relation.namespace, relation.name}, + old_record: old_data, + record: data + } + + {lsn, txn} = state.transaction + txn = %{txn | changes: Enum.reverse([updated_record | txn.changes])} + + {:noreply, [], %{state | transaction: {lsn, txn}}} + end + + defp process_message(%Delete{} = msg, state) do + relation = Map.get(state.relations, msg.relation_id) + + data = + data_tuple_to_map( + relation.columns, + msg.old_tuple_data || msg.changed_key_tuple_data + ) + + deleted_record = %DeletedRecord{ + relation: {relation.namespace, relation.name}, + old_record: data + } + + {lsn, txn} = state.transaction + txn = %{txn | changes: Enum.reverse([deleted_record | txn.changes])} + + {:noreply, [], %{state | transaction: {lsn, txn}}} + end + + defp process_message(%Truncate{} 
= msg, state) do + truncated_relations = + for truncated_relation <- msg.truncated_relations do + relation = Map.get(state.relations, truncated_relation) + + %TruncatedRelation{ + relation: {relation.namespace, relation.name} + } + end + + {lsn, txn} = state.transaction + txn = %{txn | changes: Enum.reverse(truncated_relations ++ txn.changes)} + + {:noreply, [], %{state | transaction: {lsn, txn}}} + end + + # When we have a new event, enqueue it and see if there's any + # pending demand we can meet by dispatching events. + defp process_message( + %Commit{lsn: commit_lsn, end_lsn: end_lsn}, + %State{transaction: {current_txn_lsn, txn}, conn: conn, queue: queue} = state + ) + when commit_lsn == current_txn_lsn do + event = {txn, end_lsn, conn} + + queue = :queue.in(event, queue) + state = %{state | queue: queue, transaction: nil} + + dispatch_events(state, []) + end + + # When we have new demand, add it to any pending demand and see if we can + # meet it by dispatching events. + @impl true + def handle_demand(incoming_demand, %{demand: pending_demand} = state) do + state = %{state | demand: incoming_demand + pending_demand} + + dispatch_events(state, []) + end + + # When we're done exhausting demand, emit events. + defp dispatch_events(%{demand: 0} = state, events) do + emit_events(state, events) + end + + defp dispatch_events(%{demand: demand, queue: queue} = state, events) do + case :queue.out(queue) do + # If the queue has events, recurse to accumulate them + # as long as there is demand. + {{:value, event}, queue} -> + state = %{state | demand: demand - 1, queue: queue} + + dispatch_events(state, [event | events]) + + # When the queue is empty, emit any accumulated events. + {:empty, queue} -> + state = %{state | queue: queue} + + emit_events(state, events) + end + end + + defp emit_events(state, []) do + {:noreply, [], state} + end + + defp emit_events(state, events) do + {:noreply, Enum.reverse(events), state} + end + + # TODO: Typecast to meaningful Elixir types here later + defp data_tuple_to_map(_columns, nil), do: %{} + + defp data_tuple_to_map(columns, tuple_data) do + for {column, index} <- Enum.with_index(columns, 1), + do: {column.name, :erlang.element(index, tuple_data)}, + into: %{} + end +end diff --git a/mix.exs b/mix.exs new file mode 100644 index 00000000..71ae2df4 --- /dev/null +++ b/mix.exs @@ -0,0 +1,34 @@ +defmodule Electric.MixProject do + use Mix.Project + + def project do + [ + app: :electric, + version: "0.1.0", + elixir: "~> 1.13", + elixirc_paths: elixirc_paths(Mix.env()), + start_permanent: Mix.env() == :prod, + deps: deps() + ] + end + + # Run "mix help compile.app" to learn about applications. + def application do + [ + mod: {Electric.Application, []}, + extra_applications: [:logger] + ] + end + + # Run "mix help deps" to learn about dependencies. + defp deps do + [ + {:broadway, "~> 0.6"}, + {:epgsql, "~> 4.2"}, + {:pgoutput_decoder, "~> 0.1.0"} + ] + end + + # Specifies which paths to compile per environment. 
+ defp elixirc_paths(_), do: ["lib"] +end diff --git a/mix.lock b/mix.lock new file mode 100644 index 00000000..6a204125 --- /dev/null +++ b/mix.lock @@ -0,0 +1,7 @@ +%{ + "broadway": {:hex, :broadway, "0.6.2", "ef8e0d257420c72f0e600958cf95556835d9921ad14be333493083226458791a", [:mix], [{:gen_stage, "~> 1.0", [hex: :gen_stage, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "f4f93704304a736c984cd6ed884f697415f68eb50906f4dc5d641926366ad8fa"}, + "epgsql": {:hex, :epgsql, "4.6.0", "a403534d03811c867e79405bb18d6061a5fcef92acf0236a9150ac93748ce143", [:rebar3], [], "hexpm", "6be4910a02551f21eab6f6f27d97b0d4b1dffc617f1a64c8c95fdd45652fa9e1"}, + "gen_stage": {:hex, :gen_stage, "1.1.2", "b1656cd4ba431ed02c5656fe10cb5423820847113a07218da68eae5d6a260c23", [:mix], [], "hexpm", "9e39af23140f704e2b07a3e29d8f05fd21c2aaf4088ff43cb82be4b9e3148d02"}, + "pgoutput_decoder": {:hex, :pgoutput_decoder, "0.1.0", "d4ffae6e58a563f2e6de8a0495d9f9afbe2f4ac75d6805419cd4a0d05f414c00", [:mix], [], "hexpm", "4dbecbe4eb8de728178fd129ccba810bccafa9a8769c6714c8b7b22963081c27"}, + "telemetry": {:hex, :telemetry, "0.4.3", "a06428a514bdbc63293cd9a6263aad00ddeb66f608163bdec7c8995784080818", [:rebar3], [], "hexpm", "eb72b8365ffda5bed68a620d1da88525e326cb82a75ee61354fc24b844768041"}, +} diff --git a/postgres.conf b/postgres.conf new file mode 100644 index 00000000..8bb4a269 --- /dev/null +++ b/postgres.conf @@ -0,0 +1,798 @@ +# ----------------------------- +# PostgreSQL configuration file +# ----------------------------- +# +# This file consists of lines of the form: +# +# name = value +# +# (The "=" is optional.) Whitespace may be used. Comments are introduced with +# "#" anywhere on a line. The complete list of parameter names and allowed +# values can be found in the PostgreSQL documentation. +# +# The commented-out settings shown in this file represent the default values. +# Re-commenting a setting is NOT sufficient to revert it to the default value; +# you need to reload the server. +# +# This file is read on server startup and when the server receives a SIGHUP +# signal. If you edit the file on a running system, you have to SIGHUP the +# server for the changes to take effect, run "pg_ctl reload", or execute +# "SELECT pg_reload_conf()". Some parameters, which are marked below, +# require a server shutdown and restart to take effect. +# +# Any parameter can also be given as a command-line option to the server, e.g., +# "postgres -c log_connections=on". Some parameters can be changed at run time +# with the "SET" SQL command. +# +# Memory units: B = bytes Time units: us = microseconds +# kB = kilobytes ms = milliseconds +# MB = megabytes s = seconds +# GB = gigabytes min = minutes +# TB = terabytes h = hours +# d = days + + +#------------------------------------------------------------------------------ +# FILE LOCATIONS +#------------------------------------------------------------------------------ + +# The default values of these variables are driven from the -D command-line +# option or PGDATA environment variable, represented here as ConfigDir. + +#data_directory = 'ConfigDir' # use data in another directory + # (change requires restart) +#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file + # (change requires restart) +#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file + # (change requires restart) + +# If external_pid_file is not explicitly set, no extra PID file is written. 
+#external_pid_file = '' # write an extra PID file + # (change requires restart) + + +#------------------------------------------------------------------------------ +# CONNECTIONS AND AUTHENTICATION +#------------------------------------------------------------------------------ + +# - Connection Settings - + +listen_addresses = '*' + # comma-separated list of addresses; + # defaults to 'localhost'; use '*' for all + # (change requires restart) +#port = 5432 # (change requires restart) +#max_connections = 100 # (change requires restart) +#superuser_reserved_connections = 3 # (change requires restart) +#unix_socket_directories = '/tmp' # comma-separated list of directories + # (change requires restart) +#unix_socket_group = '' # (change requires restart) +#unix_socket_permissions = 0777 # begin with 0 to use octal notation + # (change requires restart) +#bonjour = off # advertise server via Bonjour + # (change requires restart) +#bonjour_name = '' # defaults to the computer name + # (change requires restart) + +# - TCP settings - +# see "man tcp" for details + +#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; + # 0 selects the system default +#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; + # 0 selects the system default +#tcp_keepalives_count = 0 # TCP_KEEPCNT; + # 0 selects the system default +#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds; + # 0 selects the system default + +#client_connection_check_interval = 0 # time between checks for client + # disconnection while running queries; + # 0 for never + +# - Authentication - + +#authentication_timeout = 1min # 1s-600s +#password_encryption = scram-sha-256 # scram-sha-256 or md5 +#db_user_namespace = off + +# GSSAPI using Kerberos +#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab' +#krb_caseins_users = off + +# - SSL - + +#ssl = off +#ssl_ca_file = '' +#ssl_cert_file = 'server.crt' +#ssl_crl_file = '' +#ssl_crl_dir = '' +#ssl_key_file = 'server.key' +#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers +#ssl_prefer_server_ciphers = on +#ssl_ecdh_curve = 'prime256v1' +#ssl_min_protocol_version = 'TLSv1.2' +#ssl_max_protocol_version = '' +#ssl_dh_params_file = '' +#ssl_passphrase_command = '' +#ssl_passphrase_command_supports_reload = off + + +#------------------------------------------------------------------------------ +# RESOURCE USAGE (except WAL) +#------------------------------------------------------------------------------ + +# - Memory - + +#shared_buffers = 32MB # min 128kB + # (change requires restart) +#huge_pages = try # on, off, or try + # (change requires restart) +#huge_page_size = 0 # zero for system default + # (change requires restart) +#temp_buffers = 8MB # min 800kB +#max_prepared_transactions = 0 # zero disables the feature + # (change requires restart) +# Caution: it is not advisable to set max_prepared_transactions nonzero unless +# you actively intend to use prepared transactions. 
+#work_mem = 4MB # min 64kB +#hash_mem_multiplier = 1.0 # 1-1000.0 multiplier on hash table work_mem +#maintenance_work_mem = 64MB # min 1MB +#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem +#logical_decoding_work_mem = 64MB # min 64kB +#max_stack_depth = 2MB # min 100kB +#shared_memory_type = mmap # the default is the first option + # supported by the operating system: + # mmap + # sysv + # windows + # (change requires restart) +#dynamic_shared_memory_type = posix # the default is the first option + # supported by the operating system: + # posix + # sysv + # windows + # mmap + # (change requires restart) +#min_dynamic_shared_memory = 0MB # (change requires restart) + +# - Disk - + +#temp_file_limit = -1 # limits per-process temp file space + # in kilobytes, or -1 for no limit + +# - Kernel Resources - + +#max_files_per_process = 1000 # min 64 + # (change requires restart) + +# - Cost-Based Vacuum Delay - + +#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables) +#vacuum_cost_page_hit = 1 # 0-10000 credits +#vacuum_cost_page_miss = 2 # 0-10000 credits +#vacuum_cost_page_dirty = 20 # 0-10000 credits +#vacuum_cost_limit = 200 # 1-10000 credits + +# - Background Writer - + +#bgwriter_delay = 200ms # 10-10000ms between rounds +#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables +#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round +#bgwriter_flush_after = 0 # measured in pages, 0 disables + +# - Asynchronous Behavior - + +#backend_flush_after = 0 # measured in pages, 0 disables +#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching +#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching +#max_worker_processes = 8 # (change requires restart) +#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers +#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers +#max_parallel_workers = 8 # maximum number of max_worker_processes that + # can be used in parallel operations +#parallel_leader_participation = on +#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate + # (change requires restart) + + +#------------------------------------------------------------------------------ +# WRITE-AHEAD LOG +#------------------------------------------------------------------------------ + +# - Settings - + +wal_level = logical # minimal, replica, or logical + # (change requires restart) +#fsync = on # flush data to disk for crash safety + # (turning this off can cause + # unrecoverable data corruption) +#synchronous_commit = on # synchronization level; + # off, local, remote_write, remote_apply, or on +#wal_sync_method = fsync # the default is the first option + # supported by the operating system: + # open_datasync + # fdatasync (default on Linux and FreeBSD) + # fsync + # fsync_writethrough + # open_sync +#full_page_writes = on # recover from partial page writes +#wal_log_hints = off # also do full page writes of non-critical updates + # (change requires restart) +#wal_compression = off # enable compression of full-page writes +#wal_init_zero = on # zero-fill new WAL files +#wal_recycle = on # recycle WAL files +#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers + # (change requires restart) +#wal_writer_delay = 200ms # 1-10000 milliseconds +#wal_writer_flush_after = 1MB # measured in pages, 0 disables +#wal_skip_threshold = 2MB + +#commit_delay = 0 # range 0-100000, in microseconds +#commit_siblings = 5 # range 1-1000 + +# - Checkpoints - + +#checkpoint_timeout = 5min # 
range 30s-1d +#checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0 +#checkpoint_flush_after = 0 # measured in pages, 0 disables +#checkpoint_warning = 30s # 0 disables +#max_wal_size = 1GB +#min_wal_size = 80MB + +# - Archiving - + +#archive_mode = off # enables archiving; off, on, or always + # (change requires restart) +#archive_command = '' # command to use to archive a logfile segment + # placeholders: %p = path of file to archive + # %f = file name only + # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' +#archive_timeout = 0 # force a logfile segment switch after this + # number of seconds; 0 disables + +# - Archive Recovery - + +# These are only used in recovery mode. + +#restore_command = '' # command to use to restore an archived logfile segment + # placeholders: %p = path of file to restore + # %f = file name only + # e.g. 'cp /mnt/server/archivedir/%f %p' +#archive_cleanup_command = '' # command to execute at every restartpoint +#recovery_end_command = '' # command to execute at completion of recovery + +# - Recovery Target - + +# Set these only when performing a targeted recovery. + +#recovery_target = '' # 'immediate' to end recovery as soon as a + # consistent state is reached + # (change requires restart) +#recovery_target_name = '' # the named restore point to which recovery will proceed + # (change requires restart) +#recovery_target_time = '' # the time stamp up to which recovery will proceed + # (change requires restart) +#recovery_target_xid = '' # the transaction ID up to which recovery will proceed + # (change requires restart) +#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed + # (change requires restart) +#recovery_target_inclusive = on # Specifies whether to stop: + # just after the specified recovery target (on) + # just before the recovery target (off) + # (change requires restart) +#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID + # (change requires restart) +#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown' + # (change requires restart) + + +#------------------------------------------------------------------------------ +# REPLICATION +#------------------------------------------------------------------------------ + +# - Sending Servers - + +# Set these on the primary and on any standby that will send replication data. + +#max_wal_senders = 10 # max number of walsender processes + # (change requires restart) +#max_replication_slots = 10 # max number of replication slots + # (change requires restart) +#wal_keep_size = 0 # in megabytes; 0 disables +#max_slot_wal_keep_size = -1 # in megabytes; -1 disables +#wal_sender_timeout = 60s # in milliseconds; 0 disables +#track_commit_timestamp = off # collect timestamp of transaction commit + # (change requires restart) + +# - Primary Server - + +# These settings are ignored on a standby server. + +#synchronous_standby_names = '' # standby servers that provide sync rep + # method to choose sync standbys, number of sync standbys, + # and comma-separated list of application_name + # from standby(s); '*' = all +#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed + +# - Standby Servers - + +# These settings are ignored on a primary server. 
+ +#primary_conninfo = '' # connection string to sending server +#primary_slot_name = '' # replication slot on sending server +#promote_trigger_file = '' # file name whose presence ends recovery +#hot_standby = on # "off" disallows queries during recovery + # (change requires restart) +#max_standby_archive_delay = 30s # max delay before canceling queries + # when reading WAL from archive; + # -1 allows indefinite delay +#max_standby_streaming_delay = 30s # max delay before canceling queries + # when reading streaming WAL; + # -1 allows indefinite delay +#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name + # is not set +#wal_receiver_status_interval = 10s # send replies at least this often + # 0 disables +#hot_standby_feedback = off # send info from standby to prevent + # query conflicts +#wal_receiver_timeout = 60s # time that receiver waits for + # communication from primary + # in milliseconds; 0 disables +#wal_retrieve_retry_interval = 5s # time to wait before retrying to + # retrieve WAL after a failed attempt +#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery + +# - Subscribers - + +# These settings are ignored on a publisher. + +#max_logical_replication_workers = 4 # taken from max_worker_processes + # (change requires restart) +#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers + + +#------------------------------------------------------------------------------ +# QUERY TUNING +#------------------------------------------------------------------------------ + +# - Planner Method Configuration - + +#enable_async_append = on +#enable_bitmapscan = on +#enable_gathermerge = on +#enable_hashagg = on +#enable_hashjoin = on +#enable_incremental_sort = on +#enable_indexscan = on +#enable_indexonlyscan = on +#enable_material = on +#enable_memoize = on +#enable_mergejoin = on +#enable_nestloop = on +#enable_parallel_append = on +#enable_parallel_hash = on +#enable_partition_pruning = on +#enable_partitionwise_join = off +#enable_partitionwise_aggregate = off +#enable_seqscan = on +#enable_sort = on +#enable_tidscan = on + +# - Planner Cost Constants - + +#seq_page_cost = 1.0 # measured on an arbitrary scale +#random_page_cost = 4.0 # same scale as above +#cpu_tuple_cost = 0.01 # same scale as above +#cpu_index_tuple_cost = 0.005 # same scale as above +#cpu_operator_cost = 0.0025 # same scale as above +#parallel_setup_cost = 1000.0 # same scale as above +#parallel_tuple_cost = 0.1 # same scale as above +#min_parallel_table_scan_size = 8MB +#min_parallel_index_scan_size = 512kB +#effective_cache_size = 4GB + +#jit_above_cost = 100000 # perform JIT compilation if available + # and query more expensive than this; + # -1 disables +#jit_inline_above_cost = 500000 # inline small functions if query is + # more expensive than this; -1 disables +#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if + # query is more expensive than this; + # -1 disables + +# - Genetic Query Optimizer - + +#geqo = on +#geqo_threshold = 12 +#geqo_effort = 5 # range 1-10 +#geqo_pool_size = 0 # selects default based on effort +#geqo_generations = 0 # selects default based on effort +#geqo_selection_bias = 2.0 # range 1.5-2.0 +#geqo_seed = 0.0 # range 0.0-1.0 + +# - Other Planner Options - + +#default_statistics_target = 100 # range 1-10000 +#constraint_exclusion = partition # on, off, or partition +#cursor_tuple_fraction = 0.1 # range 0.0-1.0 +#from_collapse_limit = 8 +#jit = on # allow JIT compilation 
+#join_collapse_limit = 8 # 1 disables collapsing of explicit + # JOIN clauses +#plan_cache_mode = auto # auto, force_generic_plan or + # force_custom_plan + + +#------------------------------------------------------------------------------ +# REPORTING AND LOGGING +#------------------------------------------------------------------------------ + +# - Where to Log - + +#log_destination = 'stderr' # Valid values are combinations of + # stderr, csvlog, syslog, and eventlog, + # depending on platform. csvlog + # requires logging_collector to be on. + +# This is used when logging to stderr: +#logging_collector = off # Enable capturing of stderr and csvlog + # into log files. Required to be on for + # csvlogs. + # (change requires restart) + +# These are only used if logging_collector is on: +#log_directory = 'log' # directory where log files are written, + # can be absolute or relative to PGDATA +#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern, + # can include strftime() escapes +#log_file_mode = 0600 # creation mode for log files, + # begin with 0 to use octal notation +#log_rotation_age = 1d # Automatic rotation of logfiles will + # happen after that time. 0 disables. +#log_rotation_size = 10MB # Automatic rotation of logfiles will + # happen after that much log output. + # 0 disables. +#log_truncate_on_rotation = off # If on, an existing log file with the + # same name as the new log file will be + # truncated rather than appended to. + # But such truncation only occurs on + # time-driven rotation, not on restarts + # or size-driven rotation. Default is + # off, meaning append to existing files + # in all cases. + +# These are relevant when logging to syslog: +#syslog_facility = 'LOCAL0' +#syslog_ident = 'postgres' +#syslog_sequence_numbers = on +#syslog_split_messages = on + +# This is only relevant when logging to eventlog (Windows): +# (change requires restart) +#event_source = 'PostgreSQL' + +# - When to Log - + +#log_min_messages = warning # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic + +#log_min_error_statement = error # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic (effectively off) + +#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements + # and their durations, > 0 logs only + # statements running at least this number + # of milliseconds + +#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements + # and their durations, > 0 logs only a sample of + # statements running at least this number + # of milliseconds; + # sample fraction is determined by log_statement_sample_rate + +#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding + # log_min_duration_sample to be logged; + # 1.0 logs all such statements, 0.0 never logs + + +#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements + # are logged regardless of their duration; 1.0 logs all + # statements from all transactions, 0.0 never logs + +# - What to Log - + +#debug_print_parse = off +#debug_print_rewritten = off +#debug_print_plan = off +#debug_pretty_print = on +#log_autovacuum_min_duration = -1 # log autovacuum activity; + # -1 disables, 0 logs all actions and + # their durations, > 0 logs only + # actions running at least this number + # of milliseconds. 
+#log_checkpoints = off +#log_connections = off +#log_disconnections = off +#log_duration = off +#log_error_verbosity = default # terse, default, or verbose messages +#log_hostname = off +#log_line_prefix = '%m [%p] ' # special values: + # %a = application name + # %u = user name + # %d = database name + # %r = remote host and port + # %h = remote host + # %b = backend type + # %p = process ID + # %P = process ID of parallel group leader + # %t = timestamp without milliseconds + # %m = timestamp with milliseconds + # %n = timestamp with milliseconds (as a Unix epoch) + # %Q = query ID (0 if none or not computed) + # %i = command tag + # %e = SQL state + # %c = session ID + # %l = session line number + # %s = session start timestamp + # %v = virtual transaction ID + # %x = transaction ID (0 if none) + # %q = stop here in non-session + # processes + # %% = '%' + # e.g. '<%u%%%d> ' +#log_lock_waits = off # log lock waits >= deadlock_timeout +#log_recovery_conflict_waits = off # log standby recovery conflict waits + # >= deadlock_timeout +#log_parameter_max_length = -1 # when logging statements, limit logged + # bind-parameter values to N bytes; + # -1 means print in full, 0 disables +#log_parameter_max_length_on_error = 0 # when logging an error, limit logged + # bind-parameter values to N bytes; + # -1 means print in full, 0 disables +#log_statement = 'none' # none, ddl, mod, all +#log_replication_commands = off +#log_temp_files = -1 # log temporary files equal or larger + # than the specified size in kilobytes; + # -1 disables, 0 logs all temp files +#log_timezone = 'GMT' + + +#------------------------------------------------------------------------------ +# PROCESS TITLE +#------------------------------------------------------------------------------ + +#cluster_name = '' # added to process titles if nonempty + # (change requires restart) +#update_process_title = on + + +#------------------------------------------------------------------------------ +# STATISTICS +#------------------------------------------------------------------------------ + +# - Query and Index Statistics Collector - + +#track_activities = on +#track_activity_query_size = 1024 # (change requires restart) +#track_counts = on +#track_io_timing = off +#track_wal_io_timing = off +#track_functions = none # none, pl, all +#stats_temp_directory = 'pg_stat_tmp' + + +# - Monitoring - + +#compute_query_id = auto +#log_statement_stats = off +#log_parser_stats = off +#log_planner_stats = off +#log_executor_stats = off + + +#------------------------------------------------------------------------------ +# AUTOVACUUM +#------------------------------------------------------------------------------ + +#autovacuum = on # Enable autovacuum subprocess? 'on' + # requires track_counts to also be on. 
+#autovacuum_max_workers = 3 # max number of autovacuum subprocesses + # (change requires restart) +#autovacuum_naptime = 1min # time between autovacuum runs +#autovacuum_vacuum_threshold = 50 # min number of row updates before + # vacuum +#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts + # before vacuum; -1 disables insert + # vacuums +#autovacuum_analyze_threshold = 50 # min number of row updates before + # analyze +#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum +#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table + # size before insert vacuum +#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze +#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum + # (change requires restart) +#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age + # before forced vacuum + # (change requires restart) +#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for + # autovacuum, in milliseconds; + # -1 means use vacuum_cost_delay +#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for + # autovacuum, -1 means use + # vacuum_cost_limit + + +#------------------------------------------------------------------------------ +# CLIENT CONNECTION DEFAULTS +#------------------------------------------------------------------------------ + +# - Statement Behavior - + +#client_min_messages = notice # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # log + # notice + # warning + # error +#search_path = '"$user", public' # schema names +#row_security = on +#default_table_access_method = 'heap' +#default_tablespace = '' # a tablespace name, '' uses the default +#default_toast_compression = 'pglz' # 'pglz' or 'lz4' +#temp_tablespaces = '' # a list of tablespace names, '' uses + # only default tablespace +#check_function_bodies = on +#default_transaction_isolation = 'read committed' +#default_transaction_read_only = off +#default_transaction_deferrable = off +#session_replication_role = 'origin' +#statement_timeout = 0 # in milliseconds, 0 is disabled +#lock_timeout = 0 # in milliseconds, 0 is disabled +#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled +#idle_session_timeout = 0 # in milliseconds, 0 is disabled +#vacuum_freeze_table_age = 150000000 +#vacuum_freeze_min_age = 50000000 +#vacuum_failsafe_age = 1600000000 +#vacuum_multixact_freeze_table_age = 150000000 +#vacuum_multixact_freeze_min_age = 5000000 +#vacuum_multixact_failsafe_age = 1600000000 +#bytea_output = 'hex' # hex, escape +#xmlbinary = 'base64' +#xmloption = 'content' +#gin_pending_list_limit = 4MB + +# - Locale and Formatting - + +#datestyle = 'iso, mdy' +#intervalstyle = 'postgres' +#timezone = 'GMT' +#timezone_abbreviations = 'Default' # Select the set of available time zone + # abbreviations. Currently, there are + # Default + # Australia (historical usage) + # India + # You can create your own file in + # share/timezonesets/. +#extra_float_digits = 1 # min -15, max 3; any value >0 actually + # selects precise output mode +#client_encoding = sql_ascii # actually, defaults to database + # encoding + +# These settings are initialized by initdb, but they can be changed. 
+#lc_messages = 'C' # locale for system error message + # strings +#lc_monetary = 'C' # locale for monetary formatting +#lc_numeric = 'C' # locale for number formatting +#lc_time = 'C' # locale for time formatting + +# default configuration for text search +#default_text_search_config = 'pg_catalog.simple' + +# - Shared Library Preloading - + +#local_preload_libraries = '' +#session_preload_libraries = '' +#shared_preload_libraries = '' # (change requires restart) +#jit_provider = 'llvmjit' # JIT library to use + +# - Other Defaults - + +#dynamic_library_path = '$libdir' +#extension_destdir = '' # prepend path when loading extensions + # and shared objects (added by Debian) +#gin_fuzzy_search_limit = 0 + + +#------------------------------------------------------------------------------ +# LOCK MANAGEMENT +#------------------------------------------------------------------------------ + +#deadlock_timeout = 1s +#max_locks_per_transaction = 64 # min 10 + # (change requires restart) +#max_pred_locks_per_transaction = 64 # min 10 + # (change requires restart) +#max_pred_locks_per_relation = -2 # negative values mean + # (max_pred_locks_per_transaction + # / -max_pred_locks_per_relation) - 1 +#max_pred_locks_per_page = 2 # min 0 + + +#------------------------------------------------------------------------------ +# VERSION AND PLATFORM COMPATIBILITY +#------------------------------------------------------------------------------ + +# - Previous PostgreSQL Versions - + +#array_nulls = on +#backslash_quote = safe_encoding # on, off, or safe_encoding +#escape_string_warning = on +#lo_compat_privileges = off +#quote_all_identifiers = off +#standard_conforming_strings = on +#synchronize_seqscans = on + +# - Other Platforms and Clients - + +#transform_null_equals = off + + +#------------------------------------------------------------------------------ +# ERROR HANDLING +#------------------------------------------------------------------------------ + +#exit_on_error = off # terminate session on any error? +#restart_after_crash = on # reinitialize after backend crash? +#data_sync_retry = off # retry or panic on failure to fsync + # data? + # (change requires restart) +#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+) + + +#------------------------------------------------------------------------------ +# CONFIG FILE INCLUDES +#------------------------------------------------------------------------------ + +# These options allow settings to be loaded from files other than the +# default postgresql.conf. Note that these are directives, not variable +# assignments, so they can usefully be given more than once. + +#include_dir = '...' # include files ending in '.conf' from + # a directory, e.g., 'conf.d' +#include_if_exists = '...' # include file only if it exists +#include = '...' # include file + + +#------------------------------------------------------------------------------ +# CUSTOMIZED OPTIONS +#------------------------------------------------------------------------------ + +# Add settings for extensions here diff --git a/test/electric_test.exs b/test/electric_test.exs new file mode 100644 index 00000000..69439ea7 --- /dev/null +++ b/test/electric_test.exs @@ -0,0 +1,8 @@ +defmodule ElectricTest do + use ExUnit.Case + doctest Electric + + test "greets the world" do + assert Electric.hello() == :world + end +end diff --git a/test/test_helper.exs b/test/test_helper.exs new file mode 100644 index 00000000..869559e7 --- /dev/null +++ b/test/test_helper.exs @@ -0,0 +1 @@ +ExUnit.start()