diff --git a/Makefile b/Makefile index 183442150d..7e5b5f7078 100644 --- a/Makefile +++ b/Makefile @@ -87,7 +87,7 @@ endif # ci here doesn't refer to continuous integration, but to cabal-run-integration.sh # Usage: make ci - build & run all tests, excluding integration # make ci package=all - build & run all tests, including integration -# make ci package=brig - build brig & run "brig-integration" +# make ci package=brig - build brig & run "brig-integration" # make ci package=integration - build & run "integration" # # You can pass environment variables to all the suites, like so @@ -128,7 +128,8 @@ sanitize-pr: list-flaky-tests: @echo -e "\n\nif you want to run these, set RUN_FLAKY_TESTS=1\n\n" - @git grep -Hn '\bflakyTestCase \"' + @git grep -Hne '\bflakyTestCase \"' + @git grep -Hne '[^^]\bflakyTest\b' .PHONY: cabal-fmt cabal-fmt: diff --git a/cassandra-schema.cql b/cassandra-schema.cql index 7e10bed09e..8ca8c9b35e 100644 --- a/cassandra-schema.cql +++ b/cassandra-schema.cql @@ -1,26 +1,24 @@ -- automatically generated with `make git-add-cassandra-schema` -CREATE KEYSPACE galley_test WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} AND durable_writes = true; +CREATE KEYSPACE brig_test WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} AND durable_writes = true; -CREATE TYPE galley_test.permissions ( - self bigint, - copy bigint +CREATE TYPE brig_test.asset ( + typ int, + key text, + size int ); -CREATE TYPE galley_test.pubkey ( +CREATE TYPE brig_test.pubkey ( typ int, size int, pem blob ); -CREATE TABLE galley_test.meta ( - id int, - version int, - date timestamp, - descr text, - PRIMARY KEY (id, version) -) WITH CLUSTERING ORDER BY (version ASC) - AND bloom_filter_fp_chance = 0.01 +CREATE TABLE brig_test.team_invitation_info ( + code ascii PRIMARY KEY, + id uuid, + team uuid +) WITH bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' AND compaction = {'class': 
'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} @@ -35,12 +33,10 @@ CREATE TABLE galley_test.meta ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE galley_test.team_conv ( - team uuid, - conv uuid, - PRIMARY KEY (team, conv) -) WITH CLUSTERING ORDER BY (conv ASC) - AND bloom_filter_fp_chance = 0.1 +CREATE TABLE brig_test.provider_keys ( + key text PRIMARY KEY, + provider uuid +) WITH bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} @@ -55,19 +51,20 @@ CREATE TABLE galley_test.team_conv ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE galley_test.user_team ( - user uuid, - team uuid, - PRIMARY KEY (user, team) -) WITH CLUSTERING ORDER BY (team ASC) - AND bloom_filter_fp_chance = 0.1 +CREATE TABLE brig_test.oauth_refresh_token ( + id uuid PRIMARY KEY, + client uuid, + created_at timestamp, + scope set, + user uuid +) WITH bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 + AND default_time_to_live = 14515200 AND gc_grace_seconds = 864000 AND max_index_interval = 2048 AND memtable_flush_period_in_ms = 0 @@ -75,15 +72,13 @@ CREATE TABLE galley_test.user_team ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE galley_test.service ( - provider uuid, - id uuid, - auth_token 
ascii, - base_url blob, - enabled boolean, - fingerprints set, - PRIMARY KEY (provider, id) -) WITH CLUSTERING ORDER BY (id ASC) +CREATE TABLE brig_test.team_invitation_email ( + email text, + team uuid, + code ascii, + invitation uuid, + PRIMARY KEY (email, team) +) WITH CLUSTERING ORDER BY (team ASC) AND bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' @@ -99,14 +94,10 @@ CREATE TABLE galley_test.service ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE galley_test.data_migration ( - id int, - version int, - date timestamp, - descr text, - PRIMARY KEY (id, version) -) WITH CLUSTERING ORDER BY (version ASC) - AND bloom_filter_fp_chance = 0.01 +CREATE TABLE brig_test.rich_info ( + user uuid PRIMARY KEY, + json blob +) WITH bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} @@ -121,40 +112,10 @@ CREATE TABLE galley_test.data_migration ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE galley_test.team_features ( - team_id uuid PRIMARY KEY, - app_lock_enforce int, - app_lock_inactivity_timeout_secs int, - app_lock_status int, - conference_calling int, - digital_signatures int, - expose_invitation_urls_to_team_admin int, - file_sharing int, - file_sharing_lock_status int, - guest_links_lock_status int, - guest_links_status int, - legalhold_status int, - mls_allowed_ciphersuites set, - mls_default_ciphersuite int, - mls_default_protocol int, - mls_e2eid_acme_discovery_url blob, - mls_e2eid_grace_period int, - mls_e2eid_lock_status int, - mls_e2eid_status int, - mls_e2eid_ver_exp timestamp, - mls_protocol_toggle_users set, - mls_status int, - outlook_cal_integration_lock_status int, - outlook_cal_integration_status int, - search_visibility_inbound_status 
int, - search_visibility_status int, - self_deleting_messages_lock_status int, - self_deleting_messages_status int, - self_deleting_messages_ttl int, - snd_factor_password_challenge_lock_status int, - snd_factor_password_challenge_status int, - sso_status int, - validate_saml_emails int +CREATE TABLE brig_test.user_keys_hash ( + key blob PRIMARY KEY, + key_type int, + user uuid ) WITH bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' @@ -170,22 +131,14 @@ CREATE TABLE galley_test.team_features ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE galley_test.member ( - conv uuid, - user uuid, - conversation_role text, - hidden boolean, - hidden_ref text, - otr_archived boolean, - otr_archived_ref text, - otr_muted boolean, - otr_muted_ref text, - otr_muted_status int, - provider uuid, +CREATE TABLE brig_test.service_tag ( + bucket int, + tag bigint, + name text, service uuid, - status int, - PRIMARY KEY (conv, user) -) WITH CLUSTERING ORDER BY (user ASC) + provider uuid, + PRIMARY KEY ((bucket, tag), name, service) +) WITH CLUSTERING ORDER BY (name ASC, service ASC) AND bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' @@ -201,11 +154,14 @@ CREATE TABLE galley_test.member ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE galley_test.custom_backend ( - domain text PRIMARY KEY, - config_json_url blob, - webapp_welcome_url blob -) WITH bloom_filter_fp_chance = 0.01 +CREATE TABLE brig_test.meta ( + id int, + version int, + date timestamp, + descr text, + PRIMARY KEY (id, version) +) WITH CLUSTERING ORDER BY (version ASC) + AND bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} @@ -220,19 +176,10 @@ 
CREATE TABLE galley_test.custom_backend ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE galley_test.user_remote_conv ( - user uuid, - conv_remote_domain text, - conv_remote_id uuid, - hidden boolean, - hidden_ref text, - otr_archived boolean, - otr_archived_ref text, - otr_muted_ref text, - otr_muted_status int, - PRIMARY KEY (user, conv_remote_domain, conv_remote_id) -) WITH CLUSTERING ORDER BY (conv_remote_domain ASC, conv_remote_id ASC) - AND bloom_filter_fp_chance = 0.1 +CREATE TABLE brig_test.unique_claims ( + value text PRIMARY KEY, + claims set +) WITH bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} @@ -240,19 +187,27 @@ CREATE TABLE galley_test.user_remote_conv ( AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 AND default_time_to_live = 0 - AND gc_grace_seconds = 864000 + AND gc_grace_seconds = 0 AND max_index_interval = 2048 AND memtable_flush_period_in_ms = 0 AND min_index_interval = 128 AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE galley_test.legalhold_whitelisted ( - team uuid PRIMARY KEY -) WITH bloom_filter_fp_chance = 0.01 +CREATE TABLE brig_test.user_cookies ( + user uuid, + expires timestamp, + id bigint, + created timestamp, + label text, + succ_id bigint, + type int, + PRIMARY KEY (user, expires, id) +) WITH CLUSTERING ORDER BY (expires ASC, id ASC) + AND bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND 
crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 @@ -264,13 +219,13 @@ CREATE TABLE galley_test.legalhold_whitelisted ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE galley_test.member_remote_user ( - conv uuid, - user_remote_domain text, - user_remote_id uuid, - conversation_role text, - PRIMARY KEY (conv, user_remote_domain, user_remote_id) -) WITH CLUSTERING ORDER BY (user_remote_domain ASC, user_remote_id ASC) +CREATE TABLE brig_test.mls_key_packages ( + user uuid, + client text, + ref blob, + data blob, + PRIMARY KEY ((user, client), ref) +) WITH CLUSTERING ORDER BY (ref ASC) AND bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' @@ -286,16 +241,14 @@ CREATE TABLE galley_test.member_remote_user ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE galley_test.team_member ( - team uuid, - user uuid, - invited_at timestamp, - invited_by uuid, - legalhold_status int, - perms frozen, - PRIMARY KEY (team, user) -) WITH CLUSTERING ORDER BY (user ASC) - AND bloom_filter_fp_chance = 0.1 +CREATE TABLE brig_test.mls_key_package_refs ( + ref blob PRIMARY KEY, + client text, + conv uuid, + conv_domain text, + domain text, + user uuid +) WITH bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} @@ -310,16 +263,13 @@ CREATE TABLE galley_test.team_member ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE galley_test.team_notifications ( - team uuid, - id timeuuid, - payload blob, - PRIMARY KEY (team, id) -) WITH CLUSTERING ORDER BY (id ASC) - AND bloom_filter_fp_chance = 0.1 +CREATE TABLE brig_test.excluded_phones ( + prefix text PRIMARY KEY, + comment text +) WITH bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = 
'' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 @@ -331,16 +281,17 @@ CREATE TABLE galley_test.team_notifications ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE galley_test.legalhold_pending_prekeys ( +CREATE TABLE brig_test.codes ( user uuid, - key int, - data text, - PRIMARY KEY (user, key) -) WITH CLUSTERING ORDER BY (key ASC) - AND bloom_filter_fp_chance = 0.1 + scope int, + code text, + retries int, + PRIMARY KEY (user, scope) +) WITH CLUSTERING ORDER BY (scope ASC) + AND bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 @@ -352,14 +303,13 @@ CREATE TABLE galley_test.legalhold_pending_prekeys ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE galley_test.group_id_conv_id ( - group_id blob PRIMARY KEY, - conv_id uuid, - domain text -) WITH bloom_filter_fp_chance = 0.01 +CREATE TABLE brig_test.user_handle ( + handle text PRIMARY KEY, + user uuid +) WITH bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 
'min_threshold': '4'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 @@ -371,19 +321,25 @@ CREATE TABLE galley_test.group_id_conv_id ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; --- NOTE: this table is unused. It was replaced by mls_group_member_client -CREATE TABLE galley_test.member_client ( - conv uuid, - user_domain text, - user uuid, - client text, - key_package_ref blob, - PRIMARY KEY (conv, user_domain, user, client) -) WITH CLUSTERING ORDER BY (user_domain ASC, user ASC, client ASC) - AND bloom_filter_fp_chance = 0.01 +CREATE TABLE brig_test.service ( + provider uuid, + id uuid, + assets list>, + auth_tokens list, + base_url blob, + descr text, + enabled boolean, + fingerprints list, + name text, + pubkeys list>, + summary text, + tags set, + PRIMARY KEY (provider, id) +) WITH CLUSTERING ORDER BY (id ASC) + AND bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 @@ -395,20 +351,19 @@ CREATE TABLE galley_test.member_client ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE galley_test.legalhold_service ( - team_id uuid PRIMARY KEY, - auth_token ascii, - base_url blob, - fingerprint blob, - pubkey pubkey -) WITH bloom_filter_fp_chance = 0.1 +CREATE TABLE brig_test.oauth_user_refresh_token ( + user uuid, + token_id uuid, + PRIMARY KEY 
(user, token_id) +) WITH CLUSTERING ORDER BY (token_id ASC) + AND bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 + AND default_time_to_live = 14515200 AND gc_grace_seconds = 864000 AND max_index_interval = 2048 AND memtable_flush_period_in_ms = 0 @@ -416,18 +371,14 @@ CREATE TABLE galley_test.legalhold_service ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE galley_test.conversation_codes ( - key ascii, - scope int, - conversation uuid, - password blob, - value ascii, - PRIMARY KEY (key, scope) -) WITH CLUSTERING ORDER BY (scope ASC) - AND bloom_filter_fp_chance = 0.1 +CREATE TABLE brig_test.invitation_info ( + code ascii PRIMARY KEY, + id uuid, + inviter uuid +) WITH bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 @@ -439,14 +390,12 @@ CREATE TABLE galley_test.conversation_codes ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE galley_test.mls_group_member_client ( - group_id blob, - user_domain text, - user uuid, - client text, - key_package_ref blob, - 
PRIMARY KEY (group_id, user_domain, user, client) -) WITH CLUSTERING ORDER BY (user_domain ASC, user ASC, client ASC) +CREATE TABLE brig_test.service_whitelist ( + team uuid, + provider uuid, + service uuid, + PRIMARY KEY (team, provider, service) +) WITH CLUSTERING ORDER BY (provider ASC, service ASC) AND bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' @@ -462,10 +411,14 @@ CREATE TABLE galley_test.mls_group_member_client ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE galley_test.clients ( - user uuid PRIMARY KEY, - clients set -) WITH bloom_filter_fp_chance = 0.01 +CREATE TABLE brig_test.provider ( + id uuid PRIMARY KEY, + descr text, + email text, + name text, + password blob, + url blob +) WITH bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} @@ -480,23 +433,9 @@ CREATE TABLE galley_test.clients ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE galley_test.conversation ( - conv uuid PRIMARY KEY, - access set, - access_role int, - access_roles_v2 set, - cipher_suite int, - creator uuid, - deleted boolean, - epoch bigint, - group_id blob, - message_timer bigint, - name text, - protocol int, - public_group_state blob, - receipt_mode int, - team uuid, - type int +CREATE TABLE brig_test.user_keys ( + key text PRIMARY KEY, + user uuid ) WITH bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' @@ -512,15 +451,17 @@ CREATE TABLE galley_test.conversation ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE galley_test.mls_commit_locks ( - group_id blob, - epoch bigint, - PRIMARY KEY (group_id, epoch) -) WITH CLUSTERING ORDER BY (epoch ASC) - AND bloom_filter_fp_chance = 0.01 +CREATE TABLE 
brig_test.mls_public_keys ( + user uuid, + client text, + sig_scheme text, + key blob, + PRIMARY KEY (user, client, sig_scheme) +) WITH CLUSTERING ORDER BY (client ASC, sig_scheme ASC) + AND bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 @@ -532,17 +473,10 @@ CREATE TABLE galley_test.mls_commit_locks ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE galley_test.team ( - team uuid PRIMARY KEY, - binding boolean, - creator uuid, - deleted boolean, - icon text, - icon_key text, - name text, - search_visibility int, - splash_screen text, - status int +CREATE TABLE brig_test.invitee_info ( + invitee uuid PRIMARY KEY, + conv uuid, + inviter uuid ) WITH bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' @@ -558,12 +492,33 @@ CREATE TABLE galley_test.team ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE galley_test.billing_team_member ( - team uuid, +CREATE TABLE brig_test.nonce ( user uuid, - PRIMARY KEY (team, user) -) WITH CLUSTERING ORDER BY (user ASC) + key text, + nonce uuid, + PRIMARY KEY (user, key) +) WITH CLUSTERING ORDER BY (key ASC) AND bloom_filter_fp_chance = 0.01 + AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} + AND comment = '' + AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} + AND compression = {'chunk_length_in_kb': '64', 'class': 
'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND dclocal_read_repair_chance = 0.1 + AND default_time_to_live = 300 + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair_chance = 0.0 + AND speculative_retry = '99PERCENTILE'; + +CREATE TABLE brig_test.login_codes ( + user uuid PRIMARY KEY, + code text, + retries int, + timeout timestamp +) WITH bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} @@ -578,15 +533,12 @@ CREATE TABLE galley_test.billing_team_member ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE galley_test.mls_proposal_refs ( - group_id blob, - epoch bigint, - ref blob, - origin int, - proposal blob, - PRIMARY KEY (group_id, epoch, ref) -) WITH CLUSTERING ORDER BY (epoch ASC, ref ASC) - AND bloom_filter_fp_chance = 0.01 +CREATE TABLE brig_test.oauth_client ( + id uuid PRIMARY KEY, + name text, + redirect_uri blob, + secret blob +) WITH bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} @@ -601,15 +553,18 @@ CREATE TABLE galley_test.mls_proposal_refs ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE galley_test.user ( +CREATE TABLE brig_test.service_team ( + provider uuid, + service uuid, + team uuid, user uuid, conv uuid, - PRIMARY KEY (user, conv) -) WITH CLUSTERING ORDER BY (conv ASC) - AND bloom_filter_fp_chance = 0.1 + PRIMARY KEY ((provider, service), team, user) +) WITH CLUSTERING ORDER BY (team ASC, user ASC) + AND bloom_filter_fp_chance = 0.01 AND caching = 
{'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 @@ -621,18 +576,9 @@ CREATE TABLE galley_test.user ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE KEYSPACE gundeck_test WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} AND durable_writes = true; - -CREATE TABLE gundeck_test.push ( - ptoken text, - app text, - transport int, - client text, - connection blob, - usr uuid, - PRIMARY KEY (ptoken, app, transport) -) WITH CLUSTERING ORDER BY (app ASC, transport ASC) - AND bloom_filter_fp_chance = 0.1 +CREATE TABLE brig_test.blacklist ( + key text PRIMARY KEY +) WITH bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} @@ -647,35 +593,39 @@ CREATE TABLE gundeck_test.push ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE gundeck_test.notifications ( - user uuid, - id timeuuid, - clients set, - payload blob, - PRIMARY KEY (user, id) -) WITH CLUSTERING ORDER BY (id ASC) - AND bloom_filter_fp_chance = 0.1 +CREATE TABLE brig_test.service_whitelist_rev ( + provider uuid, + service uuid, + team uuid, + PRIMARY KEY ((provider, service), team) +) WITH CLUSTERING ORDER BY (team ASC) + AND bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.TimeWindowCompactionStrategy', 'compaction_window_size': '1', 
'compaction_window_unit': 'DAYS', 'max_threshold': '32', 'min_threshold': '4'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 AND default_time_to_live = 0 - AND gc_grace_seconds = 0 + AND gc_grace_seconds = 864000 AND max_index_interval = 2048 AND memtable_flush_period_in_ms = 0 AND min_index_interval = 128 AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE gundeck_test.meta ( - id int, - version int, - date timestamp, - descr text, - PRIMARY KEY (id, version) -) WITH CLUSTERING ORDER BY (version ASC) +CREATE TABLE brig_test.team_invitation ( + team uuid, + id uuid, + code ascii, + created_at timestamp, + created_by uuid, + email text, + name text, + phone text, + role int, + PRIMARY KEY (team, id) +) WITH CLUSTERING ORDER BY (id ASC) AND bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' @@ -691,17 +641,32 @@ CREATE TABLE gundeck_test.meta ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE gundeck_test.user_push ( - usr uuid, - ptoken text, - app text, - transport int, - arn text, - client text, - connection blob, - PRIMARY KEY (usr, ptoken, app, transport) -) WITH CLUSTERING ORDER BY (ptoken ASC, app ASC, transport ASC) - AND bloom_filter_fp_chance = 0.1 +CREATE TABLE brig_test.user ( + id uuid PRIMARY KEY, + accent list, + accent_id int, + activated boolean, + assets list>, + country ascii, + email text, + email_unvalidated text, + expires timestamp, + feature_conference_calling int, + handle text, + language ascii, + managed_by int, + name text, + password blob, + phone text, + picture list, + provider uuid, + searchable boolean, + service uuid, + sso_id text, + status int, + 
supported_protocols int, + team uuid +) WITH bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} @@ -716,25 +681,13 @@ CREATE TABLE gundeck_test.user_push ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE KEYSPACE brig_test WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} AND durable_writes = true; - -CREATE TYPE brig_test.asset ( - typ int, - key text, - size int -); - -CREATE TYPE brig_test.pubkey ( - typ int, - size int, - pem blob -); - -CREATE TABLE brig_test.team_invitation_info ( - code ascii PRIMARY KEY, - id uuid, - team uuid -) WITH bloom_filter_fp_chance = 0.01 +CREATE TABLE brig_test.vcodes_throttle ( + key ascii, + scope int, + initial_delay int, + PRIMARY KEY (key, scope) +) WITH CLUSTERING ORDER BY (scope ASC) + AND bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} @@ -749,13 +702,16 @@ CREATE TABLE brig_test.team_invitation_info ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.provider_keys ( - key text PRIMARY KEY, - provider uuid -) WITH bloom_filter_fp_chance = 0.1 +CREATE TABLE brig_test.properties ( + user uuid, + key ascii, + value blob, + PRIMARY KEY (user, key) +) WITH CLUSTERING ORDER BY (key ASC) + AND bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} AND compression = {'chunk_length_in_kb': '64', 'class': 
'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 @@ -767,20 +723,22 @@ CREATE TABLE brig_test.provider_keys ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.oauth_refresh_token ( - id uuid PRIMARY KEY, - client uuid, - created_at timestamp, - scope set, - user uuid -) WITH bloom_filter_fp_chance = 0.01 +CREATE TABLE brig_test.service_user ( + provider uuid, + service uuid, + user uuid, + conv uuid, + team uuid, + PRIMARY KEY ((provider, service), user) +) WITH CLUSTERING ORDER BY (user ASC) + AND bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 14515200 + AND default_time_to_live = 0 AND gc_grace_seconds = 864000 AND max_index_interval = 2048 AND memtable_flush_period_in_ms = 0 @@ -788,17 +746,17 @@ CREATE TABLE brig_test.oauth_refresh_token ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.team_invitation_email ( - email text, - team uuid, - code ascii, - invitation uuid, - PRIMARY KEY (email, team) -) WITH CLUSTERING ORDER BY (team ASC) +CREATE TABLE brig_test.prekeys ( + user uuid, + client text, + key int, + data text, + PRIMARY KEY (user, client, key) +) WITH CLUSTERING ORDER BY (client ASC, key ASC) AND bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} + AND compaction = {'class': 
'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 @@ -810,9 +768,13 @@ CREATE TABLE brig_test.team_invitation_email ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.rich_info ( - user uuid PRIMARY KEY, - json blob +CREATE TABLE brig_test.oauth_auth_code ( + code ascii PRIMARY KEY, + client uuid, + code_challenge blob, + redirect_uri blob, + scope set, + user uuid ) WITH bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' @@ -820,7 +782,7 @@ CREATE TABLE brig_test.rich_info ( AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 + AND default_time_to_live = 300 AND gc_grace_seconds = 864000 AND max_index_interval = 2048 AND memtable_flush_period_in_ms = 0 @@ -828,11 +790,22 @@ CREATE TABLE brig_test.rich_info ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.user_keys_hash ( - key blob PRIMARY KEY, - key_type int, - user uuid -) WITH bloom_filter_fp_chance = 0.1 +CREATE TABLE brig_test.clients ( + user uuid, + client text, + capabilities set, + class int, + cookie text, + ip inet, + label text, + lat double, + lon double, + model text, + tstamp timestamp, + type int, + PRIMARY KEY (user, client) +) WITH CLUSTERING ORDER BY (client ASC) + AND bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} @@ -847,15 +820,10 @@ CREATE TABLE brig_test.user_keys_hash ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE 
brig_test.service_tag ( - bucket int, - tag bigint, - name text, - service uuid, - provider uuid, - PRIMARY KEY ((bucket, tag), name, service) -) WITH CLUSTERING ORDER BY (name ASC, service ASC) - AND bloom_filter_fp_chance = 0.1 +CREATE TABLE brig_test.budget ( + key text PRIMARY KEY, + budget int +) WITH bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} @@ -863,24 +831,27 @@ CREATE TABLE brig_test.service_tag ( AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 AND default_time_to_live = 0 - AND gc_grace_seconds = 864000 + AND gc_grace_seconds = 0 AND max_index_interval = 2048 AND memtable_flush_period_in_ms = 0 AND min_index_interval = 128 AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.meta ( - id int, - version int, - date timestamp, - descr text, - PRIMARY KEY (id, version) -) WITH CLUSTERING ORDER BY (version ASC) - AND bloom_filter_fp_chance = 0.01 +CREATE TABLE brig_test.connection_remote ( + left uuid, + right_domain text, + right_user uuid, + conv_domain text, + conv_id uuid, + last_update timestamp, + status int, + PRIMARY KEY (left, right_domain, right_user) +) WITH CLUSTERING ORDER BY (right_domain ASC, right_user ASC) + AND bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 @@ -892,34 +863,33 @@ CREATE TABLE brig_test.meta ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; 
-CREATE TABLE brig_test.unique_claims ( - value text PRIMARY KEY, - claims set -) WITH bloom_filter_fp_chance = 0.1 +CREATE TABLE brig_test.users_pending_activation ( + user uuid PRIMARY KEY, + expires_at timestamp +) WITH bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 AND default_time_to_live = 0 - AND gc_grace_seconds = 0 + AND gc_grace_seconds = 864000 AND max_index_interval = 2048 AND memtable_flush_period_in_ms = 0 AND min_index_interval = 128 AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.user_cookies ( - user uuid, - expires timestamp, - id bigint, - created timestamp, - label text, - succ_id bigint, - type int, - PRIMARY KEY (user, expires, id) -) WITH CLUSTERING ORDER BY (expires ASC, id ASC) +CREATE TABLE brig_test.connection ( + left uuid, + right uuid, + conv uuid, + last_update timestamp, + message text, + status int, + PRIMARY KEY (left, right) +) WITH CLUSTERING ORDER BY (right ASC) AND bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' @@ -934,15 +904,15 @@ CREATE TABLE brig_test.user_cookies ( AND min_index_interval = 128 AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; +CREATE INDEX conn_status ON brig_test.connection (status); -CREATE TABLE brig_test.mls_key_packages ( - user uuid, - client text, - ref blob, - data blob, - PRIMARY KEY ((user, client), ref) -) WITH CLUSTERING ORDER BY (ref ASC) - AND bloom_filter_fp_chance = 0.1 +CREATE TABLE brig_test.password_reset ( + 
key ascii PRIMARY KEY, + code ascii, + retries int, + timeout timestamp, + user uuid +) WITH bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} @@ -957,17 +927,13 @@ CREATE TABLE brig_test.mls_key_packages ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.mls_key_package_refs ( - ref blob PRIMARY KEY, - client text, - conv uuid, - conv_domain text, - domain text, - user uuid -) WITH bloom_filter_fp_chance = 0.1 +CREATE TABLE brig_test.federation_remotes ( + domain text PRIMARY KEY, + search_policy int +) WITH bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 @@ -979,10 +945,17 @@ CREATE TABLE brig_test.mls_key_package_refs ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.excluded_phones ( - prefix text PRIMARY KEY, - comment text -) WITH bloom_filter_fp_chance = 0.01 +CREATE TABLE brig_test.invitation ( + inviter uuid, + id uuid, + code ascii, + created_at timestamp, + email text, + name text, + phone text, + PRIMARY KEY (inviter, id) +) WITH CLUSTERING ORDER BY (id ASC) + AND bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} @@ -997,17 +970,18 @@ CREATE TABLE brig_test.excluded_phones ( 
AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.codes ( - user uuid, - scope int, - code text, - retries int, - PRIMARY KEY (user, scope) -) WITH CLUSTERING ORDER BY (scope ASC) - AND bloom_filter_fp_chance = 0.01 - AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} +CREATE TABLE brig_test.activation_keys ( + key ascii PRIMARY KEY, + challenge ascii, + code ascii, + key_text text, + key_type ascii, + retries int, + user uuid +) WITH bloom_filter_fp_chance = 0.1 + AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 @@ -1019,10 +993,17 @@ CREATE TABLE brig_test.codes ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.user_handle ( - handle text PRIMARY KEY, - user uuid -) WITH bloom_filter_fp_chance = 0.1 +CREATE TABLE brig_test.vcodes ( + key ascii, + scope int, + account uuid, + email text, + phone text, + retries int, + value ascii, + PRIMARY KEY (key, scope) +) WITH CLUSTERING ORDER BY (scope ASC) + AND bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} @@ -1030,28 +1011,20 @@ CREATE TABLE brig_test.user_handle ( AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 AND default_time_to_live = 0 - AND gc_grace_seconds = 864000 + AND gc_grace_seconds = 0 AND max_index_interval = 2048 AND memtable_flush_period_in_ms = 0 AND min_index_interval = 128 AND read_repair_chance = 0.0 AND 
speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.service ( - provider uuid, - id uuid, - assets list>, - auth_tokens list, - base_url blob, - descr text, - enabled boolean, - fingerprints list, +CREATE TABLE brig_test.service_prefix ( + prefix text, name text, - pubkeys list>, - summary text, - tags set, - PRIMARY KEY (provider, id) -) WITH CLUSTERING ORDER BY (id ASC) + service uuid, + provider uuid, + PRIMARY KEY (prefix, name, service) +) WITH CLUSTERING ORDER BY (name ASC, service ASC) AND bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' @@ -1067,11 +1040,26 @@ CREATE TABLE brig_test.service ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.oauth_user_refresh_token ( - user uuid, - token_id uuid, - PRIMARY KEY (user, token_id) -) WITH CLUSTERING ORDER BY (token_id ASC) +CREATE KEYSPACE galley_test WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} AND durable_writes = true; + +CREATE TYPE galley_test.permissions ( + self bigint, + copy bigint +); + +CREATE TYPE galley_test.pubkey ( + typ int, + size int, + pem blob +); + +CREATE TABLE galley_test.meta ( + id int, + version int, + date timestamp, + descr text, + PRIMARY KEY (id, version) +) WITH CLUSTERING ORDER BY (version ASC) AND bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' @@ -1079,7 +1067,7 @@ CREATE TABLE brig_test.oauth_user_refresh_token ( AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 14515200 + AND default_time_to_live = 0 AND gc_grace_seconds = 864000 AND max_index_interval = 2048 AND memtable_flush_period_in_ms = 0 @@ -1087,14 +1075,15 @@ CREATE TABLE brig_test.oauth_user_refresh_token ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; 
-CREATE TABLE brig_test.invitation_info ( - code ascii PRIMARY KEY, - id uuid, - inviter uuid -) WITH bloom_filter_fp_chance = 0.01 +CREATE TABLE galley_test.team_conv ( + team uuid, + conv uuid, + PRIMARY KEY (team, conv) +) WITH CLUSTERING ORDER BY (conv ASC) + AND bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 @@ -1106,16 +1095,15 @@ CREATE TABLE brig_test.invitation_info ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.service_whitelist ( +CREATE TABLE galley_test.user_team ( + user uuid, team uuid, - provider uuid, - service uuid, - PRIMARY KEY (team, provider, service) -) WITH CLUSTERING ORDER BY (provider ASC, service ASC) - AND bloom_filter_fp_chance = 0.01 + PRIMARY KEY (user, team) +) WITH CLUSTERING ORDER BY (team ASC) + AND bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 @@ -1127,17 +1115,19 @@ CREATE TABLE brig_test.service_whitelist ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.provider ( - id uuid PRIMARY KEY, - descr text, - email text, - name text, 
- password blob, - url blob -) WITH bloom_filter_fp_chance = 0.1 +CREATE TABLE galley_test.service ( + provider uuid, + id uuid, + auth_token ascii, + base_url blob, + enabled boolean, + fingerprints set, + PRIMARY KEY (provider, id) +) WITH CLUSTERING ORDER BY (id ASC) + AND bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 @@ -1149,13 +1139,17 @@ CREATE TABLE brig_test.provider ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.user_keys ( - key text PRIMARY KEY, - user uuid -) WITH bloom_filter_fp_chance = 0.1 +CREATE TABLE galley_test.data_migration ( + id int, + version int, + date timestamp, + descr text, + PRIMARY KEY (id, version) +) WITH CLUSTERING ORDER BY (version ASC) + AND bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 @@ -1167,14 +1161,41 @@ CREATE TABLE brig_test.user_keys ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.mls_public_keys ( - user uuid, - client text, - sig_scheme text, - key blob, - PRIMARY KEY (user, client, sig_scheme) -) WITH CLUSTERING 
ORDER BY (client ASC, sig_scheme ASC) - AND bloom_filter_fp_chance = 0.1 +CREATE TABLE galley_test.team_features ( + team_id uuid PRIMARY KEY, + app_lock_enforce int, + app_lock_inactivity_timeout_secs int, + app_lock_status int, + conference_calling int, + digital_signatures int, + expose_invitation_urls_to_team_admin int, + file_sharing int, + file_sharing_lock_status int, + guest_links_lock_status int, + guest_links_status int, + legalhold_status int, + mls_allowed_ciphersuites set, + mls_default_ciphersuite int, + mls_default_protocol int, + mls_e2eid_acme_discovery_url blob, + mls_e2eid_grace_period int, + mls_e2eid_lock_status int, + mls_e2eid_status int, + mls_e2eid_ver_exp timestamp, + mls_protocol_toggle_users set, + mls_status int, + outlook_cal_integration_lock_status int, + outlook_cal_integration_status int, + search_visibility_inbound_status int, + search_visibility_status int, + self_deleting_messages_lock_status int, + self_deleting_messages_status int, + self_deleting_messages_ttl int, + snd_factor_password_challenge_lock_status int, + snd_factor_password_challenge_status int, + sso_status int, + validate_saml_emails int +) WITH bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} @@ -1189,11 +1210,23 @@ CREATE TABLE brig_test.mls_public_keys ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.invitee_info ( - invitee uuid PRIMARY KEY, +CREATE TABLE galley_test.member ( conv uuid, - inviter uuid -) WITH bloom_filter_fp_chance = 0.1 + user uuid, + conversation_role text, + hidden boolean, + hidden_ref text, + otr_archived boolean, + otr_archived_ref text, + otr_muted boolean, + otr_muted_ref text, + otr_muted_status int, + provider uuid, + service uuid, + status int, + PRIMARY KEY (conv, user) +) WITH CLUSTERING ORDER BY (user ASC) + AND bloom_filter_fp_chance = 0.1 
AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} @@ -1208,20 +1241,18 @@ CREATE TABLE brig_test.invitee_info ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.nonce ( - user uuid, - key text, - nonce uuid, - PRIMARY KEY (user, key) -) WITH CLUSTERING ORDER BY (key ASC) - AND bloom_filter_fp_chance = 0.01 +CREATE TABLE galley_test.custom_backend ( + domain text PRIMARY KEY, + config_json_url blob, + webapp_welcome_url blob +) WITH bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 300 + AND default_time_to_live = 0 AND gc_grace_seconds = 864000 AND max_index_interval = 2048 AND memtable_flush_period_in_ms = 0 @@ -1229,15 +1260,22 @@ CREATE TABLE brig_test.nonce ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.login_codes ( - user uuid PRIMARY KEY, - code text, - retries int, - timeout timestamp -) WITH bloom_filter_fp_chance = 0.01 +CREATE TABLE galley_test.user_remote_conv ( + user uuid, + conv_remote_domain text, + conv_remote_id uuid, + hidden boolean, + hidden_ref text, + otr_archived boolean, + otr_archived_ref text, + otr_muted_ref text, + otr_muted_status int, + PRIMARY KEY (user, conv_remote_domain, conv_remote_id) +) WITH CLUSTERING ORDER BY (conv_remote_domain ASC, conv_remote_id ASC) + AND bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 
'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 @@ -1249,11 +1287,8 @@ CREATE TABLE brig_test.login_codes ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.oauth_client ( - id uuid PRIMARY KEY, - name text, - redirect_uri blob, - secret blob +CREATE TABLE galley_test.legalhold_whitelisted ( + team uuid PRIMARY KEY ) WITH bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' @@ -1269,18 +1304,17 @@ CREATE TABLE brig_test.oauth_client ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.service_team ( - provider uuid, - service uuid, - team uuid, - user uuid, +CREATE TABLE galley_test.member_remote_user ( conv uuid, - PRIMARY KEY ((provider, service), team, user) -) WITH CLUSTERING ORDER BY (team ASC, user ASC) - AND bloom_filter_fp_chance = 0.01 + user_remote_domain text, + user_remote_id uuid, + conversation_role text, + PRIMARY KEY (conv, user_remote_domain, user_remote_id) +) WITH CLUSTERING ORDER BY (user_remote_domain ASC, user_remote_id ASC) + AND bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 @@ -1292,20 +1326,19 @@ CREATE TABLE 
brig_test.service_team ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.invitation ( - inviter uuid, - id uuid, - code ascii, - created_at timestamp, - email text, - name text, - phone text, - PRIMARY KEY (inviter, id) -) WITH CLUSTERING ORDER BY (id ASC) - AND bloom_filter_fp_chance = 0.01 +CREATE TABLE galley_test.team_member ( + team uuid, + user uuid, + invited_at timestamp, + invited_by uuid, + legalhold_status int, + perms frozen, + PRIMARY KEY (team, user) +) WITH CLUSTERING ORDER BY (user ASC) + AND bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 @@ -1317,9 +1350,13 @@ CREATE TABLE brig_test.invitation ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.blacklist ( - key text PRIMARY KEY -) WITH bloom_filter_fp_chance = 0.1 +CREATE TABLE galley_test.team_notifications ( + team uuid, + id timeuuid, + payload blob, + PRIMARY KEY (team, id) +) WITH CLUSTERING ORDER BY (id ASC) + AND bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} @@ -1334,16 +1371,16 @@ CREATE TABLE brig_test.blacklist ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.service_whitelist_rev ( - provider uuid, - service uuid, - team uuid, - PRIMARY KEY ((provider, service), team) -) WITH CLUSTERING ORDER BY (team ASC) - AND bloom_filter_fp_chance = 0.01 +CREATE 
TABLE galley_test.legalhold_pending_prekeys ( + user uuid, + key int, + data text, + PRIMARY KEY (user, key) +) WITH CLUSTERING ORDER BY (key ASC) + AND bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 @@ -1355,19 +1392,11 @@ CREATE TABLE brig_test.service_whitelist_rev ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.team_invitation ( - team uuid, - id uuid, - code ascii, - created_at timestamp, - created_by uuid, - email text, - name text, - phone text, - role int, - PRIMARY KEY (team, id) -) WITH CLUSTERING ORDER BY (id ASC) - AND bloom_filter_fp_chance = 0.01 +CREATE TABLE galley_test.group_id_conv_id ( + group_id blob PRIMARY KEY, + conv_id uuid, + domain text +) WITH bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} @@ -1382,35 +1411,19 @@ CREATE TABLE brig_test.team_invitation ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.user ( - id uuid PRIMARY KEY, - accent list, - accent_id int, - activated boolean, - assets list>, - country ascii, - email text, - email_unvalidated text, - expires timestamp, - feature_conference_calling int, - handle text, - language ascii, - managed_by int, - name text, - password blob, - phone text, - picture list, - provider uuid, - searchable boolean, - service uuid, - sso_id text, - status int, 
- supported_protocols int, - team uuid -) WITH bloom_filter_fp_chance = 0.1 +-- NOTE: this table is unused. It was replaced by mls_group_member_client +CREATE TABLE galley_test.member_client ( + conv uuid, + user_domain text, + user uuid, + client text, + key_package_ref blob, + PRIMARY KEY (conv, user_domain, user, client) +) WITH CLUSTERING ORDER BY (user_domain ASC, user ASC, client ASC) + AND bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 @@ -1422,16 +1435,16 @@ CREATE TABLE brig_test.user ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.vcodes_throttle ( - key ascii, - scope int, - initial_delay int, - PRIMARY KEY (key, scope) -) WITH CLUSTERING ORDER BY (scope ASC) - AND bloom_filter_fp_chance = 0.01 +CREATE TABLE galley_test.legalhold_service ( + team_id uuid PRIMARY KEY, + auth_token ascii, + base_url blob, + fingerprint blob, + pubkey pubkey +) WITH bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 @@ -1443,16 +1456,18 @@ CREATE TABLE brig_test.vcodes_throttle ( AND read_repair_chance = 0.0 AND 
speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.properties ( - user uuid, +CREATE TABLE galley_test.conversation_codes ( key ascii, - value blob, - PRIMARY KEY (user, key) -) WITH CLUSTERING ORDER BY (key ASC) - AND bloom_filter_fp_chance = 0.01 + scope int, + conversation uuid, + password blob, + value ascii, + PRIMARY KEY (key, scope) +) WITH CLUSTERING ORDER BY (scope ASC) + AND bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 @@ -1464,14 +1479,14 @@ CREATE TABLE brig_test.properties ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.service_user ( - provider uuid, - service uuid, +CREATE TABLE galley_test.mls_group_member_client ( + group_id blob, + user_domain text, user uuid, - conv uuid, - team uuid, - PRIMARY KEY ((provider, service), user) -) WITH CLUSTERING ORDER BY (user ASC) + client text, + key_package_ref blob, + PRIMARY KEY (group_id, user_domain, user, client) +) WITH CLUSTERING ORDER BY (user_domain ASC, user ASC, client ASC) AND bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' @@ -1487,14 +1502,10 @@ CREATE TABLE brig_test.service_user ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.prekeys ( - user uuid, - client text, - key int, - data text, - PRIMARY KEY (user, client, key) -) WITH CLUSTERING ORDER BY (client ASC, key ASC) - AND bloom_filter_fp_chance = 0.01 +CREATE TABLE galley_test.clients ( + user uuid PRIMARY KEY, + clients set +) 
WITH bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} @@ -1509,21 +1520,31 @@ CREATE TABLE brig_test.prekeys ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.oauth_auth_code ( - code ascii PRIMARY KEY, - client uuid, - code_challenge blob, - redirect_uri blob, - scope set, - user uuid -) WITH bloom_filter_fp_chance = 0.01 +CREATE TABLE galley_test.conversation ( + conv uuid PRIMARY KEY, + access set, + access_role int, + access_roles_v2 set, + cipher_suite int, + creator uuid, + deleted boolean, + epoch bigint, + group_id blob, + message_timer bigint, + name text, + protocol int, + public_group_state blob, + receipt_mode int, + team uuid, + type int +) WITH bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 300 + AND default_time_to_live = 0 AND gc_grace_seconds = 864000 AND max_index_interval = 2048 AND memtable_flush_period_in_ms = 0 @@ -1531,25 +1552,15 @@ CREATE TABLE brig_test.oauth_auth_code ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.clients ( - user uuid, - client text, - capabilities set, - class int, - cookie text, - ip inet, - label text, - lat double, - lon double, - model text, - tstamp timestamp, - type int, - PRIMARY KEY (user, client) -) WITH CLUSTERING ORDER BY (client ASC) +CREATE TABLE galley_test.mls_commit_locks ( + group_id 
blob, + epoch bigint, + PRIMARY KEY (group_id, epoch) +) WITH CLUSTERING ORDER BY (epoch ASC) AND bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 @@ -1561,9 +1572,17 @@ CREATE TABLE brig_test.clients ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.budget ( - key text PRIMARY KEY, - budget int +CREATE TABLE galley_test.team ( + team uuid PRIMARY KEY, + binding boolean, + creator uuid, + deleted boolean, + icon text, + icon_key text, + name text, + search_visibility int, + splash_screen text, + status int ) WITH bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' @@ -1572,27 +1591,22 @@ CREATE TABLE brig_test.budget ( AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 AND default_time_to_live = 0 - AND gc_grace_seconds = 0 + AND gc_grace_seconds = 864000 AND max_index_interval = 2048 AND memtable_flush_period_in_ms = 0 AND min_index_interval = 128 AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.connection_remote ( - left uuid, - right_domain text, - right_user uuid, - conv_domain text, - conv_id uuid, - last_update timestamp, - status int, - PRIMARY KEY (left, right_domain, right_user) -) WITH CLUSTERING ORDER BY (right_domain ASC, right_user ASC) - AND bloom_filter_fp_chance = 0.1 +CREATE TABLE galley_test.billing_team_member ( + team uuid, + user uuid, + PRIMARY KEY (team, user) +) WITH CLUSTERING ORDER BY (user ASC) + AND bloom_filter_fp_chance = 0.01 
AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 @@ -1604,10 +1618,15 @@ CREATE TABLE brig_test.connection_remote ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.users_pending_activation ( - user uuid PRIMARY KEY, - expires_at timestamp -) WITH bloom_filter_fp_chance = 0.01 +CREATE TABLE galley_test.mls_proposal_refs ( + group_id blob, + epoch bigint, + ref blob, + origin int, + proposal blob, + PRIMARY KEY (group_id, epoch, ref) +) WITH CLUSTERING ORDER BY (epoch ASC, ref ASC) + AND bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} @@ -1622,15 +1641,11 @@ CREATE TABLE brig_test.users_pending_activation ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.connection ( - left uuid, - right uuid, +CREATE TABLE galley_test.user ( + user uuid, conv uuid, - last_update timestamp, - message text, - status int, - PRIMARY KEY (left, right) -) WITH CLUSTERING ORDER BY (right ASC) + PRIMARY KEY (user, conv) +) WITH CLUSTERING ORDER BY (conv ASC) AND bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' @@ -1645,15 +1660,19 @@ CREATE TABLE brig_test.connection ( AND min_index_interval = 128 AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE INDEX conn_status ON brig_test.connection (status); 
-CREATE TABLE brig_test.password_reset ( - key ascii PRIMARY KEY, - code ascii, - retries int, - timeout timestamp, - user uuid -) WITH bloom_filter_fp_chance = 0.1 +CREATE KEYSPACE gundeck_test WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} AND durable_writes = true; + +CREATE TABLE gundeck_test.push ( + ptoken text, + app text, + transport int, + client text, + connection blob, + usr uuid, + PRIMARY KEY (ptoken, app, transport) +) WITH CLUSTERING ORDER BY (app ASC, transport ASC) + AND bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} @@ -1668,61 +1687,60 @@ CREATE TABLE brig_test.password_reset ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.activation_keys ( - key ascii PRIMARY KEY, - challenge ascii, - code ascii, - key_text text, - key_type ascii, - retries int, - user uuid -) WITH bloom_filter_fp_chance = 0.1 +CREATE TABLE gundeck_test.notifications ( + user uuid, + id timeuuid, + clients set, + payload blob, + PRIMARY KEY (user, id) +) WITH CLUSTERING ORDER BY (id ASC) + AND bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.TimeWindowCompactionStrategy', 'compaction_window_size': '1', 'compaction_window_unit': 'DAYS', 'max_threshold': '32', 'min_threshold': '4'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 AND default_time_to_live = 0 - AND gc_grace_seconds = 864000 + AND gc_grace_seconds = 0 AND max_index_interval = 2048 AND memtable_flush_period_in_ms = 0 AND min_index_interval = 128 AND read_repair_chance 
= 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.vcodes ( - key ascii, - scope int, - account uuid, - email text, - phone text, - retries int, - value ascii, - PRIMARY KEY (key, scope) -) WITH CLUSTERING ORDER BY (scope ASC) - AND bloom_filter_fp_chance = 0.1 +CREATE TABLE gundeck_test.meta ( + id int, + version int, + date timestamp, + descr text, + PRIMARY KEY (id, version) +) WITH CLUSTERING ORDER BY (version ASC) + AND bloom_filter_fp_chance = 0.01 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND crc_check_chance = 1.0 AND dclocal_read_repair_chance = 0.1 AND default_time_to_live = 0 - AND gc_grace_seconds = 0 + AND gc_grace_seconds = 864000 AND max_index_interval = 2048 AND memtable_flush_period_in_ms = 0 AND min_index_interval = 128 AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; -CREATE TABLE brig_test.service_prefix ( - prefix text, - name text, - service uuid, - provider uuid, - PRIMARY KEY (prefix, name, service) -) WITH CLUSTERING ORDER BY (name ASC, service ASC) +CREATE TABLE gundeck_test.user_push ( + usr uuid, + ptoken text, + app text, + transport int, + arn text, + client text, + connection blob, + PRIMARY KEY (usr, ptoken, app, transport) +) WITH CLUSTERING ORDER BY (ptoken ASC, app ASC, transport ASC) AND bloom_filter_fp_chance = 0.1 AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} AND comment = '' diff --git a/changelog.d/0-release-notes/pr-3260 b/changelog.d/0-release-notes/pr-3260 new file mode 100644 index 0000000000..a2299fffcb --- /dev/null +++ b/changelog.d/0-release-notes/pr-3260 @@ -0,0 +1 @@ +Federation only: from this 
release on, remote connections should be configured via an internal REST API; the config files will be honored for a transition period, but will be ignored starting in a future release. YOU NEED TO UPDATE YOUR BRIG HELM CHART BEFORE DEPLOYING THIS RELEASE. [Details in the docs.](http://docs.wire.com/understand/configure-federation.html#if-your-instance-has-been-federating-before) \ No newline at end of file diff --git a/charts/brig/templates/configmap.yaml b/charts/brig/templates/configmap.yaml index 73ba4a6f2c..2ed6eb6833 100644 --- a/charts/brig/templates/configmap.yaml +++ b/charts/brig/templates/configmap.yaml @@ -247,7 +247,16 @@ data: setPropertyMaxValueLen: {{ .setPropertyMaxValueLen }} setDeleteThrottleMillis: {{ .setDeleteThrottleMillis }} setFederationDomain: {{ .setFederationDomain }} + {{- if .setFederationStrategy }} + setFederationStrategy: {{ .setFederationStrategy }} + {{- end }} + {{- if .setFederationDomainConfigsUpdateFreq }} + setFederationDomainConfigsUpdateFreq: {{ .setFederationDomainConfigsUpdateFreq }} + {{- end }} {{- if .setFederationDomainConfigs }} + # 'setFederationDomainConfigs' is deprecated as of https://github.com/wireapp/wire-server/pull/3260. See + # https://docs.wire.com/understand/federation/backend-communication.html#configuring-remote-connections + # for details. setFederationDomainConfigs: {{ toYaml .setFederationDomainConfigs | nindent 8 }} {{- end }} {{- if .setSearchSameTeamOnly }} diff --git a/charts/brig/values.yaml b/charts/brig/values.yaml index fcab57a54e..7f75695522 100644 --- a/charts/brig/values.yaml +++ b/charts/brig/values.yaml @@ -100,6 +100,8 @@ config: # Disable one ore more API versions. Please make sure the configuration value is the same in all these charts: # brig, cannon, cargohold, galley, gundeck, proxy, spar. 
# setDisabledAPIVersions: [ v3 ] + setFederationStrategy: allowNone + setFederationDomainConfigsUpdateFreq: 10 smtp: passwordFile: /etc/wire/brig/secrets/smtp-password.txt proxy: {} diff --git a/charts/cannon/templates/configmap.yaml b/charts/cannon/templates/configmap.yaml index 940d601306..0c9c2c43cf 100644 --- a/charts/cannon/templates/configmap.yaml +++ b/charts/cannon/templates/configmap.yaml @@ -14,6 +14,10 @@ data: host: gundeck port: 8080 + brig: + host: brig + port: 8080 + drainOpts: gracePeriodSeconds: {{ .Values.config.drainOpts.gracePeriodSeconds }} millisecondsBetweenBatches: {{ .Values.config.drainOpts.millisecondsBetweenBatches }} diff --git a/charts/federator/templates/configmap.yaml b/charts/federator/templates/configmap.yaml index cbfd53e9e7..44de627107 100644 --- a/charts/federator/templates/configmap.yaml +++ b/charts/federator/templates/configmap.yaml @@ -51,18 +51,5 @@ data: clientCertificate: "/etc/wire/federator/secrets/tls.crt" clientPrivateKey: "/etc/wire/federator/secrets/tls.key" useSystemCAStore: {{ .useSystemCAStore }} - federationStrategy: - {{- if .federationStrategy.allowAll }} - allowAll: - {{- else if .federationStrategy.allowedDomains }} - allowedDomains: - {{- range $domain := .federationStrategy.allowedDomains }} - - {{ $domain | quote }} - {{- end }} - {{- else }} - # In gotemplate there is no way to distinguish between empty list and no - # list, we assume empty list when there is no list - allowedDomains: [] - {{- end}} {{- end }} {{- end }} diff --git a/charts/federator/values.yaml b/charts/federator/values.yaml index 9bb7cf834a..406bb3b17c 100644 --- a/charts/federator/values.yaml +++ b/charts/federator/values.yaml @@ -41,8 +41,6 @@ config: # A client certificate and corresponding private key can be specified # similarly to a custom CA store. 
useSystemCAStore: true - federationStrategy: - allowedDomains: [] podSecurityContext: allowPrivilegeEscalation: false diff --git a/charts/gundeck/templates/configmap.yaml b/charts/gundeck/templates/configmap.yaml index 2349e68cc4..4f63a09144 100644 --- a/charts/gundeck/templates/configmap.yaml +++ b/charts/gundeck/templates/configmap.yaml @@ -13,6 +13,10 @@ data: host: 0.0.0.0 port: {{ $.Values.service.internalPort }} + brig: + host: brig + port: 8080 + cassandra: endpoint: host: {{ .cassandra.host }} diff --git a/docs/src/developer/developer/federation-design-aspects.md b/docs/src/developer/developer/federation-design-aspects.md new file mode 100644 index 0000000000..5e63c6e695 --- /dev/null +++ b/docs/src/developer/developer/federation-design-aspects.md @@ -0,0 +1,29 @@ +# Federation Design Aspects + +(configuring-remote-connections-dev-perspective)= + +## Keeping track of federator remotes + +**Since [PR#3260](https://github.com/wireapp/wire-server/pull/3260).** + +Federation can start and end. These events need handlers to be called +(like remove remote users from local conv), plus it is not convenient +to edit and re-deploy config files every time that happens. Hence +remotes are stored in cassandra in brig, and every pod of every +service keeps a cache in an `IORef` in its `Env` (this information is +needed in many end-points, so it has to remain as fast as read access +to `Env`). + +See {ref}`configure-federation-strategy-in-brig` for the +administrator's point of view. If you haven't done so, go read that +section now! + +The state is persisted in cassandra table `brig.federation_remotes`. +brig provides the contents via an internal CRUD API (see +{ref}`configure-federation-strategy-in-brig` for the links). In the +future, we may decide that brig needs to cache the table itself, but +for now (`GET` is only used for the internal end-point to share it +with other services) we hope to get away with the simple solution and +always read from cassandra directly. 
+ +(More details to be added?) diff --git a/docs/src/developer/reference/config-options.md b/docs/src/developer/reference/config-options.md index 9be01c3449..5ebc544b65 100644 --- a/docs/src/developer/reference/config-options.md +++ b/docs/src/developer/reference/config-options.md @@ -393,43 +393,7 @@ settings: ### Federation allow list -As of 2021-07, federation (whatever is implemented by the time you read this) is turned off by default by means of having an empty allow list: - -```yaml -# federator.yaml -optSettings: - federationStrategy: - allowedDomains: [] -``` - -You can choose to federate with a specific list of allowed servers: - - -```yaml -# federator.yaml -optSettings: - federationStrategy: - allowedDomains: - - server1.example.com - - server2.example.com -``` - -or, you can federate with everyone: - -```yaml -# federator.yaml -optSettings: - federationStrategy: - # note the 'empty' value after 'allowAll' - allowAll: - -# when configuring helm charts, this becomes (note 'true' after 'allowAll') -# inside helm_vars/wire-server: -federator: - optSettings: - federationStrategy: - allowAll: true -``` +See {ref}`configure-federation-strategy-in-brig` (since [PR#3260](https://github.com/wireapp/wire-server/pull/3260)). ### Federation TLS Config @@ -611,24 +575,11 @@ any key package whose expiry date is set further than 15 days after upload time ### Federated domain specific configuration settings -#### Restrict user search -The lookup and search of users on a wire instance can be configured. This can be done per federated domain. - -```yaml -# [brig.yaml] -optSettings: - setFederationDomainConfigs: - - domain: example.com - search_policy: no_search -``` +#### Restrict user search -Valid values for `search_policy` are: -- `no_search`: No users are returned by federated searches. -- `exact_handle_search`: Only users where the handle exactly matches are returned. 
-- `full_search`: Additionally to `exact_handle_search`, users are found by a freetext search on handle and display name. +See {ref}`configure-federation-strategy-in-brig` (since [PR#3260](https://github.com/wireapp/wire-server/pull/3260)). -If there is no configuration for a domain, it's defaulted to `no_search`. ### API Versioning diff --git a/docs/src/understand/configure-federation.md b/docs/src/understand/configure-federation.md index fd092ad20f..3477a10242 100644 --- a/docs/src/understand/configure-federation.md +++ b/docs/src/understand/configure-federation.md @@ -62,7 +62,7 @@ backend. domain. Your user known to you as Alice, and known on your server with ID `ac41a202-2555-11ec-9341-00163e5e6c00` will become known for other servers you federate with as - + ``` json { "user": { @@ -73,7 +73,7 @@ backend. ``` - This domain is shown in the User Interface - alongside user information. + alongside user information. Example: Using the same example as above, for backends you federate with, Alice would be displayed with the @@ -138,7 +138,7 @@ The SRV record would look as follows: _wire-server-federator._tcp.example.com. 600 IN SRV 0 10 443 federator.wire.example.org. ``` -### DNS A record for the federator +### DNS A record for the federator Background: `federator` is the server component responsible for incoming and outgoing requests to other backend; but it is proxied on the @@ -151,7 +151,7 @@ also needs to point to the IP of your ingress, i.e. the IP you want to provide services on. (federation-certificate-setup)= -## Generate and configure TLS server and client certificates +## Generate and configure TLS server and client certificates Are your servers on the public internet? Then you have the option of using TLS certificates from [Let\'s encrypt](https://letsencrypt.org/). @@ -196,7 +196,7 @@ FS-33 and FS-49 (tickets only visible to Wire employees). 
``` -### (A) Let\'s encrypt TLS server and client certificate generation and renewal +### (A) Let\'s encrypt TLS server and client certificate generation and renewal The following will make use of [Let\'s encrypt](https://letsencrypt.org/) for both server certificates (used @@ -395,7 +395,171 @@ cargohold: federationDomain: example.com # your chosen "backend domain" ``` -### Configure federator process to run and allow incoming traffic +(configure-federation-strategy-in-brig)= + +### Configure federation strategy (whom to federate with) in brig + +**Since [PR#3260](https://github.com/wireapp/wire-server/pull/3260).** + +Also see {ref}`configuring-remote-connections-dev-perspective` for the +developer's point of view on this topic. + +You also need to define the federation strategy (whom to federate +with), and the frequency with which the other backend services will +refresh their cache of this configuration. + +``` yaml +# override values for wire-server +# (e.g. under ./helm_vars/wire-server/values.yaml) +brig: + config: + optSettings: + setFederationStrategy: allowNone # [allowAll | allowDynamic | allowNone] + setFederationDomainConfigsUpdateFreq: 10 # seconds +``` + +The default strategy of `allowNone` effectively disables federation +(and probably isn't what you want if you are reading this). +`allowAll` federates with any backend that requests contact or that a +user uses in a search. `allowDynamic` only federates with known +remote backends listed in cassandra. + +The update frequency determines how often other services will refresh +the information about remote connections from brig. 
+ +More information about individual remote connections is stored in +brig's cassandra, and maintained via internal brig api end-points by +the sysadmin: + +* [`POST`](https://staging-nginz-https.zinfra.io/api-internal/swagger-ui/brig/#/brig/post_i_federation_remotes) + + - after adding a new remote backend, wait for the other end to do + the same with you, and then wait a few moments for things to + stabilize (at least `update_interval * 2`; see below). + +* [`GET`](https://staging-nginz-https.zinfra.io/api-internal/swagger-ui/brig/#/brig/get_i_federation_remotes) + + - this serves an object with 3 fields: + - `remotes` (from cassandra): the list of remote domains with search policy (and + possibly other information in the future); + - `strategy` (from config): federation strategy; one of `allowNone`, `allowDynamic`, `allowAll` (see above) + - `update_interval` (from config): the suggested update frequency with which calling + services should refresh their information. + + - It doesn't serve the local domain, which needs to be configured + for every service that needs to know it individually. This may + change in the future. + + - This end-point enjoys a comparably high amount of traffic. If you + have many pods (a large instance with say, >100 pods), *and* you set a very + short update interval (<10s), you should monitor brig's service and + database load closely in the beginning. + +* [`PUT`](https://staging-nginz-https.zinfra.io/api-internal/swagger-ui/brig/#/brig/put_i_federation_remotes__domain_) + +* [`DELETE`](https://staging-nginz-https.zinfra.io/api-internal/swagger-ui/brig/#/brig/delete_i_federation_remotes__domain_) + - **WARNING:** If you delete a connection, all users from that + remote will be removed from local conversations, and all + conversations hosted by that remote will be removed from the local + backend. 
Connections between local and remote users that are + removed will be archived, and can be re-established should you + decide to add the same backend later. + +The `remotes` list looks like this: + +``` +[ + { + "domain": "wire.example.com", + "search_policy": "full_search" + }, + { + "domain": "evil.example.com" + "search_policy": "no_search" + }, + ... +] +``` + +It serves two purposes: + +1. If federation strategy is `allowDynamic`, only backends that are + listed can be reached by us and can reach us; + +2. Independently of the federation strategy, the list provides + information about remote backends that may change dynamically (at + the time of writing this: search policy, see + {ref}`searching-users-on-another-federated-backend` and + {ref}`user-searchability` for more context) + +The search policy for a remote backend can be: + +- `no_search`: No users are returned by federated searches. default. +- `exact_handle_search`: Only users where the handle exactly matches are returned. +- `full_search`: Additionally to `exact_handle_search`, users are found by a freetext search on handle and display name. + +If federation strategy is `allowAll`, and there is no entry for a +domain in the database, default is `no_search`. The field in +cassandra is not nullable, ie., you always have to explicitly name a +search policy if you create an entry. + +#### If your instance has been federating before + +You only need to read this section if your instance has been +federating with other instances prior to +[PR#3260](https://github.com/wireapp/wire-server/pull/3260), and you +are upgrading to the release containing that PR. + +From now on the federation policy set in the federator config under +`federationStrategy` is ignored. Instead, the federation strategy is +pulled by all services from brig, who in turn gets it from a +combination of config file and database (see +{ref}`configure-federation-strategy-in-brig` above). 
+ +In order to achieve a zero-downtime upgrade, follow these steps: + +1. Update the brig config values file as described above. + +2. If you have chosen `brig.config.optSettings.setFederationStrategy: + allowDynamic` you need to make sure the list of all domains you want + to allow federation with is complete (before, there was a search + policy default; now wire will stop federating with removes that are + not listed here). Example: + + ```yaml + brig: + config: + optSettings: + setFederationDomainConfigs: + - domain: red.example.com + search_policy: full_search + - domain: blue.example.com + search_policy: no_search + ``` + + This change is to cover the time window between upgrading the brig + pods and populating cassandra with the information needed (see + Step 3 below). + + Any later lookup of this information will return the union of what + is in cassandra and what is in the config file. Any attempt to + write data to cassandra that contradicts data in the config file + will result in an error. Before you change any remote domain + config, remove it from the config file. + +3. Populate cassandra with remote domain configs as described above. + +4. At any time after you are done with the upgrade and have convinced + yourself everything went smoothly, remove outdated brig and + federator config values, in particular: + - `brig.config.optSettings.setFederationDomainConfigs` + - `federator.config.optSettings.federationStrategy` + + At a later point, wire-server will start ignoring + `setFederationDomainConfigs` altogether (follow future entries in + the changelog to learn when that happens). 
+ +### Configure federator process to run and allow incoming traffic For federation to work, the `federator` subchart of wire-server has to be enabled: @@ -422,7 +586,7 @@ config: federator: federator.wire.example.org # set this to your "infra" domain ``` -### Configure the validation depth when handling client certificates +### Configure the validation depth when handling client certificates By default, `verify_depth` is `1`, meaning that in order to validate an incoming request from another backend, this backend needs to have a @@ -482,14 +646,14 @@ federator: allowAll: true ``` -## Applying all configuration changes +## Applying all configuration changes Depending on your installation method and time you initially installed your first version of wire-server, commands to run to apply all of the above configrations may vary. You want to ensure that you upgrade the `nginx-ingress-services` and `wire-server` helm charts at a minimum. -## Manually test that your configurations work as expected +## Manually test that your configurations work as expected ### Manually test DNS @@ -518,7 +682,7 @@ DOMAIN to your {ref}`federation infrastructure domain `. They should include your domain as part of the SAN (Subject Alternative Names) and not have expired. -### Manually test that federation works +### Manually test that federation works Prerequisites: diff --git a/docs/src/understand/searchability.md b/docs/src/understand/searchability.md index 2f37fdcc09..b1608e0d6a 100644 --- a/docs/src/understand/searchability.md +++ b/docs/src/understand/searchability.md @@ -1,3 +1,5 @@ +(user-searchability)= + # User Searchability You can configure how search is limited or not based on user membership in a given team. @@ -99,29 +101,13 @@ galley: This default value applies to all teams for which no explicit configuration of the `TeamSearchVisibility` has been set. 
-## Searching users on another federated backend - - -Allowing search is done at the backend configuration level by the sysadmin: +(searching-users-on-another-federated-backend)= -- A configuration setting `FederatedUserSearchPolicy` per federating domain with these possible values: - - - `no_search` The federating backend is not allowed to search any users (either by exact handle or full-text). - - `exact_handle_search` The federating backend may only search by exact handle - - `full_search` The federating backend may search users by full text search on display name and handle. The search search results are additionally affected by `SearchVisibilityInbound` setting of each team on the backend. +## Searching users on another federated backend - The configuration value `FederatedUserSearchPolicy` is per federated domain, e.g. in the values of the wire-server chart: - - ```yaml - brig: - config: - optSettings: - setFederationDomainConfigs: - - domain: a.example.com - search_policy: no_search - - domain: a.example.com - search_policy: full_search - ``` +- Setting the search policy for individual remote federated backends + is done via a internal brig api end-points by a sysadmin (see + {ref}`configure-federation-strategy-in-brig`}. - The `SearchVisibilityInbound` setting applies. 
Since the default value for teams is `SearchableByOwnTeam` this means that for a team to be full-text searchable by users on a federating backend both @@ -271,4 +257,3 @@ settings: featureFlags: teamSearchVisibility: disabled-by-default # or enabled-by-default ``` - diff --git a/hack/helm_vars/wire-server/values.yaml.gotmpl b/hack/helm_vars/wire-server/values.yaml.gotmpl index 821d62d958..9f81c5ebb6 100644 --- a/hack/helm_vars/wire-server/values.yaml.gotmpl +++ b/hack/helm_vars/wire-server/values.yaml.gotmpl @@ -78,11 +78,15 @@ brig: # See helmfile for the real value setFederationDomain: integration.example.com setFederationDomainConfigs: - # See helmfile for the real value + # 'setFederationDomainConfigs' is deprecated as of https://github.com/wireapp/wire-server/pull/3260. See + # https://docs.wire.com/understand/federation/backend-communication.html#configuring-remote-connections + # for details. - domain: integration.example.com search_policy: full_search - domain: federation-test-helper.{{ .Release.Namespace }}.svc.cluster.local search_policy: full_search + setFederationStrategy: allowAll + setFederationDomainConfigsUpdateFreq: 10 set2FACodeGenerationDelaySecs: 5 setNonceTtlSecs: 300 setDpopMaxSkewSecs: 1 @@ -300,8 +304,6 @@ federator: imagePullPolicy: {{ .Values.imagePullPolicy }} config: optSettings: - federationStrategy: - allowAll: true useSystemCAStore: false background-worker: diff --git a/integration/test/API/BrigInternal.hs b/integration/test/API/BrigInternal.hs index 408b849358..138cb5d803 100644 --- a/integration/test/API/BrigInternal.hs +++ b/integration/test/API/BrigInternal.hs @@ -1,6 +1,7 @@ module API.BrigInternal where import API.Common +import qualified Data.Aeson as Aeson import Data.Function import Data.Maybe import Testlib.Prelude @@ -47,6 +48,68 @@ createUser domain cu = do ] ) +data FedConn = FedConn + { domain :: String, + searchStrategy :: String + } + deriving (Eq, Ord, Show) + +instance ToJSON FedConn where + toJSON (FedConn d s) = + 
Aeson.object + [ "domain" .= d, + "search_policy" .= s + ] + +instance MakesValue FedConn where + make = pure . toJSON + +createFedConn :: (HasCallStack, MakesValue dom, MakesValue fedConn) => dom -> fedConn -> App Response +createFedConn dom fedConn = do + bindResponse (createFedConn' dom fedConn) $ \res -> do + res.status `shouldMatchRange` (200, 299) + pure res + +createFedConn' :: (HasCallStack, MakesValue dom, MakesValue fedConn) => dom -> fedConn -> App Response +createFedConn' dom fedConn = do + req <- rawBaseRequest dom Brig Unversioned "/i/federation/remotes" + conn <- make fedConn + submit "POST" $ req & addJSON conn + +readFedConns :: (HasCallStack, MakesValue dom) => dom -> App Response +readFedConns dom = do + bindResponse (readFedConns' dom) $ \res -> do + res.status `shouldMatchRange` (200, 299) + pure res + +readFedConns' :: (HasCallStack, MakesValue dom) => dom -> App Response +readFedConns' dom = do + req <- rawBaseRequest dom Brig Unversioned "/i/federation/remotes" + submit "GET" req + +updateFedConn :: (HasCallStack, MakesValue owndom, MakesValue fedConn) => owndom -> String -> fedConn -> App Response +updateFedConn owndom dom fedConn = do + bindResponse (updateFedConn' owndom dom fedConn) $ \res -> do + res.status `shouldMatchRange` (200, 299) + pure res + +updateFedConn' :: (HasCallStack, MakesValue owndom, MakesValue fedConn) => owndom -> String -> fedConn -> App Response +updateFedConn' owndom dom fedConn = do + req <- rawBaseRequest owndom Brig Unversioned ("/i/federation/remotes/" <> dom) + conn <- make fedConn + submit "PUT" $ addJSON conn req + +deleteFedConn :: (HasCallStack, MakesValue owndom) => owndom -> String -> App Response +deleteFedConn owndom dom = do + bindResponse (deleteFedConn' owndom dom) $ \res -> do + res.status `shouldMatchRange` (200, 299) + pure res + +deleteFedConn' :: (HasCallStack, MakesValue owndom) => owndom -> String -> App Response +deleteFedConn' owndom dom = do + req <- rawBaseRequest owndom Brig Unversioned 
("/i/federation/remotes/" <> dom) + submit "DELETE" req + registerOAuthClient :: (HasCallStack, MakesValue user, MakesValue name, MakesValue url) => user -> name -> url -> App Response registerOAuthClient user name url = do req <- baseRequest user Brig Unversioned "i/oauth/clients" diff --git a/integration/test/SetupHelpers.hs b/integration/test/SetupHelpers.hs index a46e8e8cc5..ac028577ae 100644 --- a/integration/test/SetupHelpers.hs +++ b/integration/test/SetupHelpers.hs @@ -58,3 +58,11 @@ getAllConvs u = do resp.status `shouldMatchInt` 200 resp.json result %. "found" & asList + +resetFedConns :: (HasCallStack, MakesValue owndom) => owndom -> App () +resetFedConns owndom = do + bindResponse (Internal.readFedConns owndom) $ \resp -> do + rdoms :: [String] <- do + rawlist <- resp.json %. "remotes" & asList + (asString . (%. "domain")) `mapM` rawlist + Internal.deleteFedConn' owndom `mapM_` rdoms diff --git a/integration/test/Test/Brig.hs b/integration/test/Test/Brig.hs index 7374030476..7f292118c6 100644 --- a/integration/test/Test/Brig.hs +++ b/integration/test/Test/Brig.hs @@ -4,10 +4,15 @@ import qualified API.Brig as Public import qualified API.BrigInternal as Internal import qualified API.Common as API import qualified API.GalleyInternal as Internal +import Control.Monad.IO.Class (liftIO) +import Data.Aeson.Types import qualified Data.Set as Set -import Data.String.Conversions (cs) +import Data.String.Conversions +import qualified Data.UUID as UUID +import qualified Data.UUID.V4 as UUID import GHC.Stack import SetupHelpers +import Testlib.Assertions import Testlib.Prelude testSearchContactForExternalUsers :: HasCallStack => App () @@ -21,6 +26,85 @@ testSearchContactForExternalUsers = do bindResponse (Public.searchContacts partner (owner %. 
"name") OwnDomain) $ \resp -> resp.status `shouldMatchInt` 403 +testCrudFederationRemotes :: HasCallStack => App () +testCrudFederationRemotes = do + let parseFedConns :: HasCallStack => Response -> App [Value] + parseFedConns resp = + -- Pick out the list of federation domain configs + getJSON 200 resp %. "remotes" + & asList + -- Enforce that the values are objects and not something else + >>= traverse (fmap Object . asObject) + + addOnce :: (MakesValue fedConn, Ord fedConn2, ToJSON fedConn2, MakesValue fedConn2, HasCallStack) => fedConn -> [fedConn2] -> App () + addOnce fedConn want = do + bindResponse (Internal.createFedConn OwnDomain fedConn) $ \res -> do + addFailureContext ("res = " <> show res) $ res.status `shouldMatchInt` 200 + res2 <- parseFedConns =<< Internal.readFedConns OwnDomain + sort res2 `shouldMatch` sort want + + addFail :: HasCallStack => MakesValue fedConn => fedConn -> App () + addFail fedConn = do + bindResponse (Internal.createFedConn' OwnDomain fedConn) $ \res -> do + addFailureContext ("res = " <> show res) $ res.status `shouldMatchInt` 533 + + deleteOnce :: (Ord fedConn, ToJSON fedConn, MakesValue fedConn) => String -> [fedConn] -> App () + deleteOnce domain want = do + bindResponse (Internal.deleteFedConn OwnDomain domain) $ \res -> do + addFailureContext ("res = " <> show res) $ res.status `shouldMatchInt` 200 + res2 <- parseFedConns =<< Internal.readFedConns OwnDomain + sort res2 `shouldMatch` sort want + + deleteFail :: HasCallStack => String -> App () + deleteFail del = do + bindResponse (Internal.deleteFedConn' OwnDomain del) $ \res -> do + addFailureContext ("res = " <> show res) $ res.status `shouldMatchInt` 533 + + updateOnce :: (MakesValue fedConn, Ord fedConn2, ToJSON fedConn2, MakesValue fedConn2, HasCallStack) => String -> fedConn -> [fedConn2] -> App () + updateOnce domain fedConn want = do + bindResponse (Internal.updateFedConn OwnDomain domain fedConn) $ \res -> do + addFailureContext ("res = " <> show res) $ res.status 
`shouldMatchInt` 200 + res2 <- parseFedConns =<< Internal.readFedConns OwnDomain + sort res2 `shouldMatch` sort want + + updateFail :: (MakesValue fedConn, HasCallStack) => String -> fedConn -> App () + updateFail domain fedConn = do + bindResponse (Internal.updateFedConn' OwnDomain domain fedConn) $ \res -> do + addFailureContext ("res = " <> show res) $ res.status `shouldMatchInt` 533 + + dom1 :: String <- (<> ".example.com") . UUID.toString <$> liftIO UUID.nextRandom + dom2 :: String <- (<> ".example.com") . UUID.toString <$> liftIO UUID.nextRandom + + let remote1, remote1', remote1'' :: Internal.FedConn + remote1 = Internal.FedConn dom1 "no_search" + remote1' = remote1 {Internal.searchStrategy = "full_search"} + remote1'' = remote1 {Internal.domain = dom2} + + cfgRemotesExpect :: Internal.FedConn + cfgRemotesExpect = Internal.FedConn (cs "example.com") "full_search" + + remote1J <- make remote1 + remote1J' <- make remote1' + + resetFedConns OwnDomain + cfgRemotes <- parseFedConns =<< Internal.readFedConns OwnDomain + cfgRemotes `shouldMatch` [cfgRemotesExpect] + -- entries present in the config file can be idempotently added if identical, but cannot be + -- updated, deleted or updated. 
+ addOnce cfgRemotesExpect [cfgRemotesExpect] + addFail (cfgRemotesExpect {Internal.searchStrategy = "no_search"}) + deleteFail (Internal.domain cfgRemotesExpect) + updateFail (Internal.domain cfgRemotesExpect) (cfgRemotesExpect {Internal.searchStrategy = "no_search"}) + -- create + addOnce remote1 $ (remote1J : cfgRemotes) + addOnce remote1 $ (remote1J : cfgRemotes) -- idempotency + -- update + updateOnce (Internal.domain remote1) remote1' (remote1J' : cfgRemotes) + updateFail (Internal.domain remote1) remote1'' + -- delete + deleteOnce (Internal.domain remote1) cfgRemotes + deleteOnce (Internal.domain remote1) cfgRemotes -- idempotency + testCrudOAuthClient :: HasCallStack => App () testCrudOAuthClient = do user <- randomUser OwnDomain def diff --git a/integration/test/Testlib/Assertions.hs b/integration/test/Testlib/Assertions.hs index bed010185d..05e8d98f16 100644 --- a/integration/test/Testlib/Assertions.hs +++ b/integration/test/Testlib/Assertions.hs @@ -84,6 +84,19 @@ shouldMatchInt :: App () shouldMatchInt = shouldMatch +shouldMatchRange :: + (MakesValue a, HasCallStack) => + -- | The actual value + a -> + -- | The expected range, inclusive both sides + (Int, Int) -> + App () +shouldMatchRange a (lower, upper) = do + xa :: Int <- asInt a + when (xa < lower || xa > upper) $ do + pa <- prettyJSON xa + assertFailure $ "Actual:\n" <> pa <> "\nExpected:\nin range (" <> show lower <> ", " <> show upper <> ") (including bounds)" + shouldMatchSet :: (MakesValue a, MakesValue b, HasCallStack) => a -> diff --git a/integration/test/Testlib/Types.hs b/integration/test/Testlib/Types.hs index 01025ed1c7..782021e4fc 100644 --- a/integration/test/Testlib/Types.hs +++ b/integration/test/Testlib/Types.hs @@ -37,6 +37,7 @@ data Response = Response headers :: [HTTP.Header], request :: HTTP.Request } + deriving (Show) instance HasField "json" Response (App Aeson.Value) where getField response = maybe (assertFailure "Response has no json body") pure response.jsonBody diff --git 
a/libs/wire-api-federation/src/Wire/API/Federation/Error.hs b/libs/wire-api-federation/src/Wire/API/Federation/Error.hs index d51fd252f0..6d6e305760 100644 --- a/libs/wire-api-federation/src/Wire/API/Federation/Error.hs +++ b/libs/wire-api-federation/src/Wire/API/Federation/Error.hs @@ -160,7 +160,10 @@ data FederationError -- indicate a bug in either backend, or an incompatibility in the -- server-to-server API. FederationUnexpectedBody Text - | -- | Federator client got an unexpected error response from remote backend + | -- | Federator client got an unexpected error response from remote backend. + -- Also used for error conditions that will go away in a future release, + -- like "can't delete remote domains from config file", which is only + -- needed until we start disregarding the config file. FederationUnexpectedError Text | -- | One or more remote backends is unreachable FederationUnreachableDomains (Set Domain) diff --git a/libs/wire-api/default.nix b/libs/wire-api/default.nix index 945f8c3c21..332cb183f4 100644 --- a/libs/wire-api/default.nix +++ b/libs/wire-api/default.nix @@ -46,6 +46,7 @@ , hspec , hspec-wai , http-api-data +, http-client , http-media , http-types , imports @@ -70,6 +71,7 @@ , quickcheck-instances , random , resourcet +, retry , saml2-web-sso , schema-profunctor , scientific @@ -93,6 +95,7 @@ , tasty-quickcheck , text , time +, tinylog , transitive-anns , types-common , unliftio @@ -116,6 +119,7 @@ mkDerivation { src = gitignoreSource ./.; libraryHaskellDepends = [ aeson + async attoparsec base base64-bytestring @@ -149,6 +153,7 @@ mkDerivation { hscim HsOpenSSL http-api-data + http-client http-media http-types imports @@ -170,6 +175,7 @@ mkDerivation { quickcheck-instances random resourcet + retry saml2-web-sso schema-profunctor scientific @@ -189,6 +195,7 @@ mkDerivation { tagged text time + tinylog transitive-anns types-common unordered-containers diff --git a/libs/wire-api/src/Wire/API/FederationUpdate.hs 
b/libs/wire-api/src/Wire/API/FederationUpdate.hs new file mode 100644 index 0000000000..54be7d04ec --- /dev/null +++ b/libs/wire-api/src/Wire/API/FederationUpdate.hs @@ -0,0 +1,91 @@ +module Wire.API.FederationUpdate + ( syncFedDomainConfigs, + SyncFedDomainConfigsCallback (..), + emptySyncFedDomainConfigsCallback, + ) +where + +import Control.Concurrent.Async +import Control.Exception (ErrorCall (ErrorCall), finally, throwIO) +import qualified Control.Retry as R +import qualified Data.Set as Set +import Data.Text (unpack) +import Imports +import Network.HTTP.Client (defaultManagerSettings, newManager) +import Servant.Client (BaseUrl (BaseUrl), ClientEnv (ClientEnv), ClientError, Scheme (Http), runClientM) +import Servant.Client.Internal.HttpClient (defaultMakeClientRequest) +import qualified System.Logger as L +import Util.Options (Endpoint (..)) +import Wire.API.Routes.FederationDomainConfig (FederationDomainConfig (domain), FederationDomainConfigs (remotes, updateInterval)) +import qualified Wire.API.Routes.Internal.Brig as IAPI +import Wire.API.Routes.Named (namedClient) + +-- | 'FedUpdateCallback' is not called if a new settings cannot be fetched, or if they are +-- equal to the old settings. 
+syncFedDomainConfigs :: Endpoint -> L.Logger -> SyncFedDomainConfigsCallback -> IO (IORef FederationDomainConfigs, Async ()) +syncFedDomainConfigs (Endpoint h p) log' cb = do + let baseUrl = BaseUrl Http (unpack h) (fromIntegral p) "" + clientEnv <- newManager defaultManagerSettings <&> \mgr -> ClientEnv mgr baseUrl Nothing defaultMakeClientRequest + ioref <- newIORef =<< initialize log' clientEnv + updateDomainsThread <- + async $ + let go = finally + (loop log' clientEnv cb ioref) + $ do + L.log log' L.Error $ L.msg (L.val "Federation domain sync thread died, restarting domain synchronization.") + go + in go + + pure (ioref, updateDomainsThread) + +-- | Initial function for getting the set of domains from brig, and an update interval +initialize :: L.Logger -> ClientEnv -> IO FederationDomainConfigs +initialize logger clientEnv = + let -- keep trying every 3s for one minute + policy :: R.RetryPolicy + policy = R.constantDelay 3_081_003 <> R.limitRetries 20 + + go :: IO (Maybe FederationDomainConfigs) + go = do + fetch clientEnv >>= \case + Right s -> pure $ Just s + Left e -> do + L.log logger L.Info $ + L.msg (L.val "Failed to reach brig for federation setup, retrying...") + L.~~ "error" L..= show e + pure Nothing + in R.retrying policy (const (pure . isNothing)) (const go) >>= \case + Just c -> pure c + Nothing -> throwIO $ ErrorCall "*** Failed to reach brig for federation setup, giving up!" 
+ +loop :: L.Logger -> ClientEnv -> SyncFedDomainConfigsCallback -> IORef FederationDomainConfigs -> IO () +loop logger clientEnv (SyncFedDomainConfigsCallback callback) env = forever $ do + fetch clientEnv >>= \case + Left e -> + L.log logger L.Info $ + L.msg (L.val "Could not retrieve an updated list of federation domains from Brig; I'll keep trying!") + L.~~ "error" L..= show e + Right new -> do + old <- readIORef env + unless (domainListsEqual old new) $ callback old new + atomicWriteIORef env new + delay <- updateInterval <$> readIORef env + threadDelay (delay * 1_000_000) + where + domainListsEqual o n = + Set.fromList (domain <$> remotes o) + == Set.fromList (domain <$> remotes n) + +fetch :: ClientEnv -> IO (Either ClientError FederationDomainConfigs) +fetch = runClientM (namedClient @IAPI.API @"get-federation-remotes") + +-- | The callback takes the previous and the new settings and runs a given action. +newtype SyncFedDomainConfigsCallback = SyncFedDomainConfigsCallback + { fromFedUpdateCallback :: + FederationDomainConfigs -> -- old value + FederationDomainConfigs -> -- new value + IO () + } + +emptySyncFedDomainConfigsCallback :: SyncFedDomainConfigsCallback +emptySyncFedDomainConfigsCallback = SyncFedDomainConfigsCallback $ \_ _ -> pure () diff --git a/libs/wire-api/src/Wire/API/Routes/FederationDomainConfig.hs b/libs/wire-api/src/Wire/API/Routes/FederationDomainConfig.hs new file mode 100644 index 0000000000..9db8fb3044 --- /dev/null +++ b/libs/wire-api/src/Wire/API/Routes/FederationDomainConfig.hs @@ -0,0 +1,100 @@ +-- This file is part of the Wire Server implementation. +-- +-- Copyright (C) 2022 Wire Swiss GmbH +-- +-- This program is free software: you can redistribute it and/or modify it under +-- the terms of the GNU Affero General Public License as published by the Free +-- Software Foundation, either version 3 of the License, or (at your option) any +-- later version. 
+-- +-- This program is distributed in the hope that it will be useful, but WITHOUT +-- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +-- FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more +-- details. +-- +-- You should have received a copy of the GNU Affero General Public License along +-- with this program. If not, see . + +module Wire.API.Routes.FederationDomainConfig + ( FederationDomainConfig (..), + FederationDomainConfigs (..), + defFederationDomainConfigs, + FederationStrategy (..), + ) +where + +import Control.Lens ((?~)) +import Data.Aeson (FromJSON, ToJSON) +import Data.Domain (Domain) +import Data.Schema +import qualified Data.Swagger as S +import GHC.Generics +import Imports +import Wire.API.User.Search (FederatedUserSearchPolicy) +import Wire.Arbitrary (Arbitrary, GenericUniform (..)) + +-- | Everything we need to know about a remote instance in order to federate with it. Comes +-- in `AllowedDomains` if `AllowStrategy` is `AllowDynamic`. If `AllowAll`, we still use this +-- information for search policy. 
+data FederationDomainConfig = FederationDomainConfig + { domain :: Domain, + cfgSearchPolicy :: FederatedUserSearchPolicy + } + deriving (Eq, Ord, Show, Generic) + deriving (ToJSON, FromJSON, S.ToSchema) via Schema FederationDomainConfig + deriving (Arbitrary) via (GenericUniform FederationDomainConfig) + +instance ToSchema FederationDomainConfig where + schema = + object "FederationDomainConfig" $ + FederationDomainConfig + <$> domain .= field "domain" schema + <*> cfgSearchPolicy .= field "search_policy" schema + +data FederationDomainConfigs = FederationDomainConfigs + { strategy :: FederationStrategy, + remotes :: [FederationDomainConfig], + updateInterval :: Int + } + deriving (Show, Generic, Eq) + deriving (ToJSON, FromJSON, S.ToSchema) via Schema FederationDomainConfigs + deriving (Arbitrary) via (GenericUniform FederationDomainConfigs) + +defFederationDomainConfigs :: FederationDomainConfigs +defFederationDomainConfigs = + FederationDomainConfigs + { strategy = AllowNone, + remotes = [], + updateInterval = 10 + } + +instance ToSchema FederationDomainConfigs where + schema = + objectWithDocModifier + "FederationDomainConfigs" + (description ?~ "See https://docs.wire.com/understand/federation/backend-communication.html#configuring-remote-connections.") + $ FederationDomainConfigs + <$> strategy .= field "strategy" schema + <*> remotes .= field "remotes" (array schema) + <*> updateInterval .= field "update_interval" schema + +data FederationStrategy + = -- | Disable federation. + AllowNone + | -- | Allow any backend that asks. + AllowAll + | -- | Any backend explicitly configured in table `brig.federation_remotes` (if that table + -- is empty, this is the same as `AllowNone`). 
+ AllowDynamic + deriving (Eq, Show, Generic) + deriving (ToJSON, FromJSON, S.ToSchema) via Schema FederationStrategy + deriving (Arbitrary) via (GenericUniform FederationStrategy) + +instance ToSchema FederationStrategy where + schema = + enum @Text "FederationStrategy" $ + mconcat + [ element "allowNone" AllowNone, + element "allowAll" AllowAll, + element "allowDynamic" AllowDynamic + ] diff --git a/libs/wire-api/src/Wire/API/Routes/Internal/Brig.hs b/libs/wire-api/src/Wire/API/Routes/Internal/Brig.hs index 1b4ab217f7..87819ff1e6 100644 --- a/libs/wire-api/src/Wire/API/Routes/Internal/Brig.hs +++ b/libs/wire-api/src/Wire/API/Routes/Internal/Brig.hs @@ -24,6 +24,7 @@ module Wire.API.Routes.Internal.Brig TeamsAPI, UserAPI, AuthAPI, + FederationRemotesAPI, EJPDRequest, ISearchIndexAPI, GetAccountConferenceCallingConfig, @@ -40,6 +41,7 @@ where import Control.Lens ((.~)) import Data.Aeson (FromJSON, ToJSON) import qualified Data.Code as Code +import Data.Domain (Domain) import Data.Id as Id import Data.Qualified (Qualified) import Data.Schema hiding (swaggerDoc) @@ -55,6 +57,7 @@ import Wire.API.Error.Brig import Wire.API.MLS.Credential import Wire.API.MLS.KeyPackage import Wire.API.MakesFederatedCall +import Wire.API.Routes.FederationDomainConfig import Wire.API.Routes.Internal.Brig.Connection import Wire.API.Routes.Internal.Brig.EJPD import Wire.API.Routes.Internal.Brig.OAuth (OAuthAPI) @@ -386,6 +389,7 @@ type API = :<|> AuthAPI :<|> OAuthAPI :<|> ISearchIndexAPI + :<|> FederationRemotesAPI ) type IStatusAPI = @@ -464,6 +468,51 @@ type AuthAPI = :> MultiVerb1 'GET '[JSON] (RespondEmpty 200 "OK") ) +-- | This is located in brig, not in federator, because brig has a cassandra instance. This +-- is not ideal, and other services could keep their local in-ram copy of this table up to date +-- via rabbitmq, but FUTUREWORK. 
+type FederationRemotesAPI = + Named + "add-federation-remotes" + ( Description FederationRemotesAPIDescription + :> "federation" + :> "remotes" + :> ReqBody '[JSON] FederationDomainConfig + :> Post '[JSON] () + ) + :<|> Named + "get-federation-remotes" + ( Description FederationRemotesAPIDescription + :> "federation" + :> "remotes" + :> Get '[JSON] FederationDomainConfigs + ) + :<|> Named + "update-federation-remotes" + ( Description FederationRemotesAPIDescription + :> "federation" + :> "remotes" + :> Capture "domain" Domain + :> ReqBody '[JSON] FederationDomainConfig + :> Put '[JSON] () + ) + :<|> Named + "delete-federation-remotes" + ( Description FederationRemotesAPIDescription + :> Description FederationRemotesAPIDeleteDescription + :> "federation" + :> "remotes" + :> Capture "domain" Domain + :> Delete '[JSON] () + ) + +type FederationRemotesAPIDescription = + "See https://docs.wire.com/understand/federation/backend-communication.html#configuring-remote-connections for background. " + +type FederationRemotesAPIDeleteDescription = + "**WARNING!** If you remove a remote connection, all users from that remote will be removed from local conversations, and all \ + \group conversations hosted by that remote will be removed from the local backend. This cannot be reverted! 
" + swaggerDoc :: Swagger swaggerDoc = toSwagger (Proxy @API) diff --git a/libs/wire-api/src/Wire/API/User/Search.hs b/libs/wire-api/src/Wire/API/User/Search.hs index 5c57b64bc2..bef46f56be 100644 --- a/libs/wire-api/src/Wire/API/User/Search.hs +++ b/libs/wire-api/src/Wire/API/User/Search.hs @@ -306,7 +306,7 @@ data FederatedUserSearchPolicy = NoSearch | ExactHandleSearch | FullSearch - deriving (Show, Eq, Generic, Enum, Bounded) + deriving (Show, Eq, Ord, Generic, Enum, Bounded) deriving (Arbitrary) via (GenericUniform FederatedUserSearchPolicy) deriving (ToJSON, FromJSON) via (Schema FederatedUserSearchPolicy) diff --git a/libs/wire-api/test/unit/Test/Wire/API/Roundtrip/Aeson.hs b/libs/wire-api/test/unit/Test/Wire/API/Roundtrip/Aeson.hs index 7464f2e3b9..9500992102 100644 --- a/libs/wire-api/test/unit/Test/Wire/API/Roundtrip/Aeson.hs +++ b/libs/wire-api/test/unit/Test/Wire/API/Roundtrip/Aeson.hs @@ -47,6 +47,7 @@ import qualified Wire.API.Provider.External as Provider.External import qualified Wire.API.Provider.Service as Provider.Service import qualified Wire.API.Provider.Service.Tag as Provider.Service.Tag import qualified Wire.API.Push.Token as Push.Token +import qualified Wire.API.Routes.FederationDomainConfig as FederationDomainConfig import qualified Wire.API.Routes.Internal.Galley.TeamsIntra as TeamsIntra import qualified Wire.API.Routes.Version as Routes.Version import qualified Wire.API.SystemSettings as SystemSettings @@ -143,6 +144,8 @@ tests = testRoundTrip @Event.Conversation.OtrMessage, testRoundTrip @Event.Team.Event, testRoundTrip @Event.Team.EventType, + testRoundTrip @FederationDomainConfig.FederationDomainConfigs, + testRoundTrip @FederationDomainConfig.FederationStrategy, testRoundTrip @Message.Priority, testRoundTrip @Message.OtrRecipients, testRoundTrip @Message.NewOtrMessage, diff --git a/libs/wire-api/wire-api.cabal b/libs/wire-api/wire-api.cabal index f41d0ce80c..bce5530d5f 100644 --- a/libs/wire-api/wire-api.cabal +++ 
b/libs/wire-api/wire-api.cabal @@ -38,6 +38,7 @@ library Wire.API.Event.Conversation Wire.API.Event.FeatureConfig Wire.API.Event.Team + Wire.API.FederationUpdate Wire.API.Internal.BulkPush Wire.API.Internal.Notification Wire.API.MakesFederatedCall @@ -79,6 +80,7 @@ library Wire.API.Routes.ClientAlgebra Wire.API.Routes.Cookies Wire.API.Routes.CSV + Wire.API.Routes.FederationDomainConfig Wire.API.Routes.Internal.Brig Wire.API.Routes.Internal.Brig.Connection Wire.API.Routes.Internal.Brig.EJPD @@ -191,6 +193,7 @@ library MultiParamTypeClasses MultiWayIf NamedFieldPuns + NumericUnderscores OverloadedRecordDot OverloadedStrings PackageImports @@ -215,6 +218,7 @@ library build-depends: aeson >=2.0.1.0 + , async , attoparsec >=0.10 , base >=4 && <5 , base64-bytestring >=1.0 @@ -248,6 +252,7 @@ library , hscim , HsOpenSSL , http-api-data + , http-client , http-media , http-types , imports @@ -269,6 +274,7 @@ library , quickcheck-instances >=0.3.16 , random >=1.2.0 , resourcet + , retry , saml2-web-sso , schema-profunctor , scientific @@ -288,6 +294,7 @@ library , tagged , text >=0.11 , time >=1.4 + , tinylog , transitive-anns , types-common >=0.16 , unordered-containers >=0.2 diff --git a/services/brig/brig.cabal b/services/brig/brig.cabal index 82c1755a8a..cdaf514474 100644 --- a/services/brig/brig.cabal +++ b/services/brig/brig.cabal @@ -51,6 +51,7 @@ library Brig.Data.Activation Brig.Data.Client Brig.Data.Connection + Brig.Data.Federation Brig.Data.Instances Brig.Data.LoginCode Brig.Data.MLS.KeyPackage @@ -209,6 +210,7 @@ library , conduit >=1.2.8 , containers >=0.5 , cookie >=0.4 + , cql , cryptobox-haskell >=0.1.1 , currency-codes >=2.0 , data-default >=0.5 @@ -260,6 +262,7 @@ library , polysemy-plugin , polysemy-wire-zoo , proto-lens >=0.1 + , random , random-shuffle >=0.0.3 , resource-pool >=0.2 , resourcet >=1.1 @@ -658,6 +661,7 @@ executable brig-schema V74_AddOAuthTables V75_AddOAuthCodeChallenge V76_AddSupportedProtocols + V77_FederationRemotes V_FUTUREWORK 
hs-source-dirs: schema/src diff --git a/services/brig/brig.integration.yaml b/services/brig/brig.integration.yaml index 4fd638b7fb..d9547a4ab6 100644 --- a/services/brig/brig.integration.yaml +++ b/services/brig/brig.integration.yaml @@ -189,6 +189,8 @@ optSettings: # Remember to keep it the same in Galley. setFederationDomain: example.com setFeatureFlags: # see #RefConfigOptions in `/docs/reference` + setFederationDomainConfigsUpdateFreq: 1 + setFederationStrategy: allowAll setFederationDomainConfigs: - domain: example.com search_policy: full_search diff --git a/services/brig/default.nix b/services/brig/default.nix index 2acad89cca..e5859de928 100644 --- a/services/brig/default.nix +++ b/services/brig/default.nix @@ -29,6 +29,7 @@ , conduit , containers , cookie +, cql , cryptobox-haskell , currency-codes , data-default @@ -187,6 +188,7 @@ mkDerivation { conduit containers cookie + cql cryptobox-haskell currency-codes data-default @@ -238,6 +240,7 @@ mkDerivation { polysemy-plugin polysemy-wire-zoo proto-lens + random random-shuffle resource-pool resourcet diff --git a/services/brig/schema/src/Main.hs b/services/brig/schema/src/Main.hs index 6aa6618367..8e1b05598a 100644 --- a/services/brig/schema/src/Main.hs +++ b/services/brig/schema/src/Main.hs @@ -56,6 +56,7 @@ import qualified V73_ReplaceNonceTable import qualified V74_AddOAuthTables import qualified V75_AddOAuthCodeChallenge import qualified V76_AddSupportedProtocols +import qualified V77_FederationRemotes main :: IO () main = do @@ -99,7 +100,8 @@ main = do V73_ReplaceNonceTable.migration, V74_AddOAuthTables.migration, V75_AddOAuthCodeChallenge.migration, - V76_AddSupportedProtocols.migration + V76_AddSupportedProtocols.migration, + V77_FederationRemotes.migration -- When adding migrations here, don't forget to update -- 'schemaVersion' in Brig.App diff --git a/services/brig/schema/src/V77_FederationRemotes.hs b/services/brig/schema/src/V77_FederationRemotes.hs new file mode 100644 index 
0000000000..250164ceb8 --- /dev/null +++ b/services/brig/schema/src/V77_FederationRemotes.hs @@ -0,0 +1,37 @@ +{-# LANGUAGE QuasiQuotes #-} + +-- This file is part of the Wire Server implementation. +-- +-- Copyright (C) 2022 Wire Swiss GmbH +-- +-- This program is free software: you can redistribute it and/or modify it under +-- the terms of the GNU Affero General Public License as published by the Free +-- Software Foundation, either version 3 of the License, or (at your option) any +-- later version. +-- +-- This program is distributed in the hope that it will be useful, but WITHOUT +-- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +-- FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more +-- details. +-- +-- You should have received a copy of the GNU Affero General Public License along +-- with this program. If not, see . + +module V77_FederationRemotes + ( migration, + ) +where + +import Cassandra.Schema +import Imports +import Text.RawString.QQ + +migration :: Migration +migration = + Migration 77 "Table for keeping track of instances we federate with" $ + schema' + [r| CREATE TABLE federation_remotes ( + domain text PRIMARY KEY, + search_policy int + ) + |] diff --git a/services/brig/src/Brig/API/Internal.hs b/services/brig/src/Brig/API/Internal.hs index 6ebb40af4c..10afb03a9c 100644 --- a/services/brig/src/Brig/API/Internal.hs +++ b/services/brig/src/Brig/API/Internal.hs @@ -38,6 +38,7 @@ import qualified Brig.Code as Code import Brig.Data.Activation import qualified Brig.Data.Client as Data import qualified Brig.Data.Connection as Data +import qualified Brig.Data.Federation as Data import qualified Brig.Data.MLS.KeyPackage as Data import qualified Brig.Data.User as Data import Brig.Effects.BlacklistPhonePrefixStore (BlacklistPhonePrefixStore) @@ -61,16 +62,17 @@ import qualified Brig.User.API.Search as Search import qualified Brig.User.EJPD import qualified Brig.User.Search.Index as Index import 
Control.Error hiding (bool) -import Control.Lens (view) +import Control.Lens (view, (^.)) import Data.Aeson hiding (json) import Data.ByteString.Conversion import qualified Data.ByteString.Conversion as List +import Data.Domain (Domain) import Data.Handle import Data.Id as Id import qualified Data.Map.Strict as Map import Data.Qualified import qualified Data.Set as Set -import Imports hiding (cs, head) +import Imports hiding (head) import Network.HTTP.Types.Status import Network.Wai (Response) import Network.Wai.Predicate hiding (result, setStatus) @@ -81,14 +83,17 @@ import Polysemy import Servant hiding (Handler, JSON, addHeader, respond) import Servant.Swagger.Internal.Orphans () import qualified System.Logger.Class as Log +import System.Random (randomRIO) import UnliftIO.Async import Wire.API.Connection import Wire.API.Error import qualified Wire.API.Error.Brig as E import Wire.API.Federation.API +import Wire.API.Federation.Error (FederationError (..)) import Wire.API.MLS.Credential import Wire.API.MLS.KeyPackage import Wire.API.MLS.Serialisation +import Wire.API.Routes.FederationDomainConfig import Wire.API.Routes.Internal.Brig import qualified Wire.API.Routes.Internal.Brig as BrigIRoutes import Wire.API.Routes.Internal.Brig.Connection @@ -121,6 +126,7 @@ servantSitemap = :<|> authAPI :<|> internalOauthAPI :<|> internalSearchIndexAPI + :<|> federationRemotesAPI istatusAPI :: forall r. 
ServerT BrigIRoutes.IStatusAPI (Handler r) istatusAPI = Named @"get-status" (pure NoContent) @@ -180,6 +186,128 @@ authAPI = :<|> Named @"login-code" getLoginCode :<|> Named @"reauthenticate" reauthenticate +federationRemotesAPI :: ServerT BrigIRoutes.FederationRemotesAPI (Handler r) +federationRemotesAPI = + Named @"add-federation-remotes" addFederationRemote + :<|> Named @"get-federation-remotes" getFederationRemotes + :<|> Named @"update-federation-remotes" updateFederationRemote + :<|> Named @"delete-federation-remotes" deleteFederationRemote + +addFederationRemote :: FederationDomainConfig -> ExceptT Brig.API.Error.Error (AppT r) () +addFederationRemote fedDomConf = do + assertNoDivergingDomainInConfigFiles fedDomConf + result <- lift . wrapClient $ Data.addFederationRemote fedDomConf + case result of + Data.AddFederationRemoteSuccess -> pure () + Data.AddFederationRemoteMaxRemotesReached -> + throwError . fedError . FederationUnexpectedError $ + "Maximum number of remote backends reached. If you need to create more connections, \ + \please contact wire.com." + +-- | Compile config file list into a map indexed by domains. Use this to make sure the config +-- file is consistent (ie., no two entries for the same domain). +remotesMapFromCfgFile :: AppT r (Map Domain FederationDomainConfig) +remotesMapFromCfgFile = do + cfg <- asks (fromMaybe [] . setFederationDomainConfigs . view settings) + let dict = [(domain cnf, cnf) | cnf <- cfg] + merge c c' = + if c == c' + then c + else error $ "error in config file: conflicting parameters on domain: " <> show (c, c') + pure $ Map.fromListWith merge dict + +-- | Return the config file list. Use this to make sure the config file is consistent (ie., +-- no two entries for the same domain). Based on `remotesMapFromCfgFile`. 
+remotesListFromCfgFile :: AppT r [FederationDomainConfig] +remotesListFromCfgFile = Map.elems <$> remotesMapFromCfgFile + +-- | If remote domain is registered in config file, the version that can be added to the +-- database must be the same. +assertNoDivergingDomainInConfigFiles :: FederationDomainConfig -> ExceptT Brig.API.Error.Error (AppT r) () +assertNoDivergingDomainInConfigFiles fedComConf = do + cfg <- lift remotesMapFromCfgFile + let diverges = case Map.lookup (domain fedComConf) cfg of + Nothing -> False + Just fedComConf' -> fedComConf' /= fedComConf + when diverges $ do + throwError . fedError . FederationUnexpectedError $ + "keeping track of remote domains in the brig config file is deprecated, but as long as we \ + \do that, adding a domain with different settings than in the config file is nto allowed. want " + <> ( "Just " + <> cs (show fedComConf) + <> "or Nothing, " + ) + <> ( "got " + <> cs (show (Map.lookup (domain fedComConf) cfg)) + ) + +getFederationRemotes :: ExceptT Brig.API.Error.Error (AppT r) FederationDomainConfigs +getFederationRemotes = lift $ do + -- FUTUREWORK: we should solely rely on `db` in the future for remote domains; merging + -- remote domains from `cfg` is just for providing an easier, more robust migration path. + -- See + -- https://docs.wire.com/understand/federation/backend-communication.html#configuring-remote-connections, + -- http://docs.wire.com/developer/developer/federation-design-aspects.html#configuring-remote-connections-dev-perspective + db <- wrapClient Data.getFederationRemotes + (ms :: Maybe FederationStrategy, mf :: [FederationDomainConfig], mu :: Maybe Int) <- do + cfg <- ask + domcfgs <- remotesListFromCfgFile -- (it's not very elegant to prove the env twice here, but this code is transitory.) + pure + ( setFederationStrategy (cfg ^. settings), + domcfgs, + setFederationDomainConfigsUpdateFreq (cfg ^. settings) + ) + + -- update frequency settings of `<1` are interpreted as `1 second`. 
only warn about this every now and + -- then, that'll be noise enough for the logs given the traffic on this end-point. + unless (maybe True (> 0) mu) $ + randomRIO (0 :: Int, 1000) + >>= \case + 0 -> Log.warn (Log.msg (Log.val "Invalid brig configuration: setFederationDomainConfigsUpdateFreq must be > 0. setting to 1 second.")) + _ -> pure () + + defFederationDomainConfigs + & maybe id (\v cfg -> cfg {strategy = v}) ms + & (\cfg -> cfg {remotes = nub $ db <> mf}) + & maybe id (\v cfg -> cfg {updateInterval = min 1 v}) mu + & pure + +updateFederationRemote :: Domain -> FederationDomainConfig -> ExceptT Brig.API.Error.Error (AppT r) () +updateFederationRemote dom fedcfg = do + assertDomainIsNotUpdated dom fedcfg + assertNoDomainsFromConfigFiles dom + (lift . wrapClient . Data.updateFederationRemote $ fedcfg) >>= \case + True -> pure () + False -> + throwError . fedError . FederationUnexpectedError . cs $ + "federation domain does not exist and cannot be updated: " <> show (dom, fedcfg) + +assertDomainIsNotUpdated :: Domain -> FederationDomainConfig -> ExceptT Brig.API.Error.Error (AppT r) () +assertDomainIsNotUpdated dom fedcfg = do + when (dom /= domain fedcfg) $ + throwError . fedError . FederationUnexpectedError . cs $ + "federation domain of a given peer cannot be changed from " <> show (domain fedcfg) <> " to " <> show dom <> "." + +-- | FUTUREWORK: should go away in the future; see 'getFederationRemotes'. +assertNoDomainsFromConfigFiles :: Domain -> ExceptT Brig.API.Error.Error (AppT r) () +assertNoDomainsFromConfigFiles dom = do + cfg <- asks (fromMaybe [] . setFederationDomainConfigs . view settings) + when (dom `elem` (domain <$> cfg)) $ do + throwError . fedError . FederationUnexpectedError $ + "keeping track of remote domains in the brig config file is deprecated, but as long as we \ + \do that, removing or updating items listed in the config file is not allowed." + +-- | Remove the entry from the database if present (or do nothing if not). 
This responds with +-- 533 if the entry was also present in the config file, but only *after* it has removed the +-- entry from cassandra. +-- +-- The ordering on this delete then check seems weird, but allows us to default all the +-- way back to config file state for a federation domain. +deleteFederationRemote :: Domain -> ExceptT Brig.API.Error.Error (AppT r) () +deleteFederationRemote dom = do + lift . wrapClient . Data.deleteFederationRemote $ dom + assertNoDomainsFromConfigFiles dom + -- | Responds with 'Nothing' if field is NULL in existing user or user does not exist. getAccountConferenceCallingConfig :: UserId -> (Handler r) (ApiFt.WithStatusNoLock ApiFt.ConferenceCallingConfig) getAccountConferenceCallingConfig uid = @@ -255,8 +383,8 @@ getMLSClients usr _ss = do pure . Set.fromList . map (uncurry ClientInfo) $ clientInfo where getResult [] = pure mempty - getResult ((u, cs) : rs) - | u == usr = pure cs + getResult ((u, cs') : rs) + | u == usr = pure cs' | otherwise = getResult rs getValidity lusr cid = diff --git a/services/brig/src/Brig/API/Util.hs b/services/brig/src/Brig/API/Util.hs index c2768e1e9d..275dd465a8 100644 --- a/services/brig/src/Brig/API/Util.hs +++ b/services/brig/src/Brig/API/Util.hs @@ -40,8 +40,7 @@ import Brig.API.Types import Brig.App import qualified Brig.Code as Code import qualified Brig.Data.User as Data -import Brig.Options (FederationDomainConfig, federationDomainConfigs, set2FACodeGenerationDelaySecs) -import qualified Brig.Options as Opts +import Brig.Options (federationDomainConfigs, set2FACodeGenerationDelaySecs) import Brig.Types.Intra (accountUser) import Control.Lens (view) import Control.Monad.Catch (throwM) @@ -64,6 +63,7 @@ import Util.Logging (sha256String) import Wire.API.Error import Wire.API.Error.Brig import Wire.API.Federation.Error +import Wire.API.Routes.FederationDomainConfig as FD import Wire.API.User import Wire.API.User.Search (FederatedUserSearchPolicy (NoSearch)) import qualified Wire.Sem.Concurrency 
as C @@ -170,11 +170,11 @@ exceptTToMaybe = (pure . either Just (const Nothing)) <=< runExceptT lookupDomainConfig :: MonadReader Env m => Domain -> m (Maybe FederationDomainConfig) lookupDomainConfig domain = do domainConfigs <- fromMaybe [] <$> view (settings . federationDomainConfigs) - pure $ find ((== domain) . Opts.domain) domainConfigs + pure $ find ((== domain) . FD.domain) domainConfigs -- | If domain is not configured fall back to `FullSearch` lookupSearchPolicy :: MonadReader Env m => Domain -> m FederatedUserSearchPolicy -lookupSearchPolicy domain = fromMaybe NoSearch <$> (Opts.cfgSearchPolicy <$$> lookupDomainConfig domain) +lookupSearchPolicy domain = fromMaybe NoSearch <$> (FD.cfgSearchPolicy <$$> lookupDomainConfig domain) -- | Convert a qualified value into a local one. Throw if the value is not actually local. ensureLocal :: Qualified a -> AppT r (Local a) diff --git a/services/brig/src/Brig/App.hs b/services/brig/src/Brig/App.hs index 6d1d5543a1..8f07bc7105 100644 --- a/services/brig/src/Brig/App.hs +++ b/services/brig/src/Brig/App.hs @@ -157,7 +157,7 @@ import Wire.API.User.Identity (Email) import Wire.API.User.Profile (Locale) schemaVersion :: Int32 -schemaVersion = 75 +schemaVersion = 77 ------------------------------------------------------------------------------- -- Environment diff --git a/services/brig/src/Brig/Data/Federation.hs b/services/brig/src/Brig/Data/Federation.hs new file mode 100644 index 0000000000..3a1ec6cda8 --- /dev/null +++ b/services/brig/src/Brig/Data/Federation.hs @@ -0,0 +1,76 @@ +-- This file is part of the Wire Server implementation. +-- +-- Copyright (C) 2022 Wire Swiss GmbH +-- +-- This program is free software: you can redistribute it and/or modify it under +-- the terms of the GNU Affero General Public License as published by the Free +-- Software Foundation, either version 3 of the License, or (at your option) any +-- later version. 
+-- +-- This program is distributed in the hope that it will be useful, but WITHOUT +-- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +-- FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more +-- details. +-- +-- You should have received a copy of the GNU Affero General Public License along +-- with this program. If not, see . + +module Brig.Data.Federation + ( getFederationRemotes, + addFederationRemote, + updateFederationRemote, + deleteFederationRemote, + AddFederationRemoteResult (..), + ) +where + +import Brig.Data.Instances () +import Cassandra +import Control.Exception (ErrorCall (ErrorCall)) +import Control.Monad.Catch (throwM) +import Data.Domain +import Database.CQL.Protocol (SerialConsistency (LocalSerialConsistency), serialConsistency) +import Imports +import Wire.API.Routes.FederationDomainConfig +import Wire.API.User.Search + +maxKnownNodes :: Int +maxKnownNodes = 10000 + +getFederationRemotes :: forall m. MonadClient m => m [FederationDomainConfig] +getFederationRemotes = uncurry FederationDomainConfig <$$> qry + where + qry :: m [(Domain, FederatedUserSearchPolicy)] + qry = retry x1 . 
query get $ params LocalQuorum () + + get :: PrepQuery R () (Domain, FederatedUserSearchPolicy) + get = fromString $ "SELECT domain, search_policy FROM federation_remotes LIMIT " <> show maxKnownNodes + +data AddFederationRemoteResult = AddFederationRemoteSuccess | AddFederationRemoteMaxRemotesReached + +addFederationRemote :: MonadClient m => FederationDomainConfig -> m AddFederationRemoteResult +addFederationRemote (FederationDomainConfig rdom searchpolicy) = do + l <- length <$> getFederationRemotes + if l >= maxKnownNodes + then pure AddFederationRemoteMaxRemotesReached + else AddFederationRemoteSuccess <$ retry x5 (write add (params LocalQuorum (rdom, searchpolicy))) + where + add :: PrepQuery W (Domain, FederatedUserSearchPolicy) () + add = "INSERT INTO federation_remotes (domain, search_policy) VALUES (?, ?)" + +updateFederationRemote :: MonadClient m => FederationDomainConfig -> m Bool +updateFederationRemote (FederationDomainConfig rdom spol) = do + retry x1 (trans upd (params LocalQuorum (spol, rdom)) {serialConsistency = Just LocalSerialConsistency}) >>= \case + [] -> pure False + [_] -> pure True + _ -> throwM $ ErrorCall "Primary key violation detected federation_remotes" + where + upd :: PrepQuery W (FederatedUserSearchPolicy, Domain) x + upd = "UPDATE federation_remotes SET search_policy = ? WHERE domain = ? IF EXISTS" + +deleteFederationRemote :: MonadClient m => Domain -> m () +deleteFederationRemote rdom = + retry x1 $ write delete (params LocalQuorum (Identity rdom)) + where + delete :: PrepQuery W (Identity Domain) () + delete = "DELETE FROM federation_remotes WHERE domain = ?" 
diff --git a/services/brig/src/Brig/Data/Instances.hs b/services/brig/src/Brig/Data/Instances.hs index e13e2362ab..6df4cb1fa2 100644 --- a/services/brig/src/Brig/Data/Instances.hs +++ b/services/brig/src/Brig/Data/Instances.hs @@ -45,6 +45,7 @@ import Wire.API.User.Activation import Wire.API.User.Client import Wire.API.User.Password import Wire.API.User.RichInfo +import Wire.API.User.Search deriving instance Cql Name @@ -287,6 +288,18 @@ instance Cql SearchVisibilityInbound where fromCql (CqlInt 1) = pure SearchableByAllTeams fromCql n = Left $ "Unexpected SearchVisibilityInbound: " ++ show n +instance Cql FederatedUserSearchPolicy where + ctype = Tagged IntColumn + + toCql NoSearch = CqlInt 0 + toCql ExactHandleSearch = CqlInt 1 + toCql FullSearch = CqlInt 2 + + fromCql (CqlInt 0) = pure NoSearch + fromCql (CqlInt 1) = pure ExactHandleSearch + fromCql (CqlInt 2) = pure FullSearch + fromCql n = Left $ "Unexpected FederatedUserSearchPolicy: " ++ show n + instance Cql (Imports.Set BaseProtocolTag) where ctype = Tagged IntColumn diff --git a/services/brig/src/Brig/Options.hs b/services/brig/src/Brig/Options.hs index 626ee21c94..f947bef3e3 100644 --- a/services/brig/src/Brig/Options.hs +++ b/services/brig/src/Brig/Options.hs @@ -55,10 +55,10 @@ import Network.AMQP.Extended import qualified Network.DNS as DNS import System.Logger.Extended (Level, LogFormat) import Util.Options +import Wire.API.Routes.FederationDomainConfig import Wire.API.Routes.Version import qualified Wire.API.Team.Feature as Public import Wire.API.User -import Wire.API.User.Search (FederatedUserSearchPolicy) import Wire.Arbitrary (Arbitrary, arbitrary) newtype Timeout = Timeout @@ -401,20 +401,6 @@ instance ToSchema ListAllSFTServers where element "disabled" HideAllSFTServers ] -data FederationDomainConfig = FederationDomainConfig - { domain :: Domain, - cfgSearchPolicy :: FederatedUserSearchPolicy - } - deriving (Show, Generic) - deriving (ToJSON, FromJSON) via Schema FederationDomainConfig - 
-instance ToSchema FederationDomainConfig where - schema = - object "FederationDomainConfig" $ - FederationDomainConfig - <$> domain .= field "domain" schema - <*> cfgSearchPolicy .= field "search_policy" schema - -- | Options that are consumed on startup data Opts = Opts -- services @@ -553,24 +539,34 @@ data Settings = Settings -- returns users from the same team setSearchSameTeamOnly :: !(Maybe Bool), -- | FederationDomain is required, even when not wanting to federate with other backends - -- (in that case the 'allowedDomains' can be set to empty in Federator) + -- (in that case the 'setFederationStrategy' can be set to `allowNone` below, or to + -- `allowDynamic` while keeping the list of allowed domains empty, see + -- https://docs.wire.com/understand/federation/backend-communication.html#configuring-remote-connections) -- Federation domain is used to qualify local IDs and handles, -- e.g. 0c4d8944-70fa-480e-a8b7-9d929862d18c@wire.com and somehandle@wire.com. -- It should also match the SRV DNS records under which other wire-server installations can find this backend: - -- _wire-server-federator._tcp. - -- Once set, DO NOT change it: if you do, existing users may have a broken experience and/or stop working + -- >>> _wire-server-federator._tcp. + -- Once set, DO NOT change it: if you do, existing users may have a broken experience and/or stop working. -- Remember to keep it the same in all services. - -- Example: - -- allowedDomains: - -- - wire.com - -- - example.com setFederationDomain :: !Domain, + -- | See https://docs.wire.com/understand/federation/backend-communication.html#configuring-remote-connections + -- default: AllowNone + setFederationStrategy :: !(Maybe FederationStrategy), + -- | 'setFederationDomainConfigs' is introduced in + -- https://github.com/wireapp/wire-server/pull/3260 for the sole purpose of transitioning + -- to dynamic federation remote configuration. 
See + -- https://docs.wire.com/understand/federation/backend-communication.html#configuring-remote-connections + -- for details. + -- default: [] setFederationDomainConfigs :: !(Maybe [FederationDomainConfig]), + -- | In seconds. Default: 10 seconds. Values <1 are silently replaced by 1. See + -- https://docs.wire.com/understand/federation/backend-communication.html#configuring-remote-connections + setFederationDomainConfigsUpdateFreq :: !(Maybe Int), -- | The amount of time in milliseconds to wait after reading from an SQS queue -- returns no message, before asking for messages from SQS again. -- defaults to 'defSqsThrottleMillis'. -- When using real SQS from AWS, throttling isn't needed as much, since using - -- SQS.rmWaitTimeSeconds (Just 20) in Brig.AWS.listen + -- >>> SQS.rmWaitTimeSeconds (Just 20) in Brig.AWS.listen -- ensures that there is only one request every 20 seconds. -- However, that parameter is not honoured when using fake-sqs -- (where throttling can thus make sense) diff --git a/services/brig/test/integration/API/Federation.hs b/services/brig/test/integration/API/Federation.hs index 7e0547cf73..43b6f433c7 100644 --- a/services/brig/test/integration/API/Federation.hs +++ b/services/brig/test/integration/API/Federation.hs @@ -50,6 +50,7 @@ import qualified Wire.API.Federation.API.Brig as FedBrig import qualified Wire.API.Federation.API.Brig as S import Wire.API.Federation.Component import Wire.API.Federation.Version +import Wire.API.Routes.FederationDomainConfig as FD import Wire.API.User import Wire.API.User.Client import Wire.API.User.Client.Prekey @@ -67,7 +68,7 @@ tests m opts brig cannon fedBrigClient = test m "POST /federation/search-users : Found (multiple users)" (testFulltextSearchMultipleUsers opts brig), test m "POST /federation/search-users : NotFound" (testSearchNotFound opts), test m "POST /federation/search-users : Empty Input - NotFound" (testSearchNotFoundEmpty opts), - test m "POST /federation/search-users : configured 
restrictions" (testSearchRestrictions opts brig), + flakyTest m "POST /federation/search-users : configured restrictions" (testSearchRestrictions opts brig), test m "POST /federation/get-user-by-handle : configured restrictions" (testGetUserByHandleRestrictions opts brig), test m "POST /federation/get-user-by-handle : Found" (testGetUserByHandleSuccess opts brig), test m "POST /federation/get-user-by-handle : NotFound" (testGetUserByHandleNotFound opts), @@ -85,7 +86,7 @@ tests m opts brig cannon fedBrigClient = allowFullSearch :: Domain -> Opt.Opts -> Opt.Opts allowFullSearch domain opts = - opts & Opt.optionSettings . Opt.federationDomainConfigs ?~ [Opt.FederationDomainConfig domain FullSearch] + opts & Opt.optionSettings . Opt.federationDomainConfigs ?~ [FD.FederationDomainConfig domain FullSearch] testSearchSuccess :: Opt.Opts -> Brig -> Http () testSearchSuccess opts brig = do @@ -184,9 +185,9 @@ testSearchRestrictions opts brig = do let opts' = opts & Opt.optionSettings . Opt.federationDomainConfigs - ?~ [ Opt.FederationDomainConfig domainNoSearch NoSearch, - Opt.FederationDomainConfig domainExactHandle ExactHandleSearch, - Opt.FederationDomainConfig domainFullSearch FullSearch + ?~ [ FD.FederationDomainConfig domainNoSearch NoSearch, + FD.FederationDomainConfig domainExactHandle ExactHandleSearch, + FD.FederationDomainConfig domainFullSearch FullSearch ] let expectSearch :: HasCallStack => Domain -> Text -> [Qualified UserId] -> FederatedUserSearchPolicy -> WaiTest.Session () @@ -220,9 +221,9 @@ testGetUserByHandleRestrictions opts brig = do let opts' = opts & Opt.optionSettings . 
Opt.federationDomainConfigs - ?~ [ Opt.FederationDomainConfig domainNoSearch NoSearch, - Opt.FederationDomainConfig domainExactHandle ExactHandleSearch, - Opt.FederationDomainConfig domainFullSearch FullSearch + ?~ [ FD.FederationDomainConfig domainNoSearch NoSearch, + FD.FederationDomainConfig domainExactHandle ExactHandleSearch, + FD.FederationDomainConfig domainFullSearch FullSearch ] let expectSearch domain expectedUser = do diff --git a/services/brig/test/integration/API/Search.hs b/services/brig/test/integration/API/Search.hs index 4a1111245e..0030c80f08 100644 --- a/services/brig/test/integration/API/Search.hs +++ b/services/brig/test/integration/API/Search.hs @@ -79,7 +79,7 @@ tests opts mgr galley brig = do testWithBothIndices opts mgr "size - when exact handle matches a team user" $ testSearchSize brig True, testWithBothIndices opts mgr "size - when exact handle matches a non team user" $ testSearchSize brig False, test mgr "empty query" $ testSearchEmpty brig, - test mgr "reindex" $ testReindex brig, + flakyTest mgr "reindex" $ testReindex brig, testWithBothIndices opts mgr "no match" $ testSearchNoMatch brig, testWithBothIndices opts mgr "no extra results" $ testSearchNoExtraResults brig, testWithBothIndices opts mgr "order-handle (prefix match)" $ testOrderHandle brig, diff --git a/services/brig/test/integration/Util.hs b/services/brig/test/integration/Util.hs index b86ccc68ee..d243fa9d04 100644 --- a/services/brig/test/integration/Util.hs +++ b/services/brig/test/integration/Util.hs @@ -29,7 +29,6 @@ import Brig.App (applog, fsWatcher, sftEnv, turnEnv) import Brig.Calling as Calling import qualified Brig.Code as Code import qualified Brig.Options as Opt -import qualified Brig.Options as Opts import qualified Brig.Run as Run import Brig.Types.Activation import qualified Brig.ZAuth as ZAuth @@ -98,6 +97,7 @@ import Test.Tasty (TestName, TestTree) import Test.Tasty.Cannon import qualified Test.Tasty.Cannon as WS import Test.Tasty.HUnit +import 
Test.Tasty.Pending (flakyTestCase) import Text.Printf (printf) import qualified UnliftIO.Async as Async import Util.Options @@ -226,6 +226,9 @@ instance ToJSON SESNotification where test :: Manager -> TestName -> Http a -> TestTree test m n h = testCase n (void $ runHttpT m h) +flakyTest :: Manager -> TestName -> Http a -> TestTree +flakyTest m n h = flakyTestCase n (void $ runHttpT m h) + twoRandomUsers :: (MonadCatch m, MonadIO m, MonadHttp m, HasCallStack) => Brig -> m (Qualified UserId, UserId, Qualified UserId, UserId) twoRandomUsers brig = do quid1 <- userQualifiedId <$> randomUser brig @@ -1051,7 +1054,7 @@ circumventSettingsOverride = runHttpT -- -- Beware: (1) Not all async parts of brig are running in this. (2) other services will -- see the old, unaltered brig. -withSettingsOverrides :: MonadIO m => Opts.Opts -> WaiTest.Session a -> m a +withSettingsOverrides :: MonadIO m => Opt.Opts -> WaiTest.Session a -> m a withSettingsOverrides opts action = liftIO $ do (brigApp, env) <- Run.mkApp opts sftDiscovery <- @@ -1065,10 +1068,10 @@ withSettingsOverrides opts action = liftIO $ do -- | When we remove the customer-specific extension of domain blocking, this test will fail to -- compile. -withDomainsBlockedForRegistration :: (MonadIO m) => Opts.Opts -> [Text] -> WaiTest.Session a -> m a +withDomainsBlockedForRegistration :: (MonadIO m) => Opt.Opts -> [Text] -> WaiTest.Session a -> m a withDomainsBlockedForRegistration opts domains sess = do - let opts' = opts {Opts.optSettings = (Opts.optSettings opts) {Opts.setCustomerExtensions = Just blocked}} - blocked = Opts.CustomerExtensions (Opts.DomainsBlockedForRegistration (unsafeMkDomain <$> domains)) + let opts' = opts {Opt.optSettings = (Opt.optSettings opts) {Opt.setCustomerExtensions = Just blocked}} + blocked = Opt.CustomerExtensions (Opt.DomainsBlockedForRegistration (unsafeMkDomain <$> domains)) unsafeMkDomain = either error id . 
mkDomain withSettingsOverrides opts' sess diff --git a/services/cannon/cannon.integration.yaml b/services/cannon/cannon.integration.yaml index f64f3c104f..1886f69405 100644 --- a/services/cannon/cannon.integration.yaml +++ b/services/cannon/cannon.integration.yaml @@ -16,6 +16,10 @@ gundeck: host: 127.0.0.1 port: 8086 +brig: + host: 0.0.0.0 + port: 8082 + drainOpts: gracePeriodSeconds: 1 millisecondsBetweenBatches: 500 diff --git a/services/cannon/cannon2.integration.yaml b/services/cannon/cannon2.integration.yaml index 5c25937652..2aa003cfd6 100644 --- a/services/cannon/cannon2.integration.yaml +++ b/services/cannon/cannon2.integration.yaml @@ -16,6 +16,10 @@ gundeck: host: 127.0.0.1 port: 8086 +brig: + host: 0.0.0.0 + port: 8082 + drainOpts: gracePeriodSeconds: 1 millisecondsBetweenBatches: 5 diff --git a/services/cannon/src/Cannon/Options.hs b/services/cannon/src/Cannon/Options.hs index bce5cba50c..8e59927ff4 100644 --- a/services/cannon/src/Cannon/Options.hs +++ b/services/cannon/src/Cannon/Options.hs @@ -24,6 +24,8 @@ module Cannon.Options port, cannon, gundeck, + brig, + Brig (..), externalHost, externalHostFile, logLevel, @@ -67,6 +69,15 @@ makeFields ''Gundeck deriveApiFieldJSON ''Gundeck +data Brig = Brig + { _brigHost :: !Text, + _brigPort :: !Word16 + } + deriving (Eq, Show, Generic) + +makeFields ''Brig +deriveApiFieldJSON ''Brig + data DrainOpts = DrainOpts { -- | Maximum amount of time draining should take. Must not be set to 0. 
_drainOptsGracePeriodSeconds :: Word64, @@ -87,6 +98,7 @@ deriveApiFieldJSON ''DrainOpts data Opts = Opts { _optsCannon :: !Cannon, _optsGundeck :: !Gundeck, + _optsBrig :: !Brig, _optsLogLevel :: !Level, _optsLogNetStrings :: !(Maybe (Last Bool)), _optsLogFormat :: !(Maybe (Last LogFormat)), diff --git a/services/cannon/src/Cannon/Run.hs b/services/cannon/src/Cannon/Run.hs index b628a15b89..19338dccf3 100644 --- a/services/cannon/src/Cannon/Run.hs +++ b/services/cannon/src/Cannon/Run.hs @@ -56,6 +56,8 @@ import qualified System.Logger.Extended as L import System.Posix.Signals import qualified System.Posix.Signals as Signals import System.Random.MWC (createSystemRandom) +import Util.Options (Endpoint (..)) +import Wire.API.FederationUpdate import qualified Wire.API.Routes.Internal.Cannon as Internal import Wire.API.Routes.Public.Cannon import Wire.API.Routes.Version.Wai @@ -79,6 +81,12 @@ run o = do <*> mkClock refreshMetricsThread <- Async.async $ runCannon' e refreshMetrics s <- newSettings $ Server (o ^. cannon . host) (o ^. cannon . port) (applog e) m (Just idleTimeout) + + -- Get the federation domain list from Brig and start the updater loop + let brigEndpoint = Endpoint bh bp + Brig bh bp = o ^. brig + (_, updateDomainsThread) <- syncFedDomainConfigs brigEndpoint g emptySyncFedDomainConfigsCallback + let middleware :: Wai.Middleware middleware = versionMiddleware (fold (o ^. disabledAPIVersions)) @@ -101,6 +109,7 @@ run o = do -- the same time and then calling the drain script. I suspect this might be due to some -- cleanup in wai. this needs to be tested very carefully when touched. 
Async.cancel refreshMetricsThread + Async.cancel updateDomainsThread L.close (applog e) where idleTimeout = fromIntegral $ maxPingInterval + 3 diff --git a/services/federator/default.nix b/services/federator/default.nix index 1ef766deac..6bc1e1ddbf 100644 --- a/services/federator/default.nix +++ b/services/federator/default.nix @@ -114,6 +114,7 @@ mkDerivation { wai wai-utilities warp + wire-api wire-api-federation x509 x509-validation diff --git a/services/federator/federator.cabal b/services/federator/federator.cabal index bf1e7d428f..53536521d9 100644 --- a/services/federator/federator.cabal +++ b/services/federator/federator.cabal @@ -141,6 +141,7 @@ library , wai , wai-utilities , warp + , wire-api , wire-api-federation , x509 , x509-validation diff --git a/services/federator/federator.integration.yaml b/services/federator/federator.integration.yaml index 9562b697e1..e0d8ec2355 100644 --- a/services/federator/federator.integration.yaml +++ b/services/federator/federator.integration.yaml @@ -21,21 +21,7 @@ optSettings: # Filepath to one or more PEM-encoded server certificates to use as a trust # store when making requests to remote backends remoteCAStore: "test/resources/integration-ca.pem" - - # Would you like to federate with every wire-server installation ? - # - federationStrategy: - allowAll: - # - # or only with a select set of other wire-server installations? 
- # - # federationStrategy: - # allowedDomains: - # - wire.com - # - example.com - useSystemCAStore: false - clientCertificate: "test/resources/integration-leaf.pem" clientPrivateKey: "test/resources/integration-leaf-key.pem" dnsHost: "127.0.0.1" diff --git a/services/federator/src/Federator/Env.hs b/services/federator/src/Federator/Env.hs index f6b8e79053..660dc7a547 100644 --- a/services/federator/src/Federator/Env.hs +++ b/services/federator/src/Federator/Env.hs @@ -33,6 +33,7 @@ import OpenSSL.Session (SSLContext) import qualified System.Logger.Class as LC import Util.Options import Wire.API.Federation.Component +import Wire.API.Routes.FederationDomainConfig (FederationDomainConfigs) data Env = Env { _metrics :: Metrics, @@ -40,6 +41,7 @@ data Env = Env _requestId :: RequestId, _dnsResolver :: Resolver, _runSettings :: RunSettings, + _domainConfigs :: IORef FederationDomainConfigs, _service :: Component -> Endpoint, _httpManager :: HTTP.Manager, _http2Manager :: IORef Http2Manager diff --git a/services/federator/src/Federator/ExternalServer.hs b/services/federator/src/Federator/ExternalServer.hs index 65444d252e..d146e7c518 100644 --- a/services/federator/src/Federator/ExternalServer.hs +++ b/services/federator/src/Federator/ExternalServer.hs @@ -25,7 +25,6 @@ import qualified Data.Text as Text import Federator.Discovery import Federator.Env import Federator.Error.ServerError -import Federator.Options (RunSettings) import Federator.Response import Federator.Service import Federator.Validation @@ -41,6 +40,7 @@ import Servant.Client.Core import qualified System.Logger.Message as Log import Wire.API.Federation.Component import Wire.API.Federation.Domain +import Wire.API.Routes.FederationDomainConfig -- FUTUREWORK(federation): Versioning of the federation API. 
callInward :: @@ -51,7 +51,7 @@ callInward :: Member (Error ValidationError) r, Member (Error DiscoveryFailure) r, Member (Error ServerError) r, - Member (Input RunSettings) r + Member (Input FederationDomainConfigs) r ) => Wai.Request -> Sem r Wai.Response diff --git a/services/federator/src/Federator/InternalServer.hs b/services/federator/src/Federator/InternalServer.hs index 76d0fdd443..b739fc9892 100644 --- a/services/federator/src/Federator/InternalServer.hs +++ b/services/federator/src/Federator/InternalServer.hs @@ -25,7 +25,6 @@ import qualified Data.ByteString as BS import qualified Data.Text as Text import Federator.Env import Federator.Error.ServerError -import Federator.Options (RunSettings) import Federator.Remote import Federator.Response import Federator.Validation @@ -36,6 +35,7 @@ import Polysemy import Polysemy.Error import Polysemy.Input import Wire.API.Federation.Component +import Wire.API.Routes.FederationDomainConfig data RequestData = RequestData { rdTargetDomain :: Text, @@ -82,7 +82,7 @@ callOutward :: Member (Embed IO) r, Member (Error ValidationError) r, Member (Error ServerError) r, - Member (Input RunSettings) r + Member (Input FederationDomainConfigs) r ) => Wai.Request -> Sem r Wai.Response diff --git a/services/federator/src/Federator/Options.hs b/services/federator/src/Federator/Options.hs index 803faa8d2a..d5bc71da53 100644 --- a/services/federator/src/Federator/Options.hs +++ b/services/federator/src/Federator/Options.hs @@ -1,4 +1,3 @@ -{-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE StrictData #-} -- This file is part of the Wire Server implementation. 
@@ -21,46 +20,13 @@ module Federator.Options where import Data.Aeson -import Data.Domain (Domain ()) import Imports import System.Logger.Extended (Level, LogFormat) import Util.Options -newtype AllowedDomains = AllowedDomains {allowedDomains :: [Domain]} - deriving (Eq, Show, Generic) - deriving newtype (FromJSON, ToJSON) - -data FederationStrategy - = -- | This backend allows federating with any other Wire-Server backend - AllowAll - | -- | Any backend explicitly configured in a FederationAllowList - AllowList AllowedDomains - deriving (Eq, Show, Generic) - -instance ToJSON FederationStrategy where - toJSON AllowAll = - object - [ "allowAll" .= object [] - ] - toJSON (AllowList domains) = - object - [ "allowedDomains" .= domains - ] - -instance FromJSON FederationStrategy where - parseJSON = withObject "FederationStrategy" $ \o -> do - -- Only inspect field content once we committed to one, for better error messages. - allowAll :: Maybe Value <- o .:! "allowAll" - allowList :: Maybe Value <- o .:! "allowedDomains" - case (allowAll, allowList) of - (Just _, Nothing) -> pure AllowAll -- accept any content - (Nothing, Just l) -> AllowList <$> parseJSON l - _ -> fail "invalid FederationStrategy: expected either allowAll or allowedDomains" - -- | Options that persist as runtime settings. data RunSettings = RunSettings { -- | Would you like to federate with everyone or only with a select set of other wire-server installations? 
- federationStrategy :: FederationStrategy, useSystemCAStore :: Bool, remoteCAStore :: Maybe FilePath, clientCertificate :: FilePath, diff --git a/services/federator/src/Federator/Response.hs b/services/federator/src/Federator/Response.hs index 9e0b03e19d..e606777c14 100644 --- a/services/federator/src/Federator/Response.hs +++ b/services/federator/src/Federator/Response.hs @@ -51,6 +51,7 @@ import Polysemy.Internal import Polysemy.TinyLog import Servant.Client.Core import Servant.Types.SourceT +import Wire.API.Routes.FederationDomainConfig import Wire.Network.DNS.Effect import Wire.Sem.Logger.TinyLog @@ -119,6 +120,7 @@ type AllEffects = ServiceStreaming, Input RunSettings, Input Http2Manager, -- needed by Remote + Input FederationDomainConfigs, -- needed for the domain list. Input Env, -- needed by Service Error ValidationError, Error RemoteError, @@ -143,6 +145,7 @@ runFederator env = DiscoveryFailure ] . runInputConst env + . runInputSem (embed @IO (readIORef (view domainConfigs env))) . runInputSem (embed @IO (readIORef (view http2Manager env))) . runInputConst (view runSettings env) . 
interpretServiceHTTP diff --git a/services/federator/src/Federator/Run.hs b/services/federator/src/Federator/Run.hs index 8fd76bab86..9d444e3085 100644 --- a/services/federator/src/Federator/Run.hs +++ b/services/federator/src/Federator/Run.hs @@ -47,10 +47,12 @@ import Federator.Options as Opt import Imports import qualified Network.DNS as DNS import qualified Network.HTTP.Client as HTTP -import qualified System.Logger.Class as Log +import qualified System.Logger as Log import qualified System.Logger.Extended as LogExt import Util.Options import Wire.API.Federation.Component +import Wire.API.FederationUpdate +import Wire.API.Routes.FederationDomainConfig import qualified Wire.Network.DNS.Helper as DNS ------------------------------------------------------------------------------ @@ -60,14 +62,16 @@ import qualified Wire.Network.DNS.Helper as DNS run :: Opts -> IO () run opts = do let resolvConf = mkResolvConf (optSettings opts) DNS.defaultResolvConf - DNS.withCachingResolver resolvConf $ \res -> - bracket (newEnv opts res) closeEnv $ \env -> do + DNS.withCachingResolver resolvConf $ \res -> do + logger <- LogExt.mkLogger (Opt.logLevel opts) (Opt.logNetStrings opts) (Opt.logFormat opts) + (ioref, updateFedDomainsThread) <- syncFedDomainConfigs (brig opts) logger emptySyncFedDomainConfigsCallback + bracket (newEnv opts res logger ioref) closeEnv $ \env -> do let externalServer = serveInward env portExternal internalServer = serveOutward env portInternal - withMonitor (env ^. applog) (onNewSSLContext env) (optSettings opts) $ do + withMonitor logger (onNewSSLContext env) (optSettings opts) $ do internalServerThread <- async internalServer externalServerThread <- async externalServer - void $ waitAnyCancel [internalServerThread, externalServerThread] + void $ waitAnyCancel [updateFedDomainsThread, internalServerThread, externalServerThread] where endpointInternal = federatorInternal opts portInternal = fromIntegral $ endpointInternal ^. 
epPort @@ -87,13 +91,12 @@ run opts = do ------------------------------------------------------------------------------- -- Environment -newEnv :: Opts -> DNS.Resolver -> IO Env -newEnv o _dnsResolver = do +newEnv :: Opts -> DNS.Resolver -> Log.Logger -> IORef FederationDomainConfigs -> IO Env +newEnv o _dnsResolver _applog _domainConfigs = do _metrics <- Metrics.metrics - _applog <- LogExt.mkLogger (Opt.logLevel o) (Opt.logNetStrings o) (Opt.logFormat o) let _requestId = def - let _runSettings = Opt.optSettings o - let _service Brig = Opt.brig o + _runSettings = Opt.optSettings o + _service Brig = Opt.brig o _service Galley = Opt.galley o _service Cargohold = Opt.cargohold o _httpManager <- initHttpManager diff --git a/services/federator/src/Federator/Validation.hs b/services/federator/src/Federator/Validation.hs index 27a5245e28..172619b27b 100644 --- a/services/federator/src/Federator/Validation.hs +++ b/services/federator/src/Federator/Validation.hs @@ -38,13 +38,13 @@ import qualified Data.X509 as X509 import qualified Data.X509.Validation as X509 import Federator.Discovery import Federator.Error -import Federator.Options import Imports import qualified Network.HTTP.Types as HTTP import qualified Network.Wai.Utilities.Error as Wai import Polysemy import Polysemy.Error import Polysemy.Input +import Wire.API.Routes.FederationDomainConfig import Wire.Network.DNS.SRV (SrvTarget (..)) data ValidationError @@ -89,20 +89,21 @@ validationErrorStatus :: ValidationError -> HTTP.Status validationErrorStatus (FederationDenied _) = HTTP.status400 validationErrorStatus _ = HTTP.status403 --- | Validates an already-parsed domain against the allowList using the federator --- startup configuration. +-- | Validates an already-parsed domain against the allow list (stored in +-- `brig.federation_remotes`, cached in `Env`). 
ensureCanFederateWith :: - ( Member (Input RunSettings) r, + ( Member (Input FederationDomainConfigs) r, Member (Error ValidationError) r ) => Domain -> Sem r () ensureCanFederateWith targetDomain = do - strategy <- inputs federationStrategy + FederationDomainConfigs strategy domains _ <- input case strategy of + AllowNone -> throw (FederationDenied targetDomain) AllowAll -> pure () - AllowList (AllowedDomains domains) -> - unless (targetDomain `elem` domains) $ + AllowDynamic -> do + unless (targetDomain `elem` fmap domain domains) $ throw (FederationDenied targetDomain) decodeCertificate :: @@ -135,11 +136,11 @@ parseDomainText domain = . mkDomain $ domain --- | Validates an unknown domain string against the allowList using the +-- | Validates an unknown domain string against the allow list using the -- federator startup configuration and checks that it matches the names reported -- by the client certificate validateDomain :: - ( Member (Input RunSettings) r, + ( Member (Input FederationDomainConfigs) r, Member (Error ValidationError) r, Member (Error DiscoveryFailure) r, Member DiscoverFederator r diff --git a/services/federator/test/unit/Test/Federator/ExternalServer.hs b/services/federator/test/unit/Test/Federator/ExternalServer.hs index 0a6379958c..968f9d4146 100644 --- a/services/federator/test/unit/Test/Federator/ExternalServer.hs +++ b/services/federator/test/unit/Test/Federator/ExternalServer.hs @@ -44,6 +44,7 @@ import Test.Federator.Validation (mockDiscoveryTrivial) import Test.Tasty import Test.Tasty.HUnit import Wire.API.Federation.Component +import Wire.API.Routes.FederationDomainConfig import Wire.Sem.Logger.TinyLog tests :: TestTree @@ -112,6 +113,7 @@ requestBrigSuccess = . discardTinyLogs . mockDiscoveryTrivial . runInputConst noClientCertSettings + . 
runInputConst scaffoldingFederationDomainConfigs $ callInward request let expectedCall = Call Brig "/federation/get-user-by-handle" "\"foo\"" aValidDomain assertEqual "one call to brig should be made" [expectedCall] actualCalls @@ -137,6 +139,7 @@ requestBrigFailure = . discardTinyLogs . mockDiscoveryTrivial . runInputConst noClientCertSettings + . runInputConst scaffoldingFederationDomainConfigs $ callInward request let expectedCall = Call Brig "/federation/get-user-by-handle" "\"foo\"" aValidDomain @@ -163,6 +166,7 @@ requestGalleySuccess = . discardTinyLogs . mockDiscoveryTrivial . runInputConst noClientCertSettings + . runInputConst scaffoldingFederationDomainConfigs $ callInward request let expectedCall = Call Galley "/federation/get-conversations" "\"foo\"" aValidDomain embed $ assertEqual "one call to galley should be made" [expectedCall] actualCalls @@ -191,6 +195,7 @@ requestNoDomain = . discardTinyLogs . mockDiscoveryTrivial . runInputConst noClientCertSettings + . runInputConst scaffoldingFederationDomainConfigs $ callInward request embed $ assertEqual "no calls to services should be made" [] actualCalls @@ -216,6 +221,7 @@ requestNoCertificate = . discardTinyLogs . mockDiscoveryTrivial . runInputConst noClientCertSettings + . runInputConst scaffoldingFederationDomainConfigs $ callInward request assertEqual "no calls to services should be made" [] actualCalls @@ -267,6 +273,7 @@ testInvalidPaths = do . discardTinyLogs . mockDiscoveryTrivial . runInputConst noClientCertSettings + . runInputConst scaffoldingFederationDomainConfigs $ callInward request assertEqual ("Expected request with path \"" <> cs invalidPath <> "\" to fail") (Left InvalidRoute) (void res) @@ -290,6 +297,7 @@ testInvalidComponent = . discardTinyLogs . mockDiscoveryTrivial . runInputConst noClientCertSettings + . runInputConst scaffoldingFederationDomainConfigs $ callInward request void res @?= Left (UnknownComponent "mast") @@ -318,6 +326,7 @@ testMethod = . discardTinyLogs . 
mockDiscoveryTrivial . runInputConst noClientCertSettings + . runInputConst scaffoldingFederationDomainConfigs $ callInward request void res @?= Left InvalidRoute @@ -326,3 +335,6 @@ exampleDomain = "localhost.example.com" aValidDomain :: Domain aValidDomain = Domain exampleDomain + +scaffoldingFederationDomainConfigs :: FederationDomainConfigs +scaffoldingFederationDomainConfigs = defFederationDomainConfigs {strategy = AllowAll} diff --git a/services/federator/test/unit/Test/Federator/InternalServer.hs b/services/federator/test/unit/Test/Federator/InternalServer.hs index f4d791bfc8..ee9861b50d 100644 --- a/services/federator/test/unit/Test/Federator/InternalServer.hs +++ b/services/federator/test/unit/Test/Federator/InternalServer.hs @@ -25,7 +25,6 @@ import Data.Default import Data.Domain import Federator.Error.ServerError import Federator.InternalServer (callOutward) -import Federator.Options (AllowedDomains (..), FederationStrategy (..), RunSettings (..)) import Federator.Remote import Federator.Validation import Imports @@ -43,6 +42,8 @@ import Test.Tasty import Test.Tasty.HUnit import Wire.API.Federation.Component import Wire.API.Federation.Domain +import Wire.API.Routes.FederationDomainConfig +import Wire.API.User.Search import Wire.Sem.Logger.TinyLog tests :: TestTree @@ -56,10 +57,6 @@ tests = ] ] -settingsWithAllowList :: [Domain] -> RunSettings -settingsWithAllowList domains = - noClientCertSettings {federationStrategy = AllowList (AllowedDomains domains)} - federatedRequestSuccess :: TestTree federatedRequestSuccess = testCase "should successfully return success response" $ do @@ -95,6 +92,7 @@ federatedRequestSuccess = . assertNoError @ServerError . discardTinyLogs . runInputConst settings + . 
runInputConst (FederationDomainConfigs AllowDynamic [FederationDomainConfig (Domain "target.example.com") FullSearch] 10) $ callOutward request Wai.responseStatus res @?= HTTP.status200 body <- Wai.lazyResponseBody res @@ -102,11 +100,11 @@ federatedRequestSuccess = -- @SF.Federation @TSFI.Federate @TSFI.DNS @S2 @S3 @S7 -- --- Refuse to send outgoing request to non-included domain when allowlist is configured. +-- Refuse to send outgoing request to non-included domain when AllowDynamic is configured. federatedRequestFailureAllowList :: TestTree federatedRequestFailureAllowList = - testCase "should not make a call when target domain not in the allowList" $ do - let settings = settingsWithAllowList [Domain "hello.world"] + testCase "should not make a call when target domain not in the allow list" $ do + let settings = noClientCertSettings let targetDomain = Domain "target.example.com" headers = [(originDomainHeaderName, "origin.example.com")] request <- @@ -136,6 +134,7 @@ federatedRequestFailureAllowList = . assertNoError @ServerError . discardTinyLogs . runInputConst settings + . 
runInputConst (FederationDomainConfigs AllowDynamic [FederationDomainConfig (Domain "hello.world") FullSearch] 10) $ callOutward request eith @?= Left (FederationDenied targetDomain) diff --git a/services/federator/test/unit/Test/Federator/Options.hs b/services/federator/test/unit/Test/Federator/Options.hs index 4c3b62e2b9..ce8af8375b 100644 --- a/services/federator/test/unit/Test/Federator/Options.hs +++ b/services/federator/test/unit/Test/Federator/Options.hs @@ -23,10 +23,7 @@ module Test.Federator.Options where import Control.Exception (try) import Data.Aeson (FromJSON) -import qualified Data.Aeson as Aeson import qualified Data.ByteString.Char8 as B8 -import Data.ByteString.Lazy (toStrict) -import Data.Domain (Domain (..), mkDomain) import Data.String.Interpolate as QQ import qualified Data.Yaml as Yaml import Federator.Options @@ -38,8 +35,7 @@ import Test.Tasty.HUnit defRunSettings :: FilePath -> FilePath -> RunSettings defRunSettings client key = RunSettings - { federationStrategy = AllowAll, - useSystemCAStore = True, + { useSystemCAStore = True, remoteCAStore = Nothing, clientCertificate = client, clientPrivateKey = key, @@ -54,37 +50,9 @@ tests :: TestTree tests = testGroup "Options" - [ parseFederationStrategy, - testSettings + [ testSettings ] -parseFederationStrategy :: TestTree -parseFederationStrategy = - testCase "parse FederationStrategy examples" $ do - assertParsesAs AllowAll $ - "allowAll: null" - assertParsesAs (withAllowList []) $ - "allowedDomains: []" - assertParsesAs (withAllowList ["test.org"]) . B8.pack $ - [QQ.i| - allowedDomains: - - test.org|] - assertParsesAs (withAllowList ["example.com", "wire.com"]) . 
B8.pack $ - [QQ.i| - allowedDomains: - - example.com - - wire.com|] - -- manual roundtrip example AllowAll - let allowA = toStrict $ Aeson.encode AllowAll - assertParsesAs AllowAll $ allowA - -- manual roundtrip example AllowList - let allowWire = withAllowList ["wire.com"] - let allowedDom = toStrict $ Aeson.encode allowWire - assertParsesAs allowWire $ allowedDom - where - withAllowList = - AllowList . AllowedDomains . map (either error id . mkDomain) - testSettings :: TestTree testSettings = testGroup @@ -103,11 +71,7 @@ testSettings = testCase "parse configuration example (closed federation)" $ do let settings = (defRunSettings "client.pem" "client-key.pem") - { federationStrategy = - AllowList - ( AllowedDomains [Domain "server2.example.com"] - ), - useSystemCAStore = False + { useSystemCAStore = False } assertParsesAs settings . B8.pack $ [QQ.i| diff --git a/services/federator/test/unit/Test/Federator/Validation.hs b/services/federator/test/unit/Test/Federator/Validation.hs index 95c842f676..6c1d7ebc95 100644 --- a/services/federator/test/unit/Test/Federator/Validation.hs +++ b/services/federator/test/unit/Test/Federator/Validation.hs @@ -26,7 +26,6 @@ import Data.List.NonEmpty (NonEmpty (..)) import qualified Data.Text.Encoding as Text import qualified Data.X509.Validation as X509 import Federator.Discovery -import Federator.Options import Federator.Validation import Imports import Polysemy @@ -37,6 +36,8 @@ import Test.Federator.Options (noClientCertSettings) import Test.Federator.Util import Test.Tasty import Test.Tasty.HUnit +import Wire.API.Routes.FederationDomainConfig +import Wire.API.User.Search import Wire.Network.DNS.SRV (SrvTarget (..)) mockDiscoveryTrivial :: Sem (DiscoverFederator ': r) x -> Sem r x @@ -58,6 +59,16 @@ mockDiscoveryFailure = Polysemy.interpret $ \case DiscoverFederator _ -> error "Not mocked" DiscoverAllFederators _ -> pure . 
Left $ DiscoveryFailureDNSError "mock DNS error" +scaffoldingFederationDomainConfigs :: FederationDomainConfigs +scaffoldingFederationDomainConfigs = + FederationDomainConfigs + AllowDynamic + [ FederationDomainConfig (Domain "foo.example.com") FullSearch, + FederationDomainConfig (Domain "example.com") FullSearch, + FederationDomainConfig (Domain "federator.example.com") FullSearch + ] + 10 + tests :: TestTree tests = testGroup @@ -86,20 +97,22 @@ tests = federateWithAllowListSuccess :: TestTree federateWithAllowListSuccess = testCase "should give True when target domain is in the list" $ do - let settings = settingsWithAllowList [Domain "hello.world"] + let settings = noClientCertSettings runM . assertNoError @ValidationError . runInputConst settings + . runInputConst (FederationDomainConfigs AllowDynamic [FederationDomainConfig (Domain "hello.world") FullSearch] 0) $ ensureCanFederateWith (Domain "hello.world") federateWithAllowListFail :: TestTree federateWithAllowListFail = testCase "should give False when target domain is not in the list" $ do - let settings = settingsWithAllowList [Domain "only.other.domain"] + let settings = noClientCertSettings eith :: Either ValidationError () <- runM . runError @ValidationError . runInputConst settings + . runInputConst (FederationDomainConfigs AllowDynamic [FederationDomainConfig (Domain "only.other.domain") FullSearch] 0) $ ensureCanFederateWith (Domain "hello.world") assertBool "federating should not be allowed" (isLeft eith) @@ -107,30 +120,32 @@ validateDomainAllowListFailSemantic :: TestTree validateDomainAllowListFailSemantic = testCase "semantic validation" $ do exampleCert <- BS.readFile "test/resources/unit/localhost.pem" - let settings = settingsWithAllowList [Domain "only.other.domain"] + let settings = noClientCertSettings res <- runM . runError . assertNoError @DiscoveryFailure . mockDiscoveryTrivial . runInputConst settings + . 
runInputConst (FederationDomainConfigs AllowDynamic [FederationDomainConfig (Domain "only.other.domain") FullSearch] 0) $ validateDomain (Just exampleCert) "invalid//.><-semantic-&@-domain" res @?= Left (DomainParseError "invalid//.><-semantic-&@-domain") -- @SF.Federation @TSFI.Federate @TSFI.DNS @S2 @S3 @S7 -- --- Refuse to send outgoing request to non-included domain when allowlist is configured. +-- Refuse to send outgoing request to non-included domain when AllowDynamic is configured. validateDomainAllowListFail :: TestTree validateDomainAllowListFail = testCase "allow list validation" $ do exampleCert <- BS.readFile "test/resources/unit/localhost.example.com.pem" - let settings = settingsWithAllowList [Domain "only.other.domain"] + let settings = noClientCertSettings res <- runM . runError . assertNoError @DiscoveryFailure . mockDiscoveryTrivial . runInputConst settings + . runInputConst (FederationDomainConfigs AllowDynamic [FederationDomainConfig (Domain "only.other.domain") FullSearch] 0) $ validateDomain (Just exampleCert) "localhost.example.com" res @?= Left (FederationDenied (Domain "localhost.example.com")) @@ -141,13 +156,14 @@ validateDomainAllowListSuccess = testCase "should give parsed domain if in the allow list" $ do exampleCert <- BS.readFile "test/resources/unit/localhost.example.com.pem" let domain = Domain "localhost.example.com" - settings = settingsWithAllowList [domain] + settings = noClientCertSettings res <- runM . assertNoError @ValidationError . assertNoError @DiscoveryFailure . mockDiscoveryTrivial . runInputConst settings + . runInputConst (FederationDomainConfigs AllowDynamic [FederationDomainConfig domain FullSearch] 0) $ validateDomain (Just exampleCert) (toByteString' domain) assertEqual "validateDomain should give 'localhost.example.com' as domain" domain res @@ -160,6 +176,7 @@ validateDomainCertMissing = . assertNoError @DiscoveryFailure . mockDiscoveryTrivial . runInputConst noClientCertSettings + . 
runInputConst defFederationDomainConfigs $ validateDomain Nothing "foo.example.com" res @?= Left NoClientCertificate @@ -174,6 +191,7 @@ validateDomainCertInvalid = . assertNoError @DiscoveryFailure . mockDiscoveryTrivial . runInputConst noClientCertSettings + . runInputConst scaffoldingFederationDomainConfigs $ validateDomain (Just "not a certificate") "foo.example.com" res @?= Left (CertificateParseError "no certificate found") @@ -193,6 +211,7 @@ validateDomainCertWrongDomain = . assertNoError @DiscoveryFailure . mockDiscoveryTrivial . runInputConst noClientCertSettings + . runInputConst scaffoldingFederationDomainConfigs $ validateDomain (Just exampleCert) "foo.example.com" res @?= Left (AuthenticationFailure (pure [X509.NameMismatch "foo.example.com"])) @@ -209,6 +228,7 @@ validateDomainCertCN = . assertNoError @DiscoveryFailure . mockDiscoveryTrivial . runInputConst noClientCertSettings + . runInputConst scaffoldingFederationDomainConfigs $ validateDomain (Just exampleCert) (toByteString' domain) res @?= domain @@ -223,6 +243,7 @@ validateDomainCertSAN = . assertNoError @DiscoveryFailure . mockDiscoveryTrivial . runInputConst noClientCertSettings + . runInputConst scaffoldingFederationDomainConfigs $ validateDomain (Just exampleCert) (toByteString' domain) res @?= domain @@ -237,6 +258,7 @@ validateDomainMultipleFederators = . assertNoError @DiscoveryFailure . mockDiscoveryMapping domain ("localhost.example.com" :| ["second-federator.example.com"]) . runInputConst noClientCertSettings + . runInputConst scaffoldingFederationDomainConfigs domain = Domain "foo.example.com" resFirst <- runValidation $ @@ -258,6 +280,7 @@ validateDomainDiscoveryFailed = . assertNoError @ValidationError . mockDiscoveryFailure . runInputConst noClientCertSettings + . runInputConst scaffoldingFederationDomainConfigs $ validateDomain (Just exampleCert) "example.com" res @?= Left (DiscoveryFailureDNSError "mock DNS error") @@ -272,9 +295,6 @@ validateDomainNonIdentitySRV = . 
assertNoError @DiscoveryFailure . mockDiscoveryMapping domain ("localhost.example.com" :| []) . runInputConst noClientCertSettings + . runInputConst scaffoldingFederationDomainConfigs $ validateDomain (Just exampleCert) (toByteString' domain) res @?= domain - -settingsWithAllowList :: [Domain] -> RunSettings -settingsWithAllowList domains = - noClientCertSettings {federationStrategy = AllowList (AllowedDomains domains)} diff --git a/services/galley/galley.integration.yaml b/services/galley/galley.integration.yaml index b44dad2cf0..c1e2685502 100644 --- a/services/galley/galley.integration.yaml +++ b/services/galley/galley.integration.yaml @@ -91,4 +91,4 @@ logNetStrings: false journal: # if set, journals; if not set, disables journaling queueName: integration-team-events.fifo endpoint: http://localhost:4568 # https://sqs.eu-west-1.amazonaws.com - region: eu-west-1 + region: eu-west-1 \ No newline at end of file diff --git a/services/galley/src/Galley/API/Action.hs b/services/galley/src/Galley/API/Action.hs index 2a0eb909db..c6d11ef9d9 100644 --- a/services/galley/src/Galley/API/Action.hs +++ b/services/galley/src/Galley/API/Action.hs @@ -757,8 +757,23 @@ notifyConversationAction tag quid notifyOrigDomain con lconv targets action = do for_ failedNotifies $ \case -- rethrow invalid-domain errors and mis-configured federation errors (_, ex@(FederationCallFailure (FederatorClientError (Wai.Error (Wai.Status 422 _) _ _ _)))) -> throw ex + -- FUTUREWORK: This error occurs when federation strategy is set to `allowDynamic` + -- and the remote domain is not in the allow list + -- Is it ok to throw all 400 errors? + (_, ex@(FederationCallFailure (FederatorClientError (Wai.Error (Wai.Status 400 _) _ _ _)))) -> throw ex (_, ex@(FederationCallFailure (FederatorClientHTTP2Error (FederatorClientConnectionError _)))) -> throw ex - _ -> pure () + -- FUTUREWORK: Default case (`_ -> pure ()`) is now explicit. Do we really want to ignore all these errors? 
+ (_, FederationCallFailure (FederatorClientHTTP2Error _)) -> pure () + (_, FederationCallFailure (FederatorClientError _)) -> pure () + (_, FederationCallFailure FederatorClientStreamingNotSupported) -> pure () + (_, FederationCallFailure (FederatorClientServantError _)) -> pure () + (_, FederationCallFailure (FederatorClientVersionNegotiationError _)) -> pure () + (_, FederationCallFailure FederatorClientVersionMismatch) -> pure () + (_, FederationNotImplemented) -> pure () + (_, FederationNotConfigured) -> pure () + (_, FederationUnexpectedBody _) -> pure () + (_, FederationUnexpectedError _) -> pure () + (_, FederationUnreachableDomains _) -> pure () updates <- E.runFederatedConcurrentlyEither (toList (bmRemotes targets)) $ \ruids -> do diff --git a/services/galley/src/Galley/App.hs b/services/galley/src/Galley/App.hs index 1cacda685a..c80f993cd9 100644 --- a/services/galley/src/Galley/App.hs +++ b/services/galley/src/Galley/App.hs @@ -108,6 +108,7 @@ import qualified UnliftIO.Exception as UnliftIO import Util.Options import Wire.API.Error import Wire.API.Federation.Error +import Wire.API.Routes.FederationDomainConfig import qualified Wire.Sem.Logger -- Effects needed by the interpretation of other effects @@ -155,9 +156,8 @@ validateOptions l o = do (Just _, Nothing) -> error "Federator is specified and RabbitMQ config is not, please specify both or none" _ -> pure () -createEnv :: Metrics -> Opts -> IO Env -createEnv m o = do - l <- Logger.mkLogger (o ^. optLogLevel) (o ^. optLogNetStrings) (o ^. optLogFormat) +createEnv :: Metrics -> Opts -> Logger -> IORef FederationDomainConfigs -> IO Env +createEnv m o l r = do cass <- initCassandra o l mgr <- initHttpManager o h2mgr <- initHttp2Manager @@ -168,6 +168,7 @@ createEnv m o = do <*> maybe (pure Nothing) (fmap Just . Aws.mkEnv l mgr) (o ^. optJournal) <*> loadAllMLSKeys (fold (o ^. optSettings . setMlsPrivateKeyPaths)) <*> traverse (mkRabbitMqChannelMVar l) (o ^. 
optRabbitmq) + <*> pure r initCassandra :: Opts -> Logger -> IO ClientState initCassandra o l = do diff --git a/services/galley/src/Galley/Env.hs b/services/galley/src/Galley/Env.hs index 77e80afb02..a52acbac23 100644 --- a/services/galley/src/Galley/Env.hs +++ b/services/galley/src/Galley/Env.hs @@ -42,6 +42,7 @@ import System.Logger import Util.Options import Wire.API.MLS.Credential import Wire.API.MLS.Keys +import Wire.API.Routes.FederationDomainConfig (FederationDomainConfigs) import Wire.API.Team.Member data DeleteItem = TeamItem TeamId UserId (Maybe ConnId) @@ -62,7 +63,8 @@ data Env = Env _extEnv :: ExtEnv, _aEnv :: Maybe Aws.Env, _mlsKeys :: SignaturePurpose -> MLSKeys, - _rabbitmqChannel :: Maybe (MVar Q.Channel) + _rabbitmqChannel :: Maybe (MVar Q.Channel), + _fedDomains :: IORef FederationDomainConfigs } -- | Environment specific to the communication with external diff --git a/services/galley/src/Galley/Run.hs b/services/galley/src/Galley/Run.hs index ca67375328..30fdf47379 100644 --- a/services/galley/src/Galley/Run.hs +++ b/services/galley/src/Galley/Run.hs @@ -18,6 +18,7 @@ module Galley.Run ( run, mkApp, + mkLogger, ) where @@ -58,14 +59,16 @@ import qualified Network.Wai.Middleware.Gzip as GZip import Network.Wai.Utilities.Server import Servant hiding (route) import qualified System.Logger as Log +import System.Logger.Extended (mkLogger) import Util.Options +import Wire.API.FederationUpdate import Wire.API.Routes.API import qualified Wire.API.Routes.Public.Galley as GalleyAPI import Wire.API.Routes.Version.Wai run :: Opts -> IO () run opts = lowerCodensity $ do - (app, env) <- mkApp opts + (app, env, syndFedDomainConfigsThread) <- mkApp opts settings <- lift $ newSettings $ @@ -79,17 +82,16 @@ run opts = lowerCodensity $ do void $ Codensity $ Async.withAsync $ collectAuthMetrics (env ^. monitor) (aws ^. 
awsEnv) void $ Codensity $ Async.withAsync $ runApp env deleteLoop void $ Codensity $ Async.withAsync $ runApp env refreshMetrics - lift $ finally (runSettingsWithShutdown settings app Nothing) (shutdown (env ^. cstate)) + lift $ finally (runSettingsWithShutdown settings app Nothing) (closeApp env syndFedDomainConfigsThread) -mkApp :: Opts -> Codensity IO (Application, Env) +mkApp :: Opts -> Codensity IO (Application, Env, Async.Async ()) mkApp opts = do + logger <- lift $ mkLogger (opts ^. optLogLevel) (opts ^. optLogNetStrings) (opts ^. optLogFormat) + (fedDoms, syndFedDomainConfigsThread) <- lift $ syncFedDomainConfigs (opts ^. optBrig) logger emptySyncFedDomainConfigsCallback metrics <- lift $ M.metrics - env <- lift $ App.createEnv metrics opts + env <- lift $ App.createEnv metrics opts logger fedDoms lift $ runClient (env ^. cstate) $ versionCheck schemaVersion - - let logger = env ^. App.applog - let middlewares = versionMiddleware (opts ^. optSettings . setDisabledAPIVersions . traverse) . servantPlusWAIPrometheusMiddleware API.sitemap (Proxy @CombinedAPI) @@ -100,7 +102,7 @@ mkApp opts = Log.info logger $ Log.msg @Text "Galley application finished." Log.flush logger Log.close logger - pure (middlewares $ servantApp env, env) + pure (middlewares $ servantApp env, env, syndFedDomainConfigsThread) where rtree = compile API.sitemap runGalley e r k = evalGalleyToIO e (route rtree r k) @@ -123,6 +125,11 @@ mkApp opts = lookupReqId :: Request -> RequestId lookupReqId = maybe def RequestId . lookup requestIdName . requestHeaders +closeApp :: Env -> Async.Async () -> IO () +closeApp env syndFedDomainConfigsThread = do + shutdown (env ^. 
cstate) + Async.cancel syndFedDomainConfigsThread + customFormatters :: Servant.ErrorFormatters customFormatters = defaultErrorFormatters diff --git a/services/galley/test/integration/API/Util.hs b/services/galley/test/integration/API/Util.hs index b8ecd8ea53..7339e15578 100644 --- a/services/galley/test/integration/API/Util.hs +++ b/services/galley/test/integration/API/Util.hs @@ -2455,7 +2455,7 @@ instance HasSettingsOverrides TestM where ts :: TestSetup <- ask let opts = f (ts ^. tsGConf) liftIO . lowerCodensity $ do - (galleyApp, _env) <- Run.mkApp opts + (galleyApp, _env, _thread) <- Run.mkApp opts -- FUTUREWORK: always call Run.closeApp at the end. port' <- withMockServer galleyApp liftIO $ runReaderT diff --git a/services/gundeck/gundeck.integration.yaml b/services/gundeck/gundeck.integration.yaml index 1d56cfdf88..b43f3520f4 100644 --- a/services/gundeck/gundeck.integration.yaml +++ b/services/gundeck/gundeck.integration.yaml @@ -2,6 +2,10 @@ gundeck: host: 0.0.0.0 port: 8086 +brig: + host: 0.0.0.0 + port: 8082 + cassandra: endpoint: host: 127.0.0.1 diff --git a/services/gundeck/src/Gundeck/Options.hs b/services/gundeck/src/Gundeck/Options.hs index 8e23457e9d..999a0cd088 100644 --- a/services/gundeck/src/Gundeck/Options.hs +++ b/services/gundeck/src/Gundeck/Options.hs @@ -116,6 +116,7 @@ deriveFromJSON toOptionFieldName ''Settings data Opts = Opts { -- | Hostname and port to bind to _optGundeck :: !Endpoint, + _optBrig :: !Endpoint, _optCassandra :: !CassandraOpts, _optRedis :: !RedisEndpoint, _optRedisAdditionalWrite :: !(Maybe RedisEndpoint), diff --git a/services/gundeck/src/Gundeck/Run.hs b/services/gundeck/src/Gundeck/Run.hs index 66062f96e6..f2703fb619 100644 --- a/services/gundeck/src/Gundeck/Run.hs +++ b/services/gundeck/src/Gundeck/Run.hs @@ -52,6 +52,7 @@ import qualified Servant import qualified System.Logger as Log import qualified UnliftIO.Async as Async import Util.Options +import Wire.API.FederationUpdate import Wire.API.Routes.Public.Gundeck 
(GundeckAPI) import Wire.API.Routes.Version.Wai @@ -64,6 +65,10 @@ run o = do let l = e ^. applog s <- newSettings $ defaultServer (unpack $ o ^. optGundeck . epHost) (o ^. optGundeck . epPort) l m let throttleMillis = fromMaybe defSqsThrottleMillis $ o ^. (optSettings . setSqsThrottleMillis) + + -- Get the federation domain list from Brig and start the updater loop + (_, updateDomainsThread) <- syncFedDomainConfigs (o ^. optBrig) l emptySyncFedDomainConfigsCallback + lst <- Async.async $ Aws.execute (e ^. awsEnv) (Aws.listen throttleMillis (runDirect e . onEvent)) wtbs <- forM (e ^. threadBudgetState) $ \tbs -> Async.async $ runDirect e $ watchThreadBudgetState m tbs 10 wCollectAuth <- Async.async (collectAuthMetrics m (Aws._awsEnv (Env._awsEnv e))) @@ -72,6 +77,7 @@ run o = do shutdown (e ^. cstate) Async.cancel lst Async.cancel wCollectAuth + Async.cancel updateDomainsThread forM_ wtbs Async.cancel forM_ rThreads Async.cancel Redis.disconnect =<< takeMVar (e ^. rstate)