diff --git a/Makefile b/Makefile index 1cd6be92e1e..b04d44051d6 100644 --- a/Makefile +++ b/Makefile @@ -51,7 +51,12 @@ install: init .PHONY: rabbit-clean rabbit-clean: - rabbitmqadmin -f pretty_json list queues vhost name messages | jq -r '.[] | "rabbitmqadmin delete queue name=\(.name) --vhost=\(.vhost)"' | bash + rabbitmqadmin -f pretty_json list queues vhost name \ + | jq -r '.[] | "rabbitmqadmin delete queue name=\(.name) --vhost=\(.vhost)"' \ + | bash + rabbitmqadmin -f pretty_json list exchanges name vhost \ + | jq -r '.[] |select(.name | startswith("amq") | not) | select (.name != "") | "rabbitmqadmin delete exchange name=\(.name) --vhost=\(.vhost)"' \ + | bash # Clean .PHONY: full-clean diff --git a/cassandra-schema.cql b/cassandra-schema.cql index 28fad0acf4a..bc454c6fa36 100644 --- a/cassandra-schema.cql +++ b/cassandra-schema.cql @@ -1729,6 +1729,26 @@ CREATE TABLE gundeck_test.meta ( AND read_repair_chance = 0.0 AND speculative_retry = '99PERCENTILE'; +CREATE TABLE gundeck_test.missed_notifications ( + user_id uuid, + client_id text, + PRIMARY KEY (user_id, client_id) +) WITH CLUSTERING ORDER BY (client_id ASC) + AND bloom_filter_fp_chance = 0.01 + AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} + AND comment = '' + AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} + AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND dclocal_read_repair_chance = 0.1 + AND default_time_to_live = 0 + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair_chance = 0.0 + AND speculative_retry = '99PERCENTILE'; + CREATE TABLE gundeck_test.push ( ptoken text, app text, diff --git a/changelog.d/0-release-notes/WPB-10308 b/changelog.d/0-release-notes/WPB-10308 new file mode 100644 index 
00000000000..0484164f8ca --- /dev/null +++ b/changelog.d/0-release-notes/WPB-10308 @@ -0,0 +1 @@ +Notifications are now also sent via RabbitMQ. Therefore, if federation is enabled, RabbitMQ is a required configuration in Brig. diff --git a/changelog.d/1-api-changes/WPB-10308 b/changelog.d/1-api-changes/WPB-10308 new file mode 100644 index 00000000000..3f5e043e42a --- /dev/null +++ b/changelog.d/1-api-changes/WPB-10308 @@ -0,0 +1,3 @@ +New endpoint `GET /events` for consuming events is added. + +When a client misses notifications because it was offline for too long, it needs to know this information so it can do a full synchronisation. This appears as the first notification in the `GET /events` endpoint whenever the system detects this happening. The next acknowledgement of the message makes this notification not appear anymore until the next notification is missed. diff --git a/changelog.d/5-internal/WPB-10308 b/changelog.d/5-internal/WPB-10308 new file mode 100644 index 00000000000..6318426a3a7 --- /dev/null +++ b/changelog.d/5-internal/WPB-10308 @@ -0,0 +1,2 @@ +- RabbitMQ queues have been introduced for client notifications +- New internal endpoint `POST /i/users/:uid/clients/:cid/consumable-notifications` is added diff --git a/charts/background-worker/templates/configmap.yaml b/charts/background-worker/templates/configmap.yaml index 8840a43764e..25ac5238bd1 100644 --- a/charts/background-worker/templates/configmap.yaml +++ b/charts/background-worker/templates/configmap.yaml @@ -21,6 +21,12 @@ data: host: federator port: 8080 + cassandra: + endpoint: + host: {{ .cassandra.host }} + port: 9042 + keyspace: gundeck + {{- with .rabbitmq }} rabbitmq: host: {{ .host }} diff --git a/charts/background-worker/values.yaml b/charts/background-worker/values.yaml index 8b79f6af6be..1f8c113732e 100644 --- a/charts/background-worker/values.yaml +++ b/charts/background-worker/values.yaml @@ -29,6 +29,8 @@ config: # tlsCaSecretRef: # name: # key: + cassandra: + host: aws-cassandra 
backendNotificationPusher: pushBackoffMinWait: 10000 # in microseconds, so 10ms diff --git a/charts/cannon/templates/configmap.yaml b/charts/cannon/templates/configmap.yaml index 6537fc0172a..99ffd6f2ede 100644 --- a/charts/cannon/templates/configmap.yaml +++ b/charts/cannon/templates/configmap.yaml @@ -1,25 +1,46 @@ apiVersion: v1 data: + {{- with .Values }} cannon.yaml: | - logFormat: {{ .Values.config.logFormat }} - logLevel: {{ .Values.config.logLevel }} - logNetStrings: {{ .Values.config.logNetStrings }} + logFormat: {{ .config.logFormat }} + logLevel: {{ .config.logLevel }} + logNetStrings: {{ .config.logNetStrings }} cannon: host: 0.0.0.0 - port: {{ .Values.service.externalPort }} + port: {{ .service.externalPort }} externalHostFile: /etc/wire/cannon/externalHost/host.txt gundeck: host: gundeck port: 8080 + cassandra: + endpoint: + host: {{ .config.cassandra.host }} + port: 9042 + keyspace: gundeck + + {{- with .config.rabbitmq }} + rabbitmq: + host: {{ .host }} + port: {{ .port }} + vHost: {{ .vHost }} + enableTls: {{ .enableTls }} + insecureSkipVerifyTls: {{ .insecureSkipVerifyTls }} + {{- if .tlsCaSecretRef }} + caCert: /etc/wire/gundeck/rabbitmq-ca/{{ .tlsCaSecretRef.key }} + {{- end }} + {{- end }} + drainOpts: - gracePeriodSeconds: {{ .Values.config.drainOpts.gracePeriodSeconds }} - millisecondsBetweenBatches: {{ .Values.config.drainOpts.millisecondsBetweenBatches }} - minBatchSize: {{ .Values.config.drainOpts.minBatchSize }} + gracePeriodSeconds: {{ .config.drainOpts.gracePeriodSeconds }} + millisecondsBetweenBatches: {{ .config.drainOpts.millisecondsBetweenBatches }} + minBatchSize: {{ .config.drainOpts.minBatchSize }} + + disabledAPIVersions: {{ toJson .config.disabledAPIVersions }} + {{- end }} - disabledAPIVersions: {{ toJson .Values.config.disabledAPIVersions }} kind: ConfigMap metadata: diff --git a/charts/cannon/values.yaml b/charts/cannon/values.yaml index 350ffebc50a..93041914a22 100644 --- a/charts/cannon/values.yaml +++ 
b/charts/cannon/values.yaml @@ -11,6 +11,35 @@ config: logLevel: Info logFormat: StructuredJSON logNetStrings: false + rabbitmq: + host: rabbitmq + port: 5672 + vHost: / + enableTls: false + insecureSkipVerifyTls: false + cassandra: + host: aws-cassandra + # To enable TLS provide a CA: + # tlsCa: + # + # Or refer to an existing secret (containing the CA): + # tlsCaSecretRef: + # name: + # key: + + redis: + host: redis-ephemeral-master + port: 6379 + connectionMode: "master" # master | cluster + enableTls: false + insecureSkipVerifyTls: false + # To configure custom TLS CA, please provide one of these: + # tlsCa: + # + # Or refer to an existing secret (containing the CA): + # tlsCaSecretRef: + # name: + # key: # See also the section 'Controlling the speed of websocket draining during # cannon pod replacement' in docs/how-to/install/configuration-options.rst diff --git a/charts/gundeck/templates/configmap.yaml b/charts/gundeck/templates/configmap.yaml index cf7c37e1a7c..d067c6508a0 100644 --- a/charts/gundeck/templates/configmap.yaml +++ b/charts/gundeck/templates/configmap.yaml @@ -29,6 +29,18 @@ data: tlsCa: /etc/wire/gundeck/cassandra/{{- (include "tlsSecretRef" . 
| fromYaml).key }} {{- end }} + {{- with .rabbitmq }} + rabbitmq: + host: {{ .host }} + port: {{ .port }} + vHost: {{ .vHost }} + enableTls: {{ .enableTls }} + insecureSkipVerifyTls: {{ .insecureSkipVerifyTls }} + {{- if .tlsCaSecretRef }} + caCert: /etc/wire/gundeck/rabbitmq-ca/{{ .tlsCaSecretRef.key }} + {{- end }} + {{- end }} + redis: host: {{ .redis.host }} port: {{ .redis.port }} diff --git a/charts/gundeck/templates/deployment.yaml b/charts/gundeck/templates/deployment.yaml index ee67ba1ba43..a6a3c320a3e 100644 --- a/charts/gundeck/templates/deployment.yaml +++ b/charts/gundeck/templates/deployment.yaml @@ -39,6 +39,11 @@ spec: - name: "gundeck-config" configMap: name: "gundeck" + {{- if .Values.config.rabbitmq.tlsCaSecretRef }} + - name: "rabbitmq-ca" + secret: + secretName: {{ .Values.config.rabbitmq.tlsCaSecretRef.name }} + {{- end }} {{- if eq (include "useCassandraTLS" .Values.config) "true" }} - name: "gundeck-cassandra" secret: @@ -77,7 +82,21 @@ spec: - name: "additional-redis-ca" mountPath: "/etc/wire/gundeck/additional-redis-ca/" {{- end }} + {{- if .Values.config.rabbitmq.tlsCaSecretRef }} + - name: "rabbitmq-ca" + mountPath: "/etc/wire/gundeck/rabbitmq-ca/" + {{- end }} env: + - name: RABBITMQ_USERNAME + valueFrom: + secretKeyRef: + name: gundeck + key: rabbitmqUsername + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: gundeck + key: rabbitmqPassword {{- if hasKey .Values.secrets "awsKeyId" }} - name: AWS_ACCESS_KEY_ID valueFrom: diff --git a/charts/gundeck/templates/secret.yaml b/charts/gundeck/templates/secret.yaml index eae9c4ab33d..67c61afc220 100644 --- a/charts/gundeck/templates/secret.yaml +++ b/charts/gundeck/templates/secret.yaml @@ -11,6 +11,8 @@ metadata: type: Opaque data: {{- with .Values.secrets }} + rabbitmqUsername: {{ .rabbitmq.username | b64enc | quote }} + rabbitmqPassword: {{ .rabbitmq.password | b64enc | quote }} {{- if hasKey . 
"awsKeyId" }} awsKeyId: {{ .awsKeyId | b64enc | quote }} {{- end }} diff --git a/charts/gundeck/templates/tests/gundeck-integration.yaml b/charts/gundeck/templates/tests/gundeck-integration.yaml index 9aa7b56347d..a2aa75e52cd 100644 --- a/charts/gundeck/templates/tests/gundeck-integration.yaml +++ b/charts/gundeck/templates/tests/gundeck-integration.yaml @@ -23,6 +23,11 @@ spec: secret: secretName: {{ include "redisTlsSecretName" .Values.config }} {{- end }} + {{- if .Values.config.rabbitmq.tlsCaSecretRef }} + - name: "rabbitmq-ca" + secret: + secretName: {{ .Values.config.rabbitmq.tlsCaSecretRef.name }} + {{- end }} containers: - name: integration # TODO: When deployed to staging (or real AWS env), _all_ tests should be run @@ -72,6 +77,10 @@ spec: - name: "redis-ca" mountPath: "/etc/wire/gundeck/redis-ca/" {{- end }} + {{- if .Values.config.rabbitmq.tlsCaSecretRef }} + - name: "rabbitmq-ca" + mountPath: "/etc/wire/gundeck/rabbitmq-ca/" + {{- end }} env: # these dummy values are necessary for Amazonka's "Discover" - name: AWS_ACCESS_KEY_ID @@ -82,6 +91,11 @@ spec: value: "eu-west-1" - name: TEST_XML value: /tmp/result.xml + # RabbitMQ needs dummy credentials for the tests to run + - name: RABBITMQ_USERNAME + value: "guest" + - name: RABBITMQ_PASSWORD + value: "guest" {{- if hasKey .Values.secrets "redisUsername" }} - name: REDIS_USERNAME valueFrom: diff --git a/charts/gundeck/values.yaml b/charts/gundeck/values.yaml index 9749dd94be8..e5500c02db5 100644 --- a/charts/gundeck/values.yaml +++ b/charts/gundeck/values.yaml @@ -18,6 +18,13 @@ config: logLevel: Info logFormat: StructuredJSON logNetStrings: false + rabbitmq: + host: rabbitmq + port: 5672 + adminPort: 15672 + vHost: / + enableTls: false + insecureSkipVerifyTls: false cassandra: host: aws-cassandra # To enable TLS provide a CA: diff --git a/charts/integration/templates/integration-integration.yaml b/charts/integration/templates/integration-integration.yaml index 3fe4284dc5b..701c8fec634 100644 --- 
a/charts/integration/templates/integration-integration.yaml +++ b/charts/integration/templates/integration-integration.yaml @@ -261,6 +261,9 @@ spec: - name: rabbitmq-ca mountPath: /etc/wire/background-worker/rabbitmq-ca + - name: rabbitmq-ca + mountPath: /etc/wire/gundeck/rabbitmq-ca + {{- if eq (include "useCassandraTLS" .Values.config) "true" }} - name: "integration-cassandra" mountPath: "/certs" diff --git a/deploy/dockerephemeral/docker-compose.yaml b/deploy/dockerephemeral/docker-compose.yaml index 13061660d8c..73c7324ae99 100644 --- a/deploy/dockerephemeral/docker-compose.yaml +++ b/deploy/dockerephemeral/docker-compose.yaml @@ -271,6 +271,7 @@ services: ports: - '127.0.0.1:5671:5671' - '127.0.0.1:15671:15671' + - '127.0.0.1:15672:15672' volumes: - ./rabbitmq-config/rabbitmq.conf:/etc/rabbitmq/conf.d/20-wire.conf - ./rabbitmq-config/certificates:/etc/rabbitmq/certificates diff --git a/hack/helm_vars/wire-server/values.yaml.gotmpl b/hack/helm_vars/wire-server/values.yaml.gotmpl index e7f72583f39..8bd68830837 100644 --- a/hack/helm_vars/wire-server/values.yaml.gotmpl +++ b/hack/helm_vars/wire-server/values.yaml.gotmpl @@ -80,7 +80,7 @@ brig: enableTls: true insecureSkipVerifyTls: false tlsCaSecretRef: - name: rabbitmq-certificate + name: "rabbitmq-certificate" key: "ca.crt" authSettings: userTokenTimeout: 120 @@ -205,7 +205,23 @@ cannon: memory: 512Mi drainTimeout: 0 config: + cassandra: + host: {{ .Values.cassandraHost }} + replicaCount: 1 disabledAPIVersions: [] + rabbitmq: + port: 5671 + adminPort: 15671 + enableTls: true + insecureSkipVerifyTls: false + tlsCaSecretRef: + name: "rabbitmq-certificate" + key: "ca.crt" + secrets: + rabbitmq: + username: {{ .Values.rabbitmqUsername }} + password: {{ .Values.rabbitmqPassword }} + cargohold: replicaCount: 1 imagePullPolicy: {{ .Values.imagePullPolicy }} @@ -252,7 +268,7 @@ galley: enableTls: true insecureSkipVerifyTls: false tlsCaSecretRef: - name: rabbitmq-certificate + name: "rabbitmq-certificate" key: "ca.crt" 
enableFederation: true # keep in sync with brig.config.enableFederation, cargohold.config.enableFederation and tags.federator! settings: @@ -373,6 +389,14 @@ gundeck: name: "cassandra-jks-keystore" key: "ca.crt" {{- end }} + rabbitmq: + port: 5671 + adminPort: 15671 + enableTls: true + insecureSkipVerifyTls: false + tlsCaSecretRef: + name: "rabbitmq-certificate" + key: "ca.crt" redis: host: redis-ephemeral-master connectionMode: master @@ -395,6 +419,9 @@ gundeck: awsKeyId: dummykey awsSecretKey: dummysecret redisPassword: very-secure-redis-master-password + rabbitmq: + username: {{ .Values.rabbitmqUsername }} + password: {{ .Values.rabbitmqPassword }} tests: {{- if .Values.uploadXml }} config: @@ -518,13 +545,21 @@ background-worker: pushBackoffMinWait: 1000 # 1ms pushBackoffMaxWait: 500000 # 0.5s remotesRefreshInterval: 1000000 # 1s + cassandra: + host: {{ .Values.cassandraHost }} + replicaCount: 1 + {{- if .Values.useK8ssandraSSL.enabled }} + tlsCaSecretRef: + name: "cassandra-jks-keystore" + key: "ca.crt" + {{- end }} rabbitmq: port: 5671 adminPort: 15671 enableTls: true insecureSkipVerifyTls: false tlsCaSecretRef: - name: rabbitmq-certificate + name: "rabbitmq-certificate" key: "ca.crt" secrets: rabbitmq: diff --git a/hack/helmfile.yaml b/hack/helmfile.yaml index 3581c373a78..447b980777e 100644 --- a/hack/helmfile.yaml +++ b/hack/helmfile.yaml @@ -1,5 +1,5 @@ --- -# This helfile is used for the setup of two ephemeral backends on kubernetes +# This helmfile is used for the setup of two ephemeral backends on kubernetes # during integration testing (including federation integration tests spanning # over 2 backends) # This helmfile is used via the './hack/bin/integration-setup-federation.sh' via diff --git a/integration/integration.cabal b/integration/integration.cabal index fa293712177..61828917dc8 100644 --- a/integration/integration.cabal +++ b/integration/integration.cabal @@ -121,6 +121,7 @@ library Test.Demo Test.EJPD Test.Errors + Test.Events 
Test.ExternalPartner Test.FeatureFlags Test.FeatureFlags.AppLock diff --git a/integration/test/API/Galley.hs b/integration/test/API/Galley.hs index 71b148bab7d..902df3862c6 100644 --- a/integration/test/API/Galley.hs +++ b/integration/test/API/Galley.hs @@ -738,3 +738,12 @@ getTeamMembersCsv :: (HasCallStack, MakesValue user) => user -> String -> App Re getTeamMembersCsv user tid = do req <- baseRequest user Galley Versioned (joinHttpPath ["teams", tid, "members", "csv"]) submit "GET" req + +-- | https://staging-nginz-https.zinfra.io/v6/api/swagger-ui/#/default/post_conversations__cnv_domain___cnv__typing +sendTypingStatus :: (HasCallStack, MakesValue user, MakesValue conv) => user -> conv -> String -> App Response +sendTypingStatus user conv status = do + convDomain <- objDomain conv + convId <- objId conv + req <- baseRequest user Galley Versioned (joinHttpPath ["conversations", convDomain, convId, "typing"]) + submit "POST" + $ addJSONObject ["status" .= status] req diff --git a/integration/test/API/GalleyInternal.hs b/integration/test/API/GalleyInternal.hs index de6f5c21c47..bb8a471ce36 100644 --- a/integration/test/API/GalleyInternal.hs +++ b/integration/test/API/GalleyInternal.hs @@ -114,13 +114,6 @@ patchTeamFeatureConfig domain team featureName payload = do req <- baseRequest domain Galley Unversioned $ joinHttpPath ["i", "teams", tid, "features", fn] submit "PATCH" $ req & addJSON p --- https://staging-nginz-https.zinfra.io/api-internal/swagger-ui/galley/#/galley/post_i_features_multi_teams_searchVisibilityInbound -getFeatureStatusMulti :: (HasCallStack, MakesValue domain, MakesValue featureName) => domain -> featureName -> [String] -> App Response -getFeatureStatusMulti domain featureName tids = do - fn <- asString featureName - req <- baseRequest domain Galley Unversioned $ joinHttpPath ["i", "features-multi-teams", fn] - submit "POST" $ req & addJSONObject ["teams" .= tids] - patchTeamFeature :: (HasCallStack, MakesValue domain, MakesValue team) => 
domain -> team -> String -> Value -> App Response patchTeamFeature domain team featureName payload = do tid <- asString team diff --git a/integration/test/MLS/Util.hs b/integration/test/MLS/Util.hs index ae3d748f04d..e70fa74d259 100644 --- a/integration/test/MLS/Util.hs +++ b/integration/test/MLS/Util.hs @@ -835,9 +835,6 @@ createApplicationMessage convId cid messageContent = do groupInfo = Nothing } -setMLSCiphersuite :: ConvId -> Ciphersuite -> App () -setMLSCiphersuite convId suite = modifyMLSState $ \mls -> mls {convs = Map.adjust (\conv -> conv {ciphersuite = suite}) convId mls.convs} - leaveConv :: (HasCallStack) => ConvId -> diff --git a/integration/test/Notifications.hs b/integration/test/Notifications.hs index 7c99e254d11..e5a0c59b2d3 100644 --- a/integration/test/Notifications.hs +++ b/integration/test/Notifications.hs @@ -47,7 +47,7 @@ assertNoNotifications u uc since0 p = do awaitNotifications :: (HasCallStack, MakesValue user, MakesValue client) => user -> - client -> + Maybe client -> Maybe String -> -- | Max no. of notifications Int -> @@ -62,11 +62,11 @@ awaitNotifications user client since0 n selector = do | timeRemaining <= 0 = pure res0 | otherwise = do - c <- make client & asString + c <- for client (asString . make) notifs <- getNotifications user - def {since = since, client = Just c} + def {since = since, client = c} `bindResponse` \resp -> asList (resp.json %. 
"notifications") lastNotifId <- case notifs of [] -> pure since @@ -85,16 +85,26 @@ awaitNotifications user client since0 n selector = do threadDelay 1_000 go (timeRemaining - 1) lastNotifId res -awaitNotification :: +awaitNotificationClient :: (HasCallStack, MakesValue user, MakesValue client, MakesValue lastNotifId) => user -> client -> Maybe lastNotifId -> (Value -> App Bool) -> App Value -awaitNotification user client lastNotifId selector = do +awaitNotificationClient user client lastNotifId selector = do + since0 <- mapM objId lastNotifId + head <$> awaitNotifications user (Just client) since0 1 selector + +awaitNotification :: + (HasCallStack, MakesValue user, MakesValue lastNotifId) => + user -> + Maybe lastNotifId -> + (Value -> App Bool) -> + App Value +awaitNotification user lastNotifId selector = do since0 <- mapM objId lastNotifId - head <$> awaitNotifications user client since0 1 selector + head <$> awaitNotifications user (Nothing :: Maybe ()) since0 1 selector isDeleteUserNotif :: (MakesValue a) => a -> App Bool isDeleteUserNotif n = @@ -225,9 +235,9 @@ assertLeaveNotification :: App () assertLeaveNotification fromUser conv user client leaver = void - $ awaitNotification + $ awaitNotificationClient user - client + (Just client) noValue ( allPreds [ isConvLeaveNotif, diff --git a/integration/test/Test/Conversation.hs b/integration/test/Test/Conversation.hs index a9ef7595714..3ae4a379e5d 100644 --- a/integration/test/Test/Conversation.hs +++ b/integration/test/Test/Conversation.hs @@ -492,15 +492,14 @@ testSynchroniseUserRemovalNotification domain = do otherDomain <- make domain [alice, bob] <- createAndConnectUsers [ownDomain, otherDomain] runCodensity (acquireResources 1 resourcePool) $ \[dynBackend] -> do - (conv, charlie, client) <- + (conv, charlie) <- runCodensity (startDynamicBackend dynBackend mempty) $ \_ -> do charlie <- randomUser dynBackend.berDomain def - client <- objId $ bindResponse (addClient charlie def) $ getJSON 201 mapM_ 
(connectTwoUsers charlie) [alice, bob] conv <- postConversation alice (defProteus {qualifiedUsers = [bob, charlie]}) >>= getJSON 201 - pure (conv, charlie, client) + pure (conv, charlie) let newConvName = "The new conversation name" bindResponse (changeConversationName alice conv newConvName) $ \resp -> @@ -508,10 +507,10 @@ testSynchroniseUserRemovalNotification domain = do bindResponse (removeMember alice conv charlie) $ \resp -> resp.status `shouldMatchInt` 200 runCodensity (startDynamicBackend dynBackend mempty) $ \_ -> do - nameNotif <- awaitNotification charlie client noValue isConvNameChangeNotif + nameNotif <- awaitNotification charlie noValue isConvNameChangeNotif nameNotif %. "payload.0.qualified_conversation" `shouldMatch` objQidObject conv nameNotif %. "payload.0.data.name" `shouldMatch` newConvName - leaveNotif <- awaitNotification charlie client noValue isConvLeaveNotif + leaveNotif <- awaitNotification charlie noValue isConvLeaveNotif leaveNotif %. "payload.0.qualified_conversation" `shouldMatch` objQidObject conv testConvRenaming :: (HasCallStack) => App () @@ -648,19 +647,18 @@ testDeleteTeamConversationWithUnreachableRemoteMembers = do notif %. "payload.0.qualified_from" `shouldMatch` objQidObject alice runCodensity (acquireResources 1 resourcePool) $ \[dynBackend] -> do - (bob, bobClient) <- runCodensity (startDynamicBackend dynBackend mempty) $ \_ -> do + bob <- runCodensity (startDynamicBackend dynBackend mempty) $ \_ -> do bob <- randomUser dynBackend.berDomain def - bobClient <- objId $ bindResponse (addClient bob def) $ getJSON 201 connectTwoUsers alice bob mem <- bob %. 
"qualified_id" void $ addMembers alice conv def {users = [mem]} >>= getBody 200 - pure (bob, bobClient) + pure bob withWebSocket alice $ \ws -> do void $ deleteTeamConversation team conv alice >>= getBody 200 notif <- awaitMatch isConvDeleteNotif ws assertNotification notif void $ runCodensity (startDynamicBackend dynBackend mempty) $ \_ -> do - notif <- awaitNotification bob bobClient noValue isConvDeleteNotif + notif <- awaitNotification bob noValue isConvDeleteNotif assertNotification notif testDeleteTeamMemberLimitedEventFanout :: (HasCallStack) => App () diff --git a/integration/test/Test/Events.hs b/integration/test/Test/Events.hs new file mode 100644 index 00000000000..29f6fd4f945 --- /dev/null +++ b/integration/test/Test/Events.hs @@ -0,0 +1,435 @@ +module Test.Events where + +import API.Brig +import API.BrigCommon +import API.Common +import API.Galley +import API.Gundeck +import qualified Control.Concurrent.Timeout as Timeout +import Control.Retry +import Data.ByteString.Conversion (toByteString') +import qualified Data.Text as Text +import Data.Timeout +import qualified Network.WebSockets as WS +import Notifications +import SetupHelpers +import Testlib.Prelude hiding (assertNoEvent) +import Testlib.Printing +import UnliftIO hiding (handle) + +-- FUTUREWORK: Investigate why these tests are failing without +-- `withModifiedBackend`; No events are received otherwise. +testConsumeEventsOneWebSocket :: (HasCallStack) => App () +testConsumeEventsOneWebSocket = do + withModifiedBackend def \domain -> do + alice <- randomUser domain def + + lastNotifResp <- + retrying + (constantDelay 10_000 <> limitRetries 10) + (\_ resp -> pure $ resp.status == 404) + (\_ -> getLastNotification alice def) + lastNotifId <- lastNotifResp.json %. 
"id" & asString + + client <- addClient alice def {acapabilities = Just ["consumable-notifications"]} >>= getJSON 201 + clientId <- objId client + + withEventsWebSocket alice clientId $ \eventsChan ackChan -> do + deliveryTag <- assertEvent eventsChan $ \e -> do + e %. "type" `shouldMatch` "event" + e %. "data.event.payload.0.type" `shouldMatch` "user.client-add" + e %. "data.event.payload.0.client.id" `shouldMatch` clientId + e %. "data.delivery_tag" + assertNoEvent eventsChan + + sendAck ackChan deliveryTag False + assertNoEvent eventsChan + + handle <- randomHandle + putHandle alice handle >>= assertSuccess + + assertEvent eventsChan $ \e -> do + e %. "type" `shouldMatch` "event" + e %. "data.event.payload.0.type" `shouldMatch` "user.update" + e %. "data.event.payload.0.user.handle" `shouldMatch` handle + + -- No new notifications should be stored in Cassandra as the user doesn't have + -- any legacy clients + getNotifications alice def {since = Just lastNotifId} `bindResponse` \resp -> do + resp.status `shouldMatchInt` 200 + shouldBeEmpty $ resp.json %. 
"notifications" + +testConsumeEventsForDifferentUsers :: (HasCallStack) => App () +testConsumeEventsForDifferentUsers = do + withModifiedBackend def $ \domain -> do + alice <- randomUser domain def + bob <- randomUser domain def + + aliceClient <- addClient alice def {acapabilities = Just ["consumable-notifications"]} >>= getJSON 201 + aliceClientId <- objId aliceClient + + bobClient <- addClient bob def {acapabilities = Just ["consumable-notifications"]} >>= getJSON 201 + bobClientId <- objId bobClient + + withEventsWebSockets [(alice, aliceClientId), (bob, bobClientId)] $ \[(aliceEventsChan, aliceAckChan), (bobEventsChan, bobAckChan)] -> do + assertClientAdd aliceClientId aliceEventsChan aliceAckChan + assertClientAdd bobClientId bobEventsChan bobAckChan + where + assertClientAdd :: (HasCallStack) => String -> TChan Value -> TChan Value -> App () + assertClientAdd clientId eventsChan ackChan = do + deliveryTag <- assertEvent eventsChan $ \e -> do + e %. "data.event.payload.0.type" `shouldMatch` "user.client-add" + e %. "data.event.payload.0.client.id" `shouldMatch` clientId + e %. "data.delivery_tag" + assertNoEvent eventsChan + sendAck ackChan deliveryTag False + +testConsumeEventsWhileHavingLegacyClients :: (HasCallStack) => App () +testConsumeEventsWhileHavingLegacyClients = do + withModifiedBackend def $ \domain -> do + alice <- randomUser domain def + + -- Even if alice has no clients, the notifications should still be persisted + -- in Cassandra. This choice is kinda arbitrary as these notifications + -- probably don't mean much, however, it ensures backwards compatibility. + lastNotifId <- + awaitNotification alice noValue (const $ pure True) >>= \notif -> do + notif %. "payload.0.type" `shouldMatch` "user.activate" + -- There is only one notification (at the time of writing), so we assume + -- it to be the last one. + notif %. 
"id" & asString + + oldClient <- addClient alice def {acapabilities = Just []} >>= getJSON 201 + + withWebSocket (alice, "anything-but-conn", oldClient %. "id") $ \oldWS -> do + newClient <- addClient alice def {acapabilities = Just ["consumable-notifications"]} >>= getJSON 201 + newClientId <- newClient %. "id" & asString + + oldNotif <- awaitMatch isUserClientAddNotif oldWS + oldNotif %. "payload.0.client.id" `shouldMatch` newClientId + + withEventsWebSocket alice newClientId $ \eventsChan _ -> + assertEvent eventsChan $ \e -> do + e %. "data.event.payload.0.type" `shouldMatch` "user.client-add" + e %. "data.event.payload.0.client.id" `shouldMatch` newClientId + + -- All notifs are also in Cassandra because of the legacy client + getNotifications alice def {since = Just lastNotifId} `bindResponse` \resp -> do + resp.status `shouldMatchInt` 200 + resp.json %. "notifications.0.payload.0.type" `shouldMatch` "user.client-add" + resp.json %. "notifications.1.payload.0.type" `shouldMatch` "user.client-add" + +testConsumeEventsAcks :: (HasCallStack) => App () +testConsumeEventsAcks = do + withModifiedBackend def $ \domain -> do + alice <- randomUser domain def + client <- addClient alice def {acapabilities = Just ["consumable-notifications"]} >>= getJSON 201 + clientId <- objId client + + withEventsWebSocket alice clientId $ \eventsChan _ackChan -> do + assertEvent eventsChan $ \e -> do + e %. "data.event.payload.0.type" `shouldMatch` "user.client-add" + e %. "data.event.payload.0.client.id" `shouldMatch` clientId + + -- without ack, we receive the same event again + withEventsWebSocket alice clientId $ \eventsChan ackChan -> do + deliveryTag <- assertEvent eventsChan $ \e -> do + e %. "data.event.payload.0.type" `shouldMatch` "user.client-add" + e %. "data.event.payload.0.client.id" `shouldMatch` clientId + e %. 
"data.delivery_tag" + sendAck ackChan deliveryTag False + + withEventsWebSocket alice clientId $ \eventsChan _ -> do + assertNoEvent eventsChan + +testConsumeEventsMultipleAcks :: (HasCallStack) => App () +testConsumeEventsMultipleAcks = do + withModifiedBackend def $ \domain -> do + alice <- randomUser domain def + client <- addClient alice def {acapabilities = Just ["consumable-notifications"]} >>= getJSON 201 + clientId <- objId client + + handle <- randomHandle + putHandle alice handle >>= assertSuccess + + withEventsWebSocket alice clientId $ \eventsChan ackChan -> do + assertEvent eventsChan $ \e -> do + e %. "data.event.payload.0.type" `shouldMatch` "user.client-add" + e %. "data.event.payload.0.client.id" `shouldMatch` clientId + + deliveryTag <- assertEvent eventsChan $ \e -> do + e %. "data.event.payload.0.type" `shouldMatch` "user.update" + e %. "data.event.payload.0.user.handle" `shouldMatch` handle + e %. "data.delivery_tag" + + sendAck ackChan deliveryTag True + + withEventsWebSocket alice clientId $ \eventsChan _ -> do + assertNoEvent eventsChan + +testConsumeEventsAckNewEventWithoutAckingOldOne :: (HasCallStack) => App () +testConsumeEventsAckNewEventWithoutAckingOldOne = do + withModifiedBackend def $ \domain -> do + alice <- randomUser domain def + client <- addClient alice def {acapabilities = Just ["consumable-notifications"]} >>= getJSON 201 + clientId <- objId client + + handle <- randomHandle + putHandle alice handle >>= assertSuccess + + withEventsWebSocket alice clientId $ \eventsChan ackChan -> do + assertEvent eventsChan $ \e -> do + e %. "data.event.payload.0.type" `shouldMatch` "user.client-add" + e %. "data.event.payload.0.client.id" `shouldMatch` clientId + + deliveryTagHandleAdd <- assertEvent eventsChan $ \e -> do + e %. "data.event.payload.0.type" `shouldMatch` "user.update" + e %. "data.event.payload.0.user.handle" `shouldMatch` handle + e %. 
"data.delivery_tag" + + -- Only ack the handle add delivery tag + sendAck ackChan deliveryTagHandleAdd False + + -- Expect client-add event to be delivered again. + withEventsWebSocket alice clientId $ \eventsChan ackChan -> do + deliveryTagClientAdd <- assertEvent eventsChan $ \e -> do + e %. "data.event.payload.0.type" `shouldMatch` "user.client-add" + e %. "data.event.payload.0.client.id" `shouldMatch` clientId + e %. "data.delivery_tag" + + sendAck ackChan deliveryTagClientAdd False + + withEventsWebSocket alice clientId $ \eventsChan _ -> do + assertNoEvent eventsChan + +testEventsDeadLettered :: (HasCallStack) => App () +testEventsDeadLettered = do + let notifTTL = 1 # Second + withModifiedBackend (def {gundeckCfg = setField "settings.notificationTTL" (notifTTL #> Second)}) $ \domain -> do + alice <- randomUser domain def + + -- This generates an event + client <- addClient alice def {acapabilities = Just ["consumable-notifications"]} >>= getJSON 201 + clientId <- objId client + + -- We expire the add client event by waiting it out + Timeout.threadDelay (notifTTL + 500 # MilliSecond) + + -- Generate a second event + handle1 <- randomHandle + putHandle alice handle1 >>= assertSuccess + + withEventsWebSocket alice clientId $ \eventsChan ackChan -> do + assertEvent eventsChan $ \e -> do + e %. "type" `shouldMatch` "notifications.missed" + + -- Until we ack the full sync, we can't get new events + ackFullSync ackChan + + -- withEventsWebSocket alice clientId $ \eventsChan ackChan -> do + -- Now we can see the next event + assertEvent eventsChan $ \e -> do + e %. "data.event.payload.0.type" `shouldMatch` "user.update" + e %. "data.event.payload.0.user.handle" `shouldMatch` handle1 + ackEvent ackChan e + + -- We've consumed the whole queue. 
+ assertNoEvent eventsChan + +testTransientEventsDoNotTriggerDeadLetters :: (HasCallStack) => App () +testTransientEventsDoNotTriggerDeadLetters = do + let notifTTL = 1 # Second + withModifiedBackend (def {gundeckCfg = setField "settings.notificationTTL" (notifTTL #> Second)}) $ \domain -> do + alice <- randomUser domain def + -- Creates a non-transient event + client <- addClient alice def {acapabilities = Just ["consumable-notifications"]} >>= getJSON 201 + clientId <- objId client + + -- consume it + withEventsWebSocket alice clientId $ \eventsChan ackChan -> do + assertEvent eventsChan $ \e -> do + e %. "data.event.payload.0.type" `shouldMatch` "user.client-add" + e %. "type" `shouldMatch` "event" + e %. "data.event.payload.0.type" `shouldMatch` "user.client-add" + e %. "data.event.payload.0.client.id" `shouldMatch` clientId + deliveryTag <- e %. "data.delivery_tag" + sendAck ackChan deliveryTag False + + -- Self conv ID is same as user's ID, we'll use this to send typing + -- indicators, so we don't have to create another conv. + selfConvId <- objQidObject alice + -- Typing status is transient, currently no one is listening. + sendTypingStatus alice selfConvId "started" >>= assertSuccess + + withEventsWebSocket alice clientId $ \eventsChan _ackChan -> do + assertNoEvent eventsChan + +testTransientEvents :: (HasCallStack) => App () +testTransientEvents = do + withModifiedBackend def $ \domain -> do + alice <- randomUser domain def + client <- addClient alice def {acapabilities = Just ["consumable-notifications"]} >>= getJSON 201 + clientId <- objId client + + -- Self conv ID is same as user's ID, we'll use this to send typing + -- indicators, so we don't have to create another conv. + selfConvId <- objQidObject alice + + withEventsWebSocket alice clientId $ \eventsChan ackChan -> do + consumeAllEvents eventsChan ackChan + sendTypingStatus alice selfConvId "started" >>= assertSuccess + assertEvent eventsChan $ \e -> do + e %. 
"data.event.payload.0.type" `shouldMatch` "conversation.typing" + e %. "data.event.payload.0.qualified_conversation" `shouldMatch` selfConvId + deliveryTag <- e %. "data.delivery_tag" + sendAck ackChan deliveryTag False + + handle1 <- randomHandle + putHandle alice handle1 >>= assertSuccess + + sendTypingStatus alice selfConvId "stopped" >>= assertSuccess + + handle2 <- randomHandle + putHandle alice handle2 >>= assertSuccess + + -- We shouldn't see the stopped typing status because we were not connected to + -- the websocket when it was sent. The other events should still show up in + -- order. + withEventsWebSocket alice clientId $ \eventsChan ackChan -> do + for_ [handle1, handle2] $ \handle -> + assertEvent eventsChan $ \e -> do + e %. "data.event.payload.0.type" `shouldMatch` "user.update" + e %. "data.event.payload.0.user.handle" `shouldMatch` handle + ackEvent ackChan e + + assertNoEvent eventsChan + +---------------------------------------------------------------------- +-- helpers + +withEventsWebSockets :: forall uid a. 
(HasCallStack, MakesValue uid) => [(uid, String)] -> ([(TChan Value, TChan Value)] -> App a) -> App a +withEventsWebSockets userClients k = go [] $ reverse userClients + where + go :: [(TChan Value, TChan Value)] -> [(uid, String)] -> App a + go chans [] = k chans + go chans ((uid, cid) : remaining) = + withEventsWebSocket uid cid $ \eventsChan ackChan -> + go ((eventsChan, ackChan) : chans) remaining + +withEventsWebSocket :: (HasCallStack, MakesValue uid) => uid -> String -> (TChan Value -> TChan Value -> App a) -> App a +withEventsWebSocket uid cid k = do + closeWS <- newEmptyMVar + bracket (setup closeWS) (\(_, _, wsThread) -> cancel wsThread) $ \(eventsChan, ackChan, wsThread) -> do + x <- k eventsChan ackChan + + -- Ensure all the acks are sent before closing the websocket + isAckChanEmpty <- + retrying + (limitRetries 5 <> constantDelay 10_000) + (\_ isEmpty -> pure $ not isEmpty) + (\_ -> atomically $ isEmptyTChan ackChan) + unless isAckChanEmpty $ do + putStrLn $ colored yellow $ "The ack chan is not empty after 50ms, some acks may not make it to the server" + + void $ tryPutMVar closeWS () + + timeout 1_000_000 (wait wsThread) >>= \case + Nothing -> + putStrLn $ colored yellow $ "The websocket thread did not close after waiting for 1s" + Just () -> pure () + + pure x + where + setup :: (HasCallStack) => MVar () -> App (TChan Value, TChan Value, Async ()) + setup closeWS = do + (eventsChan, ackChan) <- liftIO $ (,) <$> newTChanIO <*> newTChanIO + wsThread <- eventsWebSocket uid cid eventsChan ackChan closeWS + pure (eventsChan, ackChan, wsThread) + +sendMsg :: (HasCallStack) => TChan Value -> Value -> App () +sendMsg eventsChan msg = liftIO $ atomically $ writeTChan eventsChan msg + +ackFullSync :: (HasCallStack) => TChan Value -> App () +ackFullSync ackChan = do + sendMsg ackChan + $ object ["type" .= "ack_full_sync"] + +ackEvent :: (HasCallStack) => TChan Value -> Value -> App () +ackEvent ackChan event = do + deliveryTag <- event %. 
"data.delivery_tag" + sendAck ackChan deliveryTag False + +sendAck :: (HasCallStack) => TChan Value -> Value -> Bool -> App () +sendAck ackChan deliveryTag multiple = do + sendMsg ackChan + $ object + [ "type" .= "ack", + "data" + .= object + [ "delivery_tag" .= deliveryTag, + "multiple" .= multiple + ] + ] + +assertEvent :: (HasCallStack) => TChan Value -> ((HasCallStack) => Value -> App a) -> App a +assertEvent eventsChan expectations = do + timeout 10_000_000 (atomically (readTChan eventsChan)) >>= \case + Nothing -> assertFailure "No event received for 10s" + Just e -> do + pretty <- prettyJSON e + addFailureContext ("event:\n" <> pretty) + $ expectations e + +assertNoEvent :: (HasCallStack) => TChan Value -> App () +assertNoEvent eventsChan = do + timeout 1_000_000 (atomically (readTChan eventsChan)) >>= \case + Nothing -> pure () + Just e -> do + eventJSON <- prettyJSON e + assertFailure $ "Did not expect event: \n" <> eventJSON + +consumeAllEvents :: TChan Value -> TChan Value -> App () +consumeAllEvents eventsChan ackChan = do + timeout 1_000_000 (atomically (readTChan eventsChan)) >>= \case + Nothing -> pure () + Just e -> do + ackEvent ackChan e + consumeAllEvents eventsChan ackChan + +eventsWebSocket :: (MakesValue user) => user -> String -> TChan Value -> TChan Value -> MVar () -> App (Async ()) +eventsWebSocket user clientId eventsChan ackChan closeWS = do + serviceMap <- getServiceMap =<< objDomain user + uid <- objId =<< objQidObject user + let HostPort caHost caPort = serviceHostPort serviceMap Cannon + path = "/events?client=" <> clientId + caHdrs = [(fromString "Z-User", toByteString' uid)] + app conn = do + r <- + async $ wsRead conn `catch` \(e :: WS.ConnectionException) -> + case e of + WS.CloseRequest {} -> pure () + _ -> throwIO e + w <- async $ wsWrite conn + void $ waitAny [r, w] + + wsRead conn = forever $ do + bs <- WS.receiveData conn + case decodeStrict' bs of + Just n -> atomically $ writeTChan eventsChan n + Nothing -> + error $ 
"Failed to decode events: " ++ show bs + + wsWrite conn = forever $ do + eitherAck <- race (readMVar closeWS) (atomically $ readTChan ackChan) + case eitherAck of + Left () -> WS.sendClose conn (Text.pack "") + Right ack -> WS.sendBinaryData conn (encode ack) + liftIO + $ async + $ WS.runClientWith + caHost + (fromIntegral caPort) + path + WS.defaultConnectionOptions + caHdrs + app diff --git a/integration/test/Test/Federation.hs b/integration/test/Test/Federation.hs index ff1f2ae2304..7bd0c888ac0 100644 --- a/integration/test/Test/Federation.hs +++ b/integration/test/Test/Federation.hs @@ -114,7 +114,7 @@ testNotificationsForOfflineBackends = do objQid delUserDeletedNotif `shouldMatch` objQid delUser runCodensity (startDynamicBackend downBackend mempty) $ \_ -> do - newMsgNotif <- awaitNotification downUser1 downClient1 noValue isNewMessageNotif + newMsgNotif <- awaitNotificationClient downUser1 downClient1 noValue isNewMessageNotif newMsgNotif %. "payload.0.qualified_conversation" `shouldMatch` objQidObject upBackendConv newMsgNotif %. 
"payload.0.data.text" `shouldMatchBase64` "success message for down user" @@ -124,11 +124,11 @@ testNotificationsForOfflineBackends = do isNotifConv downBackendConv, isNotifForUser delUser ] - void $ awaitNotification downUser1 downClient1 (Just newMsgNotif) isDelUserLeaveDownConvNotif + void $ awaitNotificationClient downUser1 (Just downClient1) (Just newMsgNotif) isDelUserLeaveDownConvNotif -- FUTUREWORK: Uncomment after fixing this bug: https://wearezeta.atlassian.net/browse/WPB-3664 -- void $ awaitNotification downUser1 downClient1 (Just newMsgNotif) 1 isOtherUser2LeaveUpConvNotif -- void $ awaitNotification otherUser otherClient (Just newMsgNotif) isDelUserLeaveDownConvNotif - delUserDeletedNotif <- nPayload $ awaitNotification downUser1 downClient1 (Just newMsgNotif) isDeleteUserNotif + delUserDeletedNotif <- nPayload $ awaitNotificationClient downUser1 downClient1 (Just newMsgNotif) isDeleteUserNotif objQid delUserDeletedNotif `shouldMatch` objQid delUser diff --git a/integration/test/Test/LegalHold.hs b/integration/test/Test/LegalHold.hs index 7b0e3680aeb..f049acbaf70 100644 --- a/integration/test/Test/LegalHold.hs +++ b/integration/test/Test/LegalHold.hs @@ -319,7 +319,7 @@ testLHRequestDevice v = do [bobc1, bobc2] <- replicateM 2 do objId $ addClient bob def `bindResponse` getJSON 201 for_ [bobc1, bobc2] \client -> - awaitNotification bob client noValue isUserLegalholdRequestNotif >>= \notif -> do + awaitNotificationClient bob client noValue isUserLegalholdRequestNotif >>= \notif -> do notif %. "payload.0.last_prekey" `shouldMatch` lpk notif %. "payload.0.id" `shouldMatch` objId bob @@ -411,15 +411,14 @@ testLHApproveDevice = do replicateM 2 do objId $ addClient bob def `bindResponse` getJSON 201 >>= traverse_ \client -> - awaitNotification bob client noValue isUserClientAddNotif >>= \notif -> do + awaitNotificationClient bob client noValue isUserClientAddNotif >>= \notif -> do notif %. "payload.0.client.type" `shouldMatch` "legalhold" notif %. 
"payload.0.client.class" `shouldMatch` "legalhold" -- the other team members receive a notification about the -- legalhold device being approved in their team for_ [alice, charlie] \user -> do - client <- objId $ addClient user def `bindResponse` getJSON 201 - awaitNotification user client noValue isUserLegalholdEnabledNotif >>= \notif -> do + awaitNotification user noValue isUserLegalholdEnabledNotif >>= \notif -> do notif %. "payload.0.id" `shouldMatch` objId bob for_ [ollie, sandy] \outsider -> do outsiderClient <- objId $ addClient outsider def `bindResponse` getJSON 201 @@ -489,9 +488,7 @@ testLHDisableForUser = do withMockServer def lhMockApp \lhDomAndPort chan -> do setUpLHDevice tid alice bob lhDomAndPort - bobc <- objId $ addClient bob def `bindResponse` getJSON 201 - - awaitNotification bob bobc noValue isUserClientAddNotif >>= \notif -> do + awaitNotification bob noValue isUserClientAddNotif >>= \notif -> do notif %. "payload.0.client.type" `shouldMatch` "legalhold" notif %. "payload.0.client.class" `shouldMatch` "legalhold" @@ -515,8 +512,8 @@ testLHDisableForUser = do mzero void $ local (setTimeoutTo 90) do - awaitNotification bob bobc noValue isUserClientRemoveNotif - *> awaitNotification bob bobc noValue isUserLegalholdDisabledNotif + awaitNotification bob noValue isUserClientRemoveNotif + *> awaitNotification bob noValue isUserLegalholdDisabledNotif bobId <- objId bob lhClients <- diff --git a/integration/test/Test/Teams.hs b/integration/test/Test/Teams.hs index 200f7758fca..aeca67789f1 100644 --- a/integration/test/Test/Teams.hs +++ b/integration/test/Test/Teams.hs @@ -165,8 +165,8 @@ testInvitePersonalUserToLargeTeam = do traverse_ (connectTwoUsers knut) [alice, dawn, eli] addFailureContext ("tid: " <> tid) $ do - uidContext <- mkContextUserIds [("owner", owner), ("alice", alice), ("knut", knut), ("dawn", dawn), ("eli", eli)] - addFailureContext uidContext $ do + let uids = [("owner", owner), ("alice", alice), ("knut", knut), ("dawn", dawn), 
("eli", eli)] + addUsersToFailureContext uids $ do lastTeamNotif <- getTeamNotifications owner Nothing `bindResponse` \resp -> do resp.status `shouldMatchInt` 200 @@ -205,16 +205,6 @@ testInvitePersonalUserToLargeTeam = do resp.json %. "notifications.1.payload.0.team" `shouldMatch` tid resp.json %. "notifications.1.payload.0.data.user" `shouldMatch` objId knut -mkContextUserIds :: (MakesValue user) => [(String, user)] -> App String -mkContextUserIds = - fmap (intercalate "\n") - . traverse - ( \(name, user) -> do - uid <- objQidObject user %. "id" & asString - domain <- objDomain user - pure $ name <> ": " <> uid <> "@" <> domain - ) - testInvitePersonalUserToTeamMultipleInvitations :: (HasCallStack) => App () testInvitePersonalUserToTeamMultipleInvitations = do (owner, tid, _) <- createTeam OwnDomain 0 diff --git a/integration/test/Testlib/ModService.hs b/integration/test/Testlib/ModService.hs index 4e9dae0984d..a41244faaae 100644 --- a/integration/test/Testlib/ModService.hs +++ b/integration/test/Testlib/ModService.hs @@ -179,7 +179,9 @@ startDynamicBackend resource beOverrides = do >=> setField "federator.host" ("127.0.0.1" :: String) >=> setField "federator.port" resource.berFederatorInternal >=> setField "rabbitmq.vHost" resource.berVHost, - gundeckCfg = setField "settings.federationDomain" resource.berDomain, + gundeckCfg = + setField "settings.federationDomain" resource.berDomain + >=> setField "rabbitmq.vHost" resource.berVHost, backgroundWorkerCfg = setField "federatorInternal.port" resource.berFederatorInternal >=> setField "federatorInternal.host" ("127.0.0.1" :: String) @@ -187,7 +189,9 @@ startDynamicBackend resource beOverrides = do federatorInternalCfg = setField "federatorInternal.port" resource.berFederatorInternal >=> setField "federatorExternal.port" resource.berFederatorExternal - >=> setField "optSettings.setFederationDomain" resource.berDomain + >=> setField "optSettings.setFederationDomain" resource.berDomain, + cannonCfg = + setField 
"rabbitmq.vHost" resource.berVHost } setKeyspace :: ServiceOverrides diff --git a/integration/test/Testlib/Types.hs b/integration/test/Testlib/Types.hs index 0f91d48c595..f7f20c36782 100644 --- a/integration/test/Testlib/Types.hs +++ b/integration/test/Testlib/Types.hs @@ -379,11 +379,6 @@ getMLSState = do ref <- asks (.mls) liftIO $ readIORef ref -setMLSState :: MLSState -> App () -setMLSState s = do - ref <- asks (.mls) - liftIO $ writeIORef ref s - modifyMLSState :: (MLSState -> MLSState) -> App () modifyMLSState f = do ref <- asks (.mls) diff --git a/libs/extended/src/Network/AMQP/Extended.hs b/libs/extended/src/Network/AMQP/Extended.hs index 955e54c0a33..4aa48aefc5b 100644 --- a/libs/extended/src/Network/AMQP/Extended.hs +++ b/libs/extended/src/Network/AMQP/Extended.hs @@ -4,11 +4,13 @@ module Network.AMQP.Extended ( RabbitMqHooks (..), RabbitMqAdminOpts (..), AmqpEndpoint (..), + withConnection, openConnectionWithRetries, mkRabbitMqAdminClientEnv, mkRabbitMqChannelMVar, demoteOpts, RabbitMqTlsOpts (..), + mkConnectionOpts, ) where @@ -55,7 +57,7 @@ data RabbitMqTlsOpts = RabbitMqTlsOpts { caCert :: !(Maybe FilePath), insecureSkipVerifyTls :: Bool } - deriving (Show) + deriving (Eq, Show) parseTlsJson :: Object -> Parser (Maybe RabbitMqTlsOpts) parseTlsJson v = do @@ -76,7 +78,7 @@ data RabbitMqAdminOpts = RabbitMqAdminOpts tls :: Maybe RabbitMqTlsOpts, adminPort :: !Int } - deriving (Show) + deriving (Eq, Show) instance FromJSON RabbitMqAdminOpts where parseJSON = withObject "RabbitMqAdminOpts" $ \v -> @@ -111,7 +113,7 @@ data AmqpEndpoint = AmqpEndpoint vHost :: !Text, tls :: !(Maybe RabbitMqTlsOpts) } - deriving (Show) + deriving (Eq, Show) instance FromJSON AmqpEndpoint where parseJSON = withObject "AmqpEndpoint" $ \v -> @@ -145,6 +147,49 @@ data RabbitMqConnectionError = RabbitMqConnectionFailed String instance Exception RabbitMqConnectionError +-- | Connects with RabbitMQ and opens a channel. +withConnection :: + forall m a. 
+ (MonadIO m, MonadMask m) => + Logger -> + AmqpEndpoint -> + (Q.Connection -> m a) -> + m a +withConnection l AmqpEndpoint {..} k = do + -- Jittered exponential backoff with 1ms as starting delay and 1s as total + -- wait time. + let policy = limitRetriesByCumulativeDelay 1_000_000 $ fullJitterBackoff 1000 + logError willRetry e retryStatus = do + Log.err l $ + Log.msg (Log.val "Failed to connect to RabbitMQ") + . Log.field "error" (displayException @SomeException e) + . Log.field "willRetry" willRetry + . Log.field "retryCount" retryStatus.rsIterNumber + getConn = + recovering + policy + ( skipAsyncExceptions + <> [logRetries (const $ pure True) logError] + ) + ( const $ do + Log.info l $ Log.msg (Log.val "Trying to connect to RabbitMQ") + connOpts <- mkConnectionOpts AmqpEndpoint {..} + liftIO $ Q.openConnection'' connOpts + ) + bracket getConn (liftIO . Q.closeConnection) k + +mkConnectionOpts :: (MonadIO m) => AmqpEndpoint -> m Q.ConnectionOpts +mkConnectionOpts AmqpEndpoint {..} = do + mTlsSettings <- traverse (liftIO . (mkTLSSettings host)) tls + (username, password) <- liftIO $ readCredsFromEnv + pure + Q.defaultConnectionOpts + { Q.coServers = [(host, fromIntegral port)], + Q.coVHost = vHost, + Q.coAuth = [Q.plain username password], + Q.coTLSSettings = fmap Q.TLSCustom mTlsSettings + } + -- | Connects with RabbitMQ and opens a channel. If the channel is closed for -- some reasons, reopens the channel. If the connection is closed for some -- reasons, keeps retrying to connect until it works. @@ -178,15 +223,8 @@ openConnectionWithRetries l AmqpEndpoint {..} hooks = do ) ( const $ do Log.info l $ Log.msg (Log.val "Trying to connect to RabbitMQ") - mTlsSettings <- traverse (liftIO . 
(mkTLSSettings host)) tls - liftIO $ - Q.openConnection'' $ - Q.defaultConnectionOpts - { Q.coServers = [(host, fromIntegral port)], - Q.coVHost = vHost, - Q.coAuth = [Q.plain username password], - Q.coTLSSettings = fmap Q.TLSCustom mTlsSettings - } + connOpts <- mkConnectionOpts AmqpEndpoint {..} + liftIO $ Q.openConnection'' connOpts ) bracket getConn (liftIO . Q.closeConnection) $ \conn -> do liftBaseWith $ \runInIO -> diff --git a/libs/jwt-tools/src/Data/Jwt/Tools.hs b/libs/jwt-tools/src/Data/Jwt/Tools.hs index e9c3ce549de..777485f6426 100644 --- a/libs/jwt-tools/src/Data/Jwt/Tools.hs +++ b/libs/jwt-tools/src/Data/Jwt/Tools.hs @@ -167,15 +167,6 @@ generateDpopToken dpopProof uid cid handle displayName tid domain nonce uri meth methodCStr <- liftIO $ newCString $ UTF8.toString $ methodToBS method backendPubkeyBundleCStr <- toCStr backendPubkeyBundle - -- log all variable inputs (can comment in if need to generate new test data) - -- traceM $ "proof = Proof " <> show (_unProof dpopProof) - -- traceM $ "uid = UserId " <> show (_unUserId uid) - -- traceM $ "nonce = Nonce " <> show (_unNonce nonce) - -- traceM $ "expires = ExpiryEpoch " <> show (_unExpiryEpoch maxExpiration) - -- traceM $ "handle = Handle " <> show (_unHandle handle) - -- traceM $ "displayName = DisplayName " <> show (_unDisplayName displayName) - -- traceM $ "tid = TeamId " <> show (_unTeamId tid) - let before = generateDpopAccessTokenFfi dpopProofCStr diff --git a/libs/types-common/src/Data/Mailbox.hs b/libs/types-common/src/Data/Mailbox.hs index c9889d051f4..1772284d932 100644 --- a/libs/types-common/src/Data/Mailbox.hs +++ b/libs/types-common/src/Data/Mailbox.hs @@ -97,8 +97,7 @@ obsNoWsCtl = do || (c == 127) ) -obsCtextParser, obsQtextParser :: Parser Char -obsCtextParser = obsNoWsCtl +obsQtextParser :: Parser Char obsQtextParser = obsNoWsCtl quotedPairParser :: Parser Char diff --git a/libs/types-common/src/Data/Range.hs b/libs/types-common/src/Data/Range.hs index b2dd1b60332..4a3ed6a6f49 
100644 --- a/libs/types-common/src/Data/Range.hs +++ b/libs/types-common/src/Data/Range.hs @@ -42,9 +42,13 @@ module Data.Range rnil, rcons, (<|), + runcons, rinc, rappend, rsingleton, + rconcat, + rangeSetToList, + rangeListToSet, -- * 'Arbitrary' generators Ranged (..), @@ -86,6 +90,7 @@ import Data.Text qualified as T import Data.Text.Ascii (AsciiChar, AsciiChars, AsciiText, fromAsciiChars) import Data.Text.Ascii qualified as Ascii import Data.Text.Lazy qualified as TL +import Data.Type.Bool import Data.Type.Ord import GHC.TypeNats import Imports @@ -308,13 +313,25 @@ rcast (Range a) = Range a rnil :: (Monoid a) => Range 0 0 a rnil = Range mempty -rcons, (<|) :: (n <= m) => a -> Range n m [a] -> Range n (m + 1) [a] +rcons, (<|) :: (n <= m) => a -> Range n m [a] -> Range (n + 1) (m + 1) [a] rcons a (Range aa) = Range (a : aa) infixr 5 <| (<|) = rcons +runcons :: + ( n <= m, + n' ~ If (n >=? 1) (n - 1) 0, + m' ~ If (m >=? 1) (m - 1) 0 + ) => + Range n m [a] -> + Maybe (a, Range n' m' [a]) +runcons r = + case fromRange r of + [] -> Nothing + (x : xs) -> Just (x, Range xs) + rinc :: (Integral a, n <= m) => Range n m a -> Range n (m + 1) a rinc (Range a) = Range (a + 1) @@ -330,6 +347,20 @@ rangedNumToParamSchema _ = & S.minimum_ ?~ fromKnownNat (Proxy @n) & S.maximum_ ?~ fromKnownNat (Proxy @m) +rconcat :: Range n m [Range 0 1 [a]] -> Range 0 m [a] +rconcat (Range rs) = Range $ concatMap fromRange rs + +-- | Going from a set to a List should keep the same range because the number of +-- elements cannot grow or shrink. +rangeSetToList :: Range n m (Set a) -> Range n m [a] +rangeSetToList = Range . Set.toList . fromRange + +-- | A list can only shrink when it is converted to a Set, so the min bound +-- changes to 0 if the list can be empty, otherwise the min bound is 1 as the +-- list is guaranteed to have at least 1 element. +rangeListToSet :: (If (n >=? 1) (n' ~ 1) (n' ~ 0), Ord a) => Range n m [a] -> Range n' m (Set a) +rangeListToSet = Range . Set.fromList . 
fromRange + ----------------------------------------------------------------------------- class Bounds a where diff --git a/libs/wai-utilities/src/Network/Wai/Utilities/Server.hs b/libs/wai-utilities/src/Network/Wai/Utilities/Server.hs index 20f8fc9b934..b51ade2c18b 100644 --- a/libs/wai-utilities/src/Network/Wai/Utilities/Server.hs +++ b/libs/wai-utilities/src/Network/Wai/Utilities/Server.hs @@ -208,6 +208,7 @@ requestIdMiddleware logger reqIdHeaderName origApp req responder = let reqWithId = req {requestHeaders = (reqIdHeaderName, reqId) : req.requestHeaders} origApp reqWithId responder +{-# INLINEABLE catchErrors #-} catchErrors :: Logger -> HeaderName -> Middleware catchErrors l reqIdHeaderName = catchErrorsWithRequestId (lookupRequestId reqIdHeaderName) l @@ -232,8 +233,6 @@ catchErrorsWithRequestId getRequestId l app req k = er <- runHandlers ex errorHandlers onError l mReqId req k er -{-# INLINEABLE catchErrors #-} - -- | Standard handlers for turning exceptions into appropriate -- 'Error' responses. errorHandlers :: [Handler IO (Either Wai.Error JSONResponse)] @@ -349,7 +348,7 @@ rethrow5xx getRequestId logger app req k = app req k' k' resp@WaiInt.ResponseRaw {} = do -- See Note [Raw Response] let logMsg = - field "canoncalpath" (show $ pathInfo req) + field "canonicalpath" (show $ pathInfo req) . field "rawpath" (rawPathInfo req) . field "request" (fromMaybe defRequestId $ getRequestId req) . 
msg (val "ResponseRaw - cannot collect metrics or log info on errors") diff --git a/libs/wire-api/src/Wire/API/Error/Brig.hs b/libs/wire-api/src/Wire/API/Error/Brig.hs index 6dc470def9a..f5d2c28ad72 100644 --- a/libs/wire-api/src/Wire/API/Error/Brig.hs +++ b/libs/wire-api/src/Wire/API/Error/Brig.hs @@ -102,6 +102,7 @@ data BrigError | PropertyValueTooLarge | UserAlreadyInATeam | MLSServicesNotAllowed + | NotificationQueueConnectionError instance (Typeable (MapError e), KnownError (MapError e)) => IsSwaggerError (e :: BrigError) where addToOpenApi = addStaticErrorToSwagger @(MapError e) @@ -304,3 +305,5 @@ type instance MapError 'PropertyValueTooLarge = 'StaticError 403 "property-value type instance MapError 'UserAlreadyInATeam = 'StaticError 403 "user-already-in-a-team" "Switching teams is not allowed" type instance MapError 'MLSServicesNotAllowed = 'StaticError 409 "mls-services-not-allowed" "Services not allowed in MLS" + +type instance MapError 'NotificationQueueConnectionError = 'StaticError 500 "internal-server-error" "Internal server error" diff --git a/libs/wire-api/src/Wire/API/Event/WebSocketProtocol.hs b/libs/wire-api/src/Wire/API/Event/WebSocketProtocol.hs new file mode 100644 index 00000000000..4a9f9d5b7fe --- /dev/null +++ b/libs/wire-api/src/Wire/API/Event/WebSocketProtocol.hs @@ -0,0 +1,134 @@ +{-# LANGUAGE TemplateHaskell #-} + +module Wire.API.Event.WebSocketProtocol where + +import Control.Lens (makePrisms) +import Data.Aeson (FromJSON, ToJSON) +import Data.Aeson qualified as A +import Data.Aeson.Types qualified as A +import Data.Schema +import Data.Word +import Imports +import Wire.API.Internal.Notification +import Wire.Arbitrary + +data AckData = AckData + { deliveryTag :: Word64, + -- | Acknowledge all deliveryTags <= 'deliveryTag', see RabbitMQ + -- documenation: + -- https://www.rabbitmq.com/docs/confirms#consumer-acks-multiple-parameter + multiple :: Bool + } + deriving (Show, Eq, Generic) + deriving (Arbitrary) via (GenericUniform AckData) 
+ deriving (FromJSON, ToJSON) via (Schema AckData) + +instance ToSchema AckData where + schema = + object "AckData" $ + AckData + <$> (.deliveryTag) .= field "delivery_tag" schema + <*> multiple .= field "multiple" schema + +data EventData = EventData + { event :: QueuedNotification, + deliveryTag :: Word64 + } + deriving (Show, Eq, Generic) + deriving (Arbitrary) via (GenericUniform EventData) + deriving (FromJSON, ToJSON) via (Schema EventData) + +instance ToSchema EventData where + schema = + object "EventData" $ + EventData + <$> event .= field "event" schema + <*> (.deliveryTag) .= field "delivery_tag" schema + +data MessageServerToClient + = EventMessage EventData + | EventFullSync + deriving (Show, Eq, Generic) + deriving (Arbitrary) via (GenericUniform MessageServerToClient) + +makePrisms ''MessageServerToClient + +data MessageClientToServer + = AckMessage AckData + | AckFullSync + deriving (Show, Eq, Generic) + deriving (Arbitrary) via (GenericUniform MessageClientToServer) + +makePrisms ''MessageClientToServer + +---------------------------------------------------------------------- +-- ServerToClient + +-- | Local type, only needed for writing the ToSchema instance for 'MessageServerToClient'. 
+data MessageTypeServerToClient = MsgTypeEventMessage | MsgTypeEventFullSync + deriving (Eq, Enum, Bounded) + +msgTypeSchemaServerToClient :: ValueSchema NamedSwaggerDoc MessageTypeServerToClient +msgTypeSchemaServerToClient = + enum @Text "MessageTypeServerToClient" $ + mconcat $ + [ element "event" MsgTypeEventMessage, + element "notifications.missed" MsgTypeEventFullSync + ] + +instance ToSchema MessageServerToClient where + schema = + object "MessageServerToClient" $ + fromTagged <$> toTagged .= bind (fst .= field "type" msgTypeSchemaServerToClient) (snd .= untaggedSchema) + where + toTagged :: MessageServerToClient -> (MessageTypeServerToClient, MessageServerToClient) + toTagged d@(EventMessage _) = (MsgTypeEventMessage, d) + toTagged d@EventFullSync = (MsgTypeEventFullSync, d) + + fromTagged :: (MessageTypeServerToClient, MessageServerToClient) -> MessageServerToClient + fromTagged = snd + + untaggedSchema :: SchemaP SwaggerDoc (A.Object, MessageTypeServerToClient) [A.Pair] (MessageServerToClient) (MessageServerToClient) + untaggedSchema = dispatch $ \case + MsgTypeEventMessage -> tag _EventMessage (field "data" schema) + MsgTypeEventFullSync -> tag _EventFullSync (pure ()) + +deriving via Schema MessageServerToClient instance FromJSON MessageServerToClient + +deriving via Schema MessageServerToClient instance ToJSON MessageServerToClient + +---------------------------------------------------------------------- +-- ClientToServer + +-- | Local type, only needed for writing the ToSchema instance for 'MessageClientToServer'. 
+data MessageTypeClientToServer = MsgTypeAckMessage | MsgTypeAckFullSync + deriving (Eq, Enum, Bounded) + +msgTypeSchemaClientToServer :: ValueSchema NamedSwaggerDoc MessageTypeClientToServer +msgTypeSchemaClientToServer = + enum @Text "MessageTypeClientToServer" $ + mconcat $ + [ element "ack" MsgTypeAckMessage, + element "ack_full_sync" MsgTypeAckFullSync + ] + +instance ToSchema MessageClientToServer where + schema = + object "MessageClientToServer" $ + fromTagged <$> toTagged .= bind (fst .= field "type" msgTypeSchemaClientToServer) (snd .= untaggedSchema) + where + toTagged :: MessageClientToServer -> (MessageTypeClientToServer, MessageClientToServer) + toTagged d@(AckMessage _) = (MsgTypeAckMessage, d) + toTagged d@AckFullSync = (MsgTypeAckFullSync, d) + + fromTagged :: (MessageTypeClientToServer, MessageClientToServer) -> MessageClientToServer + fromTagged = snd + + untaggedSchema :: SchemaP SwaggerDoc (A.Object, MessageTypeClientToServer) [A.Pair] MessageClientToServer MessageClientToServer + untaggedSchema = dispatch $ \case + MsgTypeAckMessage -> tag _AckMessage (field "data" schema) + MsgTypeAckFullSync -> tag _AckFullSync (pure ()) + +deriving via Schema MessageClientToServer instance FromJSON MessageClientToServer + +deriving via Schema MessageClientToServer instance ToJSON MessageClientToServer diff --git a/libs/wire-api/src/Wire/API/Notification.hs b/libs/wire-api/src/Wire/API/Notification.hs index 83317eb5259..cafb68bfacf 100644 --- a/libs/wire-api/src/Wire/API/Notification.hs +++ b/libs/wire-api/src/Wire/API/Notification.hs @@ -35,6 +35,12 @@ module Wire.API.Notification queuedHasMore, queuedTime, GetNotificationsResponse (..), + userNotificationExchangeName, + userNotificationDlxName, + userNotificationDlqName, + clientNotificationQueueName, + userRoutingKey, + clientRoutingKey, ) where @@ -166,3 +172,28 @@ instance AsUnion '[Respond 404 "Notification list" QueuedNotificationList, Respo fromUnion (S (Z (I xs))) = GetNotificationsSuccess xs 
fromUnion (Z (I xs)) = GetNotificationsWithStatusNotFound xs fromUnion (S (S x)) = case x of {} + +-------------------------------------------------------------------------------- +-- RabbitMQ exchanges and queues + +-- | The name of the RabbitMQ exchange to which user notifications are published. +userNotificationExchangeName :: Text +userNotificationExchangeName = "user-notifications" + +-- | The name of the RabbitMQ dead letter exchange for user notifications. +userNotificationDlxName :: Text +userNotificationDlxName = "dead-user-notifications" + +-- | The name of the RabbitMQ queue for dead-lettered user notifications. +userNotificationDlqName :: Text +userNotificationDlqName = "dead-user-notifications" + +clientNotificationQueueName :: UserId -> ClientId -> Text +clientNotificationQueueName uid cid = + "user-notifications." <> clientRoutingKey uid cid + +userRoutingKey :: UserId -> Text +userRoutingKey = idToText + +clientRoutingKey :: UserId -> ClientId -> Text +clientRoutingKey uid cid = userRoutingKey uid <> "." 
<> clientToText cid diff --git a/libs/wire-api/src/Wire/API/Routes/Internal/Gundeck.hs b/libs/wire-api/src/Wire/API/Routes/Internal/Gundeck.hs index c786d1c3020..b9f5eb40daf 100644 --- a/libs/wire-api/src/Wire/API/Routes/Internal/Gundeck.hs +++ b/libs/wire-api/src/Wire/API/Routes/Internal/Gundeck.hs @@ -95,6 +95,15 @@ type InternalAPI = :<|> Named "i-clients-delete" (ZUser :> "clients" :> Capture "cid" ClientId :> Delete '[JSON] NoContent) :<|> Named "i-user-delete" (ZUser :> "user" :> Delete '[JSON] NoContent) :<|> Named "i-push-tokens-get" ("push-tokens" :> Capture "uid" UserId :> Get '[JSON] PushTokenList) + :<|> Named + "i-reg-consumable-notifs" + ( "users" + :> Capture "uid" UserId + :> "clients" + :> Capture "cid" ClientId + :> "consumable-notifications" + :> PostNoContent + ) ) swaggerDoc :: S.OpenApi diff --git a/libs/wire-api/src/Wire/API/Routes/Public/Cannon.hs b/libs/wire-api/src/Wire/API/Routes/Public/Cannon.hs index eda1f01a8e3..c6f55ef6d6d 100644 --- a/libs/wire-api/src/Wire/API/Routes/Public/Cannon.hs +++ b/libs/wire-api/src/Wire/API/Routes/Public/Cannon.hs @@ -41,6 +41,22 @@ type CannonAPI = -- FUTUREWORK: Consider higher-level web socket combinator :> WebSocketPending ) + :<|> Named + "consume-events" + ( Summary "Consume events over a websocket connection" + :> "events" + :> ZUser + :> QueryParam' + [ -- Make this optional in https://wearezeta.atlassian.net/browse/WPB-11173 + Required, + Strict, + Description "Client ID" + ] + "client" + ClientId + -- FUTUREWORK: Consider higher-level web socket combinator + :> WebSocketPending + ) data CannonAPITag diff --git a/libs/wire-api/test/unit/Test/Wire/API/Roundtrip/Aeson.hs b/libs/wire-api/test/unit/Test/Wire/API/Roundtrip/Aeson.hs index ee312a10edf..787f4488813 100644 --- a/libs/wire-api/test/unit/Test/Wire/API/Roundtrip/Aeson.hs +++ b/libs/wire-api/test/unit/Test/Wire/API/Roundtrip/Aeson.hs @@ -38,6 +38,7 @@ import Wire.API.Conversation.Typing qualified as Conversation.Typing import 
Wire.API.CustomBackend qualified as CustomBackend import Wire.API.Event.Conversation qualified as Event.Conversation import Wire.API.Event.Team qualified as Event.Team +import Wire.API.Event.WebSocketProtocol qualified as EventWebSocketProtocol import Wire.API.FederationStatus qualified as FederationStatus import Wire.API.Locale qualified as Locale import Wire.API.Message qualified as Message @@ -338,6 +339,8 @@ tests = testRoundTrip @(User.Search.SearchResult User.Search.TeamContact), testRoundTrip @User.Search.PagingState, testRoundTrip @User.Search.TeamContact, + testRoundTrip @EventWebSocketProtocol.MessageServerToClient, + testRoundTrip @EventWebSocketProtocol.MessageClientToServer, testRoundTrip @(Wrapped.Wrapped "some_int" Int), testRoundTrip @Conversation.Action.SomeConversationAction, testRoundTrip @Routes.Version.Version, diff --git a/libs/wire-api/wire-api.cabal b/libs/wire-api/wire-api.cabal index 861fd9ce87b..3f27fff4519 100644 --- a/libs/wire-api/wire-api.cabal +++ b/libs/wire-api/wire-api.cabal @@ -101,6 +101,7 @@ library Wire.API.Event.Gundeck Wire.API.Event.LeaveReason Wire.API.Event.Team + Wire.API.Event.WebSocketProtocol Wire.API.FederationStatus Wire.API.FederationUpdate Wire.API.Internal.BulkPush diff --git a/libs/wire-subsystems/default.nix b/libs/wire-subsystems/default.nix index 768fcdc5cac..3653e611057 100644 --- a/libs/wire-subsystems/default.nix +++ b/libs/wire-subsystems/default.nix @@ -7,6 +7,7 @@ , amazonka , amazonka-core , amazonka-ses +, amqp , async , attoparsec , base @@ -96,6 +97,7 @@ mkDerivation { amazonka amazonka-core amazonka-ses + amqp async attoparsec base diff --git a/libs/wire-subsystems/src/Wire/GundeckAPIAccess.hs b/libs/wire-subsystems/src/Wire/GundeckAPIAccess.hs index c153cf22364..e402416a21b 100644 --- a/libs/wire-subsystems/src/Wire/GundeckAPIAccess.hs +++ b/libs/wire-subsystems/src/Wire/GundeckAPIAccess.hs @@ -17,6 +17,9 @@ data GundeckAPIAccess m a where UserDeleted :: UserId -> GundeckAPIAccess m () 
UnregisterPushClient :: UserId -> ClientId -> GundeckAPIAccess m () GetPushTokens :: UserId -> GundeckAPIAccess m [V2.PushToken] + RegisterConsumableNotifcationsClient :: UserId -> ClientId -> GundeckAPIAccess m () + +deriving instance Show (GundeckAPIAccess m a) makeSem ''GundeckAPIAccess @@ -50,3 +53,8 @@ runGundeckAPIAccess ep = interpret $ \case . zUser uid . expect2xx responseJsonMaybe rsp & maybe (pure []) (pure . V2.pushTokens) + RegisterConsumableNotifcationsClient uid cid -> do + void . rpcWithRetries "gundeck" ep $ + method POST + . paths ["i", "users", toByteString' uid, "clients", toByteString' cid, "consumable-notifications"] + . expect2xx diff --git a/libs/wire-subsystems/src/Wire/NotificationSubsystem.hs b/libs/wire-subsystems/src/Wire/NotificationSubsystem.hs index d854c0acb1b..9274cce698d 100644 --- a/libs/wire-subsystems/src/Wire/NotificationSubsystem.hs +++ b/libs/wire-subsystems/src/Wire/NotificationSubsystem.hs @@ -49,6 +49,7 @@ data NotificationSubsystem m a where CleanupUser :: UserId -> NotificationSubsystem m () UnregisterPushClient :: UserId -> ClientId -> NotificationSubsystem m () GetPushTokens :: UserId -> NotificationSubsystem m [PushToken] + SetupConsumableNotifications :: UserId -> ClientId -> NotificationSubsystem m () makeSem ''NotificationSubsystem diff --git a/libs/wire-subsystems/src/Wire/NotificationSubsystem/Interpreter.hs b/libs/wire-subsystems/src/Wire/NotificationSubsystem/Interpreter.hs index 5b2859d1ff1..89d80fcb70c 100644 --- a/libs/wire-subsystems/src/Wire/NotificationSubsystem/Interpreter.hs +++ b/libs/wire-subsystems/src/Wire/NotificationSubsystem/Interpreter.hs @@ -46,6 +46,7 @@ runNotificationSubsystemGundeck cfg = interpret $ \case CleanupUser uid -> GundeckAPIAccess.userDeleted uid UnregisterPushClient uid cid -> GundeckAPIAccess.unregisterPushClient uid cid GetPushTokens uid -> GundeckAPIAccess.getPushTokens uid + SetupConsumableNotifications uid cid -> GundeckAPIAccess.registerConsumableNotifcationsClient uid 
cid data NotificationSubsystemConfig = NotificationSubsystemConfig { fanoutLimit :: Range 1 HardTruncationLimit Int32, diff --git a/libs/wire-subsystems/src/Wire/UserSubsystem.hs b/libs/wire-subsystems/src/Wire/UserSubsystem.hs index abe7db12694..aa36224b864 100644 --- a/libs/wire-subsystems/src/Wire/UserSubsystem.hs +++ b/libs/wire-subsystems/src/Wire/UserSubsystem.hs @@ -171,14 +171,6 @@ data CheckHandleResp makeSem ''UserSubsystem -removeEmail :: - ( Member UserSubsystem r, - Member (Error UserSubsystemError) r - ) => - Local UserId -> - Sem r () -removeEmail = removeEmailEither >=> fromEither - getUserProfile :: (Member UserSubsystem r) => Local UserId -> Qualified UserId -> Sem r (Maybe UserProfile) getUserProfile luid targetUser = listToMaybe <$> getUserProfiles luid [targetUser] diff --git a/libs/wire-subsystems/test/unit/Wire/NotificationSubsystem/InterpreterSpec.hs b/libs/wire-subsystems/test/unit/Wire/NotificationSubsystem/InterpreterSpec.hs index 9fbd3babe89..aae5e6710cc 100644 --- a/libs/wire-subsystems/test/unit/Wire/NotificationSubsystem/InterpreterSpec.hs +++ b/libs/wire-subsystems/test/unit/Wire/NotificationSubsystem/InterpreterSpec.hs @@ -307,37 +307,40 @@ runMiniStackWithControlledDelay mockConfig delayControl actualPushesRef = do . runControlledDelay delayControl . runInputConst mockConfig -runGundeckAPIAccessFailure :: (Member (Embed IO) r) => IORef [[V2.Push]] -> Sem (GundeckAPIAccess : r) a -> Sem r a +runGundeckAPIAccessFailure :: forall r a. (Member (Embed IO) r) => IORef [[V2.Push]] -> Sem (GundeckAPIAccess : r) a -> Sem r a runGundeckAPIAccessFailure pushesRef = interpret $ \action -> do + let unexpectedCall :: forall x. 
Sem r x + unexpectedCall = do + liftIO $ expectationFailure $ "Unexpected call to GundeckAPI: " <> show action + error "impossible" case action of PushV2 pushes -> liftIO $ do modifyIORef pushesRef (<> [pushes]) throwIO TestException - GundeckAPIAccess.UserDeleted uid -> - liftIO $ expectationFailure $ "Unexpected call to GundeckAPI: UserDeleted " <> show uid - GundeckAPIAccess.UnregisterPushClient uid cid -> - liftIO $ expectationFailure $ "Unexpected call to GundeckAPI: UnregisterPushClient " <> show uid <> " " <> show cid - GundeckAPIAccess.GetPushTokens uid -> do - liftIO $ expectationFailure $ "Unexpected call to GundeckAPI: GetPushTokens " <> show uid - error "impossible" + GundeckAPIAccess.UserDeleted {} -> unexpectedCall + GundeckAPIAccess.UnregisterPushClient {} -> unexpectedCall + GundeckAPIAccess.GetPushTokens {} -> unexpectedCall + GundeckAPIAccess.RegisterConsumableNotifcationsClient {} -> unexpectedCall data TestException = TestException deriving (Show) instance Exception TestException -runGundeckAPIAccessIORef :: (Member (Embed IO) r) => IORef [[V2.Push]] -> Sem (GundeckAPIAccess : r) a -> Sem r a +runGundeckAPIAccessIORef :: forall r a. (Member (Embed IO) r) => IORef [[V2.Push]] -> Sem (GundeckAPIAccess : r) a -> Sem r a runGundeckAPIAccessIORef pushesRef = - interpret \case - PushV2 pushes -> modifyIORef pushesRef (<> [pushes]) - GundeckAPIAccess.UserDeleted uid -> - liftIO $ expectationFailure $ "Unexpected call to GundeckAPI: UserDeleted " <> show uid - GundeckAPIAccess.UnregisterPushClient uid cid -> - liftIO $ expectationFailure $ "Unexpected call to GundeckAPI: UnregisterPushClient " <> show uid <> " " <> show cid - GundeckAPIAccess.GetPushTokens uid -> do - liftIO $ expectationFailure $ "Unexpected call to GundeckAPI: GetPushTokens " <> show uid - error "impossible" + interpret \action -> do + let unexpectedCall :: forall x. 
Sem r x + unexpectedCall = do + liftIO $ expectationFailure $ "Unexpected call to GundeckAPI " <> show action + error "impossible" + case action of + PushV2 pushes -> modifyIORef pushesRef (<> [pushes]) + GundeckAPIAccess.UserDeleted {} -> unexpectedCall + GundeckAPIAccess.UnregisterPushClient {} -> unexpectedCall + GundeckAPIAccess.GetPushTokens {} -> unexpectedCall + GundeckAPIAccess.RegisterConsumableNotifcationsClient {} -> unexpectedCall waitUntilPushes :: IORef [a] -> Int -> IO [a] waitUntilPushes pushesRef n = do diff --git a/libs/wire-subsystems/wire-subsystems.cabal b/libs/wire-subsystems/wire-subsystems.cabal index 26bf0e5d136..f4e19a433bd 100644 --- a/libs/wire-subsystems/wire-subsystems.cabal +++ b/libs/wire-subsystems/wire-subsystems.cabal @@ -149,6 +149,7 @@ library , amazonka , amazonka-core , amazonka-ses + , amqp , async , attoparsec , base diff --git a/services/background-worker/background-worker.cabal b/services/background-worker/background-worker.cabal index 387971a5fc0..795ade421a5 100644 --- a/services/background-worker/background-worker.cabal +++ b/services/background-worker/background-worker.cabal @@ -12,6 +12,7 @@ build-type: Simple library -- cabal-fmt: expand src exposed-modules: + Wire.BackendDeadUserNotificationWatcher Wire.BackendNotificationPusher Wire.BackgroundWorker Wire.BackgroundWorker.Env @@ -29,7 +30,11 @@ library build-depends: aeson , amqp + , async , base + , bytestring + , bytestring-conversion + , cassandra-util , containers , exceptions , extended @@ -37,6 +42,7 @@ library , http-client , http2-manager , imports + , kan-extensions , metrics-wai , monad-control , prometheus-client @@ -45,6 +51,7 @@ library , servant-server , text , tinylog + , transformers , transformers-base , types-common , unliftio diff --git a/services/background-worker/background-worker.integration.yaml b/services/background-worker/background-worker.integration.yaml index c23798e63ed..9102981507b 100644 --- 
a/services/background-worker/background-worker.integration.yaml +++ b/services/background-worker/background-worker.integration.yaml @@ -8,6 +8,12 @@ federatorInternal: host: 127.0.0.1 port: 8097 +cassandra: + endpoint: + host: 127.0.0.1 + port: 9042 + keyspace: gundeck_test + rabbitmq: host: 127.0.0.1 port: 5671 diff --git a/services/background-worker/default.nix b/services/background-worker/default.nix index 6ccf66f8ac7..7e04820200e 100644 --- a/services/background-worker/default.nix +++ b/services/background-worker/default.nix @@ -5,8 +5,11 @@ { mkDerivation , aeson , amqp +, async , base , bytestring +, bytestring-conversion +, cassandra-util , containers , data-default , exceptions @@ -20,6 +23,7 @@ , http-types , http2-manager , imports +, kan-extensions , lib , metrics-wai , monad-control @@ -51,7 +55,11 @@ mkDerivation { libraryHaskellDepends = [ aeson amqp + async base + bytestring + bytestring-conversion + cassandra-util containers exceptions extended @@ -59,6 +67,7 @@ mkDerivation { http-client http2-manager imports + kan-extensions metrics-wai monad-control prometheus-client @@ -67,6 +76,7 @@ mkDerivation { servant-server text tinylog + transformers transformers-base types-common unliftio diff --git a/services/background-worker/src/Wire/BackendDeadUserNotificationWatcher.hs b/services/background-worker/src/Wire/BackendDeadUserNotificationWatcher.hs new file mode 100644 index 00000000000..ad8c3c38254 --- /dev/null +++ b/services/background-worker/src/Wire/BackendDeadUserNotificationWatcher.hs @@ -0,0 +1,132 @@ +{-# LANGUAGE BlockArguments #-} +{-# LANGUAGE RecordWildCards #-} + +module Wire.BackendDeadUserNotificationWatcher where + +import Cassandra +import Control.Concurrent (putMVar) +import Control.Monad.Codensity +import Control.Monad.Trans.Maybe +import Data.ByteString.Char8 qualified as BS +import Data.ByteString.Conversion +import Data.Id +import Data.Map qualified as Map +import Imports hiding (putMVar) +import Network.AMQP qualified as Q +import 
Network.AMQP.Extended +import Network.AMQP.Lifted qualified as QL +import Network.AMQP.Types +import System.Logger qualified as Log +import UnliftIO hiding (bracket, putMVar) +import UnliftIO.Exception (bracket) +import Wire.API.Notification +import Wire.BackgroundWorker.Env + +getLastDeathQueue :: Maybe FieldTable -> Maybe ByteString +getLastDeathQueue (Just (FieldTable headers)) = do + case Map.lookup "x-last-death-queue" headers of + Just (FVString str) -> pure str + _ -> Nothing +getLastDeathQueue Nothing = Nothing + +-- FUTUREWORK: what happens if messages expire _after_ we checked against cassandra here? +-- Should we have an async notification terminate this? +startConsumer :: Q.Channel -> AppT IO Q.ConsumerTag +startConsumer chan = do + env <- ask + markAsWorking BackendDeadUserNoticationWatcher + + cassandra <- asks (.cassandra) + + void . lift $ Q.declareQueue chan Q.newQueue {Q.queueName = userNotificationDlqName} + QL.consumeMsgs chan userNotificationDlqName Q.Ack $ \(msg, envelope) -> + if (msg.msgDeliveryMode == Just Q.NonPersistent) + then do + -- ignore transient messages, ack it so they don't clog the queue + lift $ Q.ackEnv envelope + else do + -- forward non-transient messages to the respective client + let dat = getLastDeathQueue msg.msgHeaders + let vals = fmap (BS.split '.') dat + case vals of + Nothing -> logHeaderError env msg.msgHeaders + Just ["user-notifications", uidBS, cidBS] -> do + m <- runMaybeT $ do + uid <- hoistMaybe $ fromByteString uidBS + cid <- hoistMaybe $ fromByteString cidBS + pure (uid, cid) + (uid, cid) <- maybe (logParseError env dat) pure m + markAsNeedsFullSync cassandra uid cid + lift $ Q.ackEnv envelope + _ -> void $ logParseError env dat + where + logHeaderError env headers = do + Log.err + env.logger + ( Log.msg (Log.val "Could not find x-last-death-queue in headers") + . 
Log.field "error_configuring_dead_letter_exchange" (show headers) + ) + error "Could not find x-last-death-queue in headers" + logParseError env dat = do + Log.err env.logger $ + Log.msg (Log.val "Could not parse msgHeaders into uid/cid for dead letter exchange message") + . Log.field "error_parsing_message" (show dat) + error "Could not parse msgHeaders into uid/cid for dead letter exchange message" + +markAsNeedsFullSync :: ClientState -> UserId -> ClientId -> AppT IO () +markAsNeedsFullSync cassandra uid cid = do + runClient cassandra do + retry x1 $ write missedNotifications (params LocalQuorum (uid, cid)) + where + missedNotifications :: PrepQuery W (UserId, ClientId) () + missedNotifications = + [sql| + INSERT INTO missed_notifications (user_id, client_id) + VALUES (?, ?) + |] + +startWorker :: + AmqpEndpoint -> + AppT IO (Async ()) +startWorker amqp = do + env <- ask + mVar <- newEmptyMVar + connOpts <- mkConnectionOpts amqp + + -- This function will open a connection to rabbitmq and start the consumer. + -- We use an mvar to signal when the connection is closed so we can re-open it. + -- If the empty mvar is filled, we know the connection itself was closed and we need to re-open it. + -- If the mvar is filled with a connection, we know the connection itself is fine, + -- so we only need to re-open the channel + let openConnection connM = do + mConn <- lowerCodensity $ do + conn <- case connM of + Nothing -> do + -- Open the rabbit mq connection + conn <- Codensity $ bracket (liftIO $ Q.openConnection'' connOpts) (liftIO . 
Q.closeConnection) + -- We need to recover from connection closed by restarting it + liftIO $ Q.addConnectionClosedHandler conn True do + Log.err env.logger $ + Log.msg (Log.val "BackendDeadUserNoticationWatcher: Connection closed.") + putMVar mVar Nothing + runAppT env $ markAsNotWorking BackendDeadUserNoticationWatcher + pure conn + Just conn -> pure conn + + -- After starting the connection, open the channel + chan <- Codensity $ bracket (liftIO $ Q.openChannel conn) (liftIO . Q.closeChannel) + + -- If the channel stops, we need to re-open + liftIO $ Q.addChannelExceptionHandler chan $ \e -> do + Log.err env.logger $ + Log.msg (Log.val "BackendDeadUserNoticationWatcher: Caught exception in RabbitMQ channel.") + . Log.field "exception" (displayException e) + runAppT env $ markAsNotWorking BackendDeadUserNoticationWatcher + putMVar mVar (Just conn) + + -- Set up the consumer + void $ Codensity $ bracket (startConsumer chan) (liftIO . Q.cancelConsumer chan) + lift $ takeMVar mVar + openConnection mConn + + async (openConnection Nothing) diff --git a/services/background-worker/src/Wire/BackendNotificationPusher.hs b/services/background-worker/src/Wire/BackendNotificationPusher.hs index 6a6cf2f7f62..f055ed0b492 100644 --- a/services/background-worker/src/Wire/BackendNotificationPusher.hs +++ b/services/background-worker/src/Wire/BackendNotificationPusher.hs @@ -71,7 +71,7 @@ pushNotification runningFlag targetDomain (msg, envelope) = do -- does cause problems when trying to deregister consumers from the channel. This is because -- the internal mechanism to remove a consumer goes via the same notification handling code -- as messages from the Rabbit server. If the thread is tied up in the recovery code we - -- can't cancel the consumer, and the calling code will block until the cancelation message + -- can't cancel the consumer, and the calling code will block until the cancellation message -- can be processed. 
-- Luckily, we can async this loop and carry on as usual due to how we have the channel setup. async $ @@ -92,7 +92,7 @@ pushNotification runningFlag targetDomain (msg, envelope) = do Left eBN -> do Log.err $ Log.msg - ( Log.val "Cannot parse a queued message as s notification " + ( Log.val "Cannot parse a queued message as a notification " <> "nor as a bundle; the message will be ignored" ) . Log.field "domain" (domainText targetDomain) @@ -201,9 +201,9 @@ pairedMaximumOn f = maximumBy (compare `on` snd) . map (id &&& f) -- Consumers is passed in explicitly so that cleanup code has a reference to the consumer tags. startPusher :: RabbitMqAdmin.AdminAPI (Servant.AsClientT IO) -> IORef (Map Domain (Q.ConsumerTag, MVar ())) -> Q.Channel -> AppT IO () startPusher adminClient consumersRef chan = do + markAsWorking BackendNotificationPusher -- This ensures that we receive notifications 1 by 1 which ensures they are -- delivered in order. - markAsWorking BackendNotificationPusher lift $ Q.qos chan 0 1 False -- Make sure threads aren't dangling if/when this async thread is killed let cleanup :: (Exception e, MonadThrow m, MonadIO m) => e -> m () @@ -291,6 +291,7 @@ getRemoteDomains adminClient = do . Log.field "queue" ("backend-notifications." <> d) . 
Log.field "error" e +-- FUTUREWORK: rework this in the vein of DeadLetterWatcher startWorker :: AmqpEndpoint -> AppT IO (IORef (Maybe Q.Channel), IORef (Map Domain (Q.ConsumerTag, MVar ()))) startWorker rabbitmqOpts = do env <- ask diff --git a/services/background-worker/src/Wire/BackgroundWorker.hs b/services/background-worker/src/Wire/BackgroundWorker.hs index 17b45f71ecd..d71ca7658e6 100644 --- a/services/background-worker/src/Wire/BackgroundWorker.hs +++ b/services/background-worker/src/Wire/BackgroundWorker.hs @@ -2,6 +2,7 @@ module Wire.BackgroundWorker where +import Control.Concurrent.Async (cancel) import Data.Domain import Data.Map.Strict qualified as Map import Data.Metrics.Servant qualified as Metrics @@ -14,6 +15,7 @@ import Servant import Servant.Server.Generic import System.Logger qualified as Log import Util.Options +import Wire.BackendDeadUserNotificationWatcher qualified as DeadUserNotificationWatcher import Wire.BackendNotificationPusher qualified as BackendNotificationPusher import Wire.BackgroundWorker.Env import Wire.BackgroundWorker.Health qualified as Health @@ -24,10 +26,14 @@ run opts = do env <- mkEnv opts let amqpEP = either id demoteOpts opts.rabbitmq.unRabbitMqOpts (notifChanRef, notifConsumersRef) <- runAppT env $ BackendNotificationPusher.startWorker amqpEP + deadWatcherAsync <- runAppT env $ DeadUserNotificationWatcher.startWorker amqpEP let -- cleanup will run in a new thread when the signal is caught, so we need to use IORefs and -- specific exception types to message threads to clean up l = logger env cleanup = do + -- cancel the dead letter watcher + cancel deadWatcherAsync + -- Notification pusher thread Log.info l $ Log.msg (Log.val "Cancelling the notification pusher thread") readIORef notifChanRef >>= traverse_ \chan -> do diff --git a/services/background-worker/src/Wire/BackgroundWorker/Env.hs b/services/background-worker/src/Wire/BackgroundWorker/Env.hs index dcf89d56d41..1df7805d476 100644 --- 
a/services/background-worker/src/Wire/BackgroundWorker/Env.hs +++ b/services/background-worker/src/Wire/BackgroundWorker/Env.hs @@ -3,6 +3,8 @@ module Wire.BackgroundWorker.Env where +import Cassandra (ClientState) +import Cassandra.Util (defInitCassandra) import Control.Monad.Base import Control.Monad.Catch import Control.Monad.Trans.Control @@ -27,6 +29,7 @@ type IsWorking = Bool -- | Eventually this will be a sum type of all the types of workers data Worker = BackendNotificationPusher + | BackendDeadUserNoticationWatcher deriving (Show, Eq, Ord) data Env = Env @@ -39,7 +42,8 @@ data Env = Env defederationTimeout :: ResponseTimeout, backendNotificationMetrics :: BackendNotificationMetrics, backendNotificationsConfig :: BackendNotificationsConfig, - statuses :: IORef (Map Worker IsWorking) + statuses :: IORef (Map Worker IsWorking), + cassandra :: ClientState } data BackendNotificationMetrics = BackendNotificationMetrics @@ -57,8 +61,9 @@ mkBackendNotificationMetrics = mkEnv :: Opts -> IO Env mkEnv opts = do - http2Manager <- initHttp2Manager logger <- Log.mkLogger opts.logLevel Nothing opts.logFormat + cassandra <- defInitCassandra opts.cassandra logger + http2Manager <- initHttp2Manager httpManager <- newManager defaultManagerSettings let federatorInternal = opts.federatorInternal defederationTimeout = diff --git a/services/background-worker/src/Wire/BackgroundWorker/Options.hs b/services/background-worker/src/Wire/BackgroundWorker/Options.hs index cdbeb1e5024..f9055d89e0b 100644 --- a/services/background-worker/src/Wire/BackgroundWorker/Options.hs +++ b/services/background-worker/src/Wire/BackgroundWorker/Options.hs @@ -14,7 +14,8 @@ data Opts = Opts rabbitmq :: !RabbitMqOpts, -- | Seconds, Nothing for no timeout defederationTimeout :: Maybe Int, - backendNotificationPusher :: BackendNotificationsConfig + backendNotificationPusher :: BackendNotificationsConfig, + cassandra :: CassandraOpts } deriving (Show, Generic) diff --git 
a/services/background-worker/test/Test/Wire/BackendNotificationPusherSpec.hs b/services/background-worker/test/Test/Wire/BackendNotificationPusherSpec.hs index 416a2653f82..72e1a17840f 100644 --- a/services/background-worker/test/Test/Wire/BackendNotificationPusherSpec.hs +++ b/services/background-worker/test/Test/Wire/BackendNotificationPusherSpec.hs @@ -267,6 +267,7 @@ spec = do ] logger <- Logger.new Logger.defSettings httpManager <- newManager defaultManagerSettings + let cassandra = undefined let federatorInternal = Endpoint "localhost" 8097 http2Manager = undefined statuses = undefined @@ -283,6 +284,7 @@ spec = do it "should retry fetching domains if a request fails" $ do mockAdmin <- newMockRabbitMqAdmin True ["backend-notifications.foo.example"] logger <- Logger.new Logger.defSettings + let cassandra = undefined httpManager <- newManager defaultManagerSettings let federatorInternal = Endpoint "localhost" 8097 http2Manager = undefined diff --git a/services/background-worker/test/Test/Wire/Util.hs b/services/background-worker/test/Test/Wire/Util.hs index 57ccf6bf0e1..fb06a79f686 100644 --- a/services/background-worker/test/Test/Wire/Util.hs +++ b/services/background-worker/test/Test/Wire/Util.hs @@ -14,6 +14,7 @@ testEnv :: IO Env testEnv = do http2Manager <- initHttp2Manager logger <- Logger.new Logger.defSettings + let cassandra = undefined statuses <- newIORef mempty backendNotificationMetrics <- mkBackendNotificationMetrics httpManager <- newManager defaultManagerSettings diff --git a/services/brig/src/Brig/API/Client.hs b/services/brig/src/Brig/API/Client.hs index 9a94f880659..d7db42be04b 100644 --- a/services/brig/src/Brig/API/Client.hs +++ b/services/brig/src/Brig/API/Client.hs @@ -47,6 +47,8 @@ module Brig.API.Client ) where +import Brig.API.Error (clientError) +import Brig.API.Handler (Handler) import Brig.API.Types import Brig.API.Util import Brig.App @@ -76,13 +78,14 @@ import Data.Domain import Data.HavePendingInvitations import Data.Id 
(ClientId, ConnId, UserId) import Data.List.Split (chunksOf) -import Data.Map.Strict qualified as Map +import Data.Map.Strict qualified as Map hiding ((\\)) import Data.Misc (PlainTextPassword6) import Data.Qualified +import Data.Set ((\\)) import Data.Set qualified as Set import Data.Text.Encoding qualified as T import Data.Text.Encoding.Error -import Imports +import Imports hiding ((\\)) import Network.HTTP.Types.Method (StdMethod) import Network.Wai.Utilities import Polysemy @@ -201,8 +204,8 @@ addClientWithReAuthPolicy policy luid@(tUnqualified -> u) con new = do >>= maybe (throwE (ClientUserNotFound u)) pure verifyCode (newClientVerificationCode new) luid maxPermClients <- fromMaybe Opt.defUserMaxPermClients <$> asks (.settings.userMaxPermClients) - let caps :: Maybe ClientCapabilityList - caps = updlhdev $ newClientCapabilities new + let mCaps :: Maybe ClientCapabilityList + mCaps = updlhdev $ newClientCapabilities new where updlhdev :: Maybe ClientCapabilityList -> Maybe ClientCapabilityList updlhdev = @@ -211,12 +214,14 @@ addClientWithReAuthPolicy policy luid@(tUnqualified -> u) con new = do else id lhcaps = ClientSupportsLegalholdImplicitConsent (clt0, old, count) <- - Data.addClientWithReAuthPolicy policy luid clientId' new maxPermClients caps + (Data.addClientWithReAuthPolicy policy luid clientId' new maxPermClients mCaps) !>> ClientDataError let clt = clt0 {clientMLSPublicKeys = newClientMLSPublicKeys new} + when (ClientSupportsConsumableNotifications `Set.member` (foldMap fromClientCapabilityList mCaps)) $ lift $ liftSem $ do + setupConsumableNotifications u clt.clientId lift $ do for_ old $ execDelete u con - liftSem $ GalleyAPIAccess.newClient u (clientId clt) + liftSem $ GalleyAPIAccess.newClient u clt.clientId liftSem $ Intra.onClientEvent u con (ClientAdded clt) when (clientType clt == LegalHoldClientType) $ liftSem $ Events.generateUserEvent u con (UserLegalHoldEnabled u) when (count > 1) $ @@ -239,17 +244,32 @@ addClientWithReAuthPolicy policy 
luid@(tUnqualified -> u) con new = do VerificationCodeNoPendingCode -> throwE ClientCodeAuthenticationFailed VerificationCodeNoEmail -> throwE ClientCodeAuthenticationFailed -updateClient :: (MonadClient m) => UserId -> ClientId -> UpdateClient -> ExceptT ClientError m () -updateClient u c r = do - client <- lift (Data.lookupClient u c) >>= maybe (throwE ClientNotFound) pure - for_ (updateClientLabel r) $ lift . Data.updateClientLabel u c . Just - for_ (updateClientCapabilities r) $ \caps' -> do - if client.clientCapabilities.fromClientCapabilityList `Set.isSubsetOf` caps'.fromClientCapabilityList - then lift . Data.updateClientCapabilities u c . Just $ caps' - else throwE ClientCapabilitiesCannotBeRemoved - let lk = maybeToList (unpackLastPrekey <$> updateClientLastKey r) - Data.updatePrekeys u c (lk ++ updateClientPrekeys r) !>> ClientDataError - Data.addMLSPublicKeys u c (Map.assocs (updateClientMLSPublicKeys r)) !>> ClientDataError +updateClient :: + (Member NotificationSubsystem r) => + UserId -> + ClientId -> + UpdateClient -> + (Handler r) () +updateClient uid cid req = do + client <- wrapClientE (lift (Data.lookupClient uid cid) >>= maybe (throwE ClientNotFound) pure) !>> clientError + wrapClientE $ for_ req.updateClientLabel $ lift . Data.updateClientLabel uid cid . Just + for_ req.updateClientCapabilities $ \caps -> do + if client.clientCapabilities.fromClientCapabilityList `Set.isSubsetOf` caps.fromClientCapabilityList + then do + -- first set up the notification queues then save the data is more robust than the other way around + let addedCapabilities = caps.fromClientCapabilityList \\ client.clientCapabilities.fromClientCapabilityList + when (ClientSupportsConsumableNotifications `Set.member` addedCapabilities) $ lift $ liftSem $ do + setupConsumableNotifications uid cid + wrapClientE $ lift . Data.updateClientCapabilities uid cid . 
Just $ caps + else throwE $ clientError ClientCapabilitiesCannotBeRemoved + let lk = maybeToList (unpackLastPrekey <$> req.updateClientLastKey) + wrapClientE + ( do + Data.updatePrekeys uid cid (lk ++ req.updateClientPrekeys) + Data.addMLSPublicKeys uid cid (Map.assocs req.updateClientMLSPublicKeys) + ) + !>> ClientDataError + !>> clientError -- nb. We must ensure that the set of clients known to brig is always -- a superset of the clients known to galley. diff --git a/services/brig/src/Brig/API/Error.hs b/services/brig/src/Brig/API/Error.hs index 289639b0c40..bde50f54b41 100644 --- a/services/brig/src/Brig/API/Error.hs +++ b/services/brig/src/Brig/API/Error.hs @@ -303,9 +303,6 @@ insufficientTeamPermissions = errorToWai @'E.InsufficientTeamPermissions noBindingTeam :: Wai.Error noBindingTeam = Wai.mkError status403 "no-binding-team" "Operation allowed only on binding teams" -propertyManagedByScim :: LText -> Wai.Error -propertyManagedByScim prop = Wai.mkError status403 "managed-by-scim" $ "Updating \"" <> prop <> "\" is not allowed, because it is managed by SCIM" - sameBindingTeamUsers :: Wai.Error sameBindingTeamUsers = Wai.mkError status403 "same-binding-team-users" "Operation not allowed to binding team users." 
diff --git a/services/brig/src/Brig/API/Public.hs b/services/brig/src/Brig/API/Public.hs index ff22f86f8d3..3a46223946a 100644 --- a/services/brig/src/Brig/API/Public.hs +++ b/services/brig/src/Brig/API/Public.hs @@ -173,7 +173,7 @@ import Wire.UserKeyStore import Wire.UserSearch.Types import Wire.UserStore (UserStore) import Wire.UserStore qualified as UserStore -import Wire.UserSubsystem hiding (checkHandle, checkHandles, removeEmail, requestEmailChange) +import Wire.UserSubsystem hiding (checkHandle, checkHandles, requestEmailChange) import Wire.UserSubsystem qualified as User import Wire.UserSubsystem.Error import Wire.UserSubsystem.UserSubsystemConfig @@ -362,8 +362,8 @@ servantSitemap :: Member VerificationCodeSubsystem r, Member (Concurrency 'Unsafe) r, Member BlockListStore r, - Member (ConnectionStore InternalPaging) r, Member IndexedUserStore r, + Member (ConnectionStore InternalPaging) r, Member HashPassword r, Member (Input UserSubsystemConfig) r ) => @@ -454,7 +454,7 @@ servantSitemap = userClientAPI = Named @"add-client-v6" addClient :<|> Named @"add-client" addClient - :<|> Named @"update-client" updateClient + :<|> Named @"update-client" API.updateClient :<|> Named @"delete-client" deleteClient :<|> Named @"list-clients-v6" listClients :<|> Named @"list-clients" listClients @@ -680,9 +680,6 @@ deleteClient :: deleteClient usr con clt body = API.rmClient usr con clt (Public.rmPassword body) !>> clientError -updateClient :: UserId -> ClientId -> Public.UpdateClient -> (Handler r) () -updateClient usr clt upd = wrapClientE (API.updateClient usr clt upd) !>> clientError - listClients :: UserId -> (Handler r) [Public.Client] listClients zusr = lift $ API.lookupLocalClients zusr diff --git a/services/brig/src/Brig/API/User.hs b/services/brig/src/Brig/API/User.hs index 223de6c1fac..513a1075723 100644 --- a/services/brig/src/Brig/API/User.hs +++ b/services/brig/src/Brig/API/User.hs @@ -34,7 +34,6 @@ module Brig.API.User Data.lookupName, Data.lookupUser, 
Data.lookupRichInfoMultiUsers, - removeEmail, revokeIdentity, deleteUserNoVerify, deleteUsersNoVerify, diff --git a/services/brig/src/Brig/App.hs b/services/brig/src/Brig/App.hs index eb7b06457e2..9785f028432 100644 --- a/services/brig/src/Brig/App.hs +++ b/services/brig/src/Brig/App.hs @@ -67,6 +67,7 @@ module Brig.App rabbitmqChannelLens, disabledVersionsLens, enableSFTFederationLens, + readChannel, -- * App Monad AppT (..), @@ -140,23 +141,31 @@ import OpenSSL.EVP.Digest (Digest, getDigestByName) import OpenSSL.Session (SSLOption (..)) import OpenSSL.Session qualified as SSL import Polysemy +import Polysemy.Error (throw) +import Polysemy.Error qualified as Polysemy import Polysemy.Fail import Polysemy.Final import Polysemy.Input (Input, input) +import Polysemy.TinyLog (TinyLog) +import Polysemy.TinyLog qualified as P import Prometheus import Ssl.Util import System.FSNotify qualified as FS import System.Logger.Class hiding (Settings, settings) import System.Logger.Class qualified as LC import System.Logger.Extended qualified as Log +import System.Timeout (timeout) import Util.Options import Util.SuffixNamer +import Wire.API.Error (errorToWai) +import Wire.API.Error.Brig qualified as E import Wire.API.Federation.Error (federationNotImplemented) import Wire.API.Locale (Locale) import Wire.API.Routes.Version import Wire.API.User.Identity import Wire.EmailSending.SMTP qualified as SMTP import Wire.EmailSubsystem.Template (TemplateBranding, forLocale) +import Wire.Error (HttpError (StdError)) import Wire.SessionStore import Wire.SessionStore.Cassandra import Wire.UserKeyStore @@ -202,23 +211,15 @@ data Env = Env indexEnv :: IndexEnv, randomPrekeyLocalLock :: Maybe (MVar ()), keyPackageLocalLock :: MVar (), - rabbitmqChannel :: Maybe (MVar Q.Channel), + rabbitmqChannel :: MVar Q.Channel, disabledVersions :: Set Version, enableSFTFederation :: Maybe Bool } makeLensesWith (lensRules & lensField .~ suffixNamer) ''Env -validateOptions :: Opts -> IO () -validateOptions o = 
- case (o.federatorInternal, o.rabbitmq) of - (Nothing, Just _) -> error "RabbitMQ config is specified and federator is not, please specify both or none" - (Just _, Nothing) -> error "Federator is specified and RabbitMQ config is not, please specify both or none" - _ -> pure () - newEnv :: Opts -> IO Env newEnv opts = do - validateOptions opts Just md5 <- getDigestByName "MD5" Just sha256 <- getDigestByName "SHA256" Just sha512 <- getDigestByName "SHA512" @@ -261,7 +262,7 @@ newEnv opts = do Log.info lgr $ Log.msg (Log.val "randomPrekeys: not active; using dynamoDB instead.") pure Nothing kpLock <- newMVar () - rabbitChan <- traverse (Q.mkRabbitMqChannelMVar lgr) opts.rabbitmq + rabbitChan <- Q.mkRabbitMqChannelMVar lgr opts.rabbitmq let allDisabledVersions = foldMap expandVersionExp opts.settings.disabledAPIVersions idxEnv <- mkIndexEnv opts.elasticsearch lgr (Opt.galley opts) mgr pure $! @@ -628,6 +629,21 @@ instance (MonadIndexIO (AppT r)) => MonadIndexIO (ExceptT err (AppT r)) where instance HasRequestId (AppT r) where getRequestId = asks (.requestId) +readChannel :: + ( Member (Embed IO) r, + Member (Polysemy.Error HttpError) r, + Member TinyLog r + ) => + MVar Q.Channel -> + Sem r Q.Channel +readChannel chanMVar = do + mChan <- liftIO $ timeout 1_000_000 $ readMVar chanMVar + maybe onNothing pure mChan + where + onNothing = do + P.err $ Log.msg @Text "failed to connect to RabbitMQ" + throw $ StdError $ errorToWai @'E.NotificationQueueConnectionError + ------------------------------------------------------------------------------- -- Ad hoc interpreters diff --git a/services/brig/src/Brig/CanonicalInterpreter.hs b/services/brig/src/Brig/CanonicalInterpreter.hs index 18e53608df1..4fc05542a1d 100644 --- a/services/brig/src/Brig/CanonicalInterpreter.hs +++ b/services/brig/src/Brig/CanonicalInterpreter.hs @@ -22,6 +22,7 @@ import Control.Monad.Catch (throwM) import Data.Qualified (Local, toLocalUnsafe) import Data.Time.Clock (UTCTime, getCurrentTime) import 
Imports +import Network.AMQP import Polysemy import Polysemy.Async import Polysemy.Conc @@ -114,6 +115,8 @@ type BrigLowerLevelEffects = PropertySubsystem, DeleteQueue, Wire.Events.Events, + NotificationSubsystem, + Input Channel, Error UserSubsystemError, Error TeamInvitationSubsystemError, Error AuthenticationSubsystemError, @@ -140,7 +143,6 @@ type BrigLowerLevelEffects = Input (Local ()), Input (Maybe AllowlistEmailDomains), Input TeamTemplates, - NotificationSubsystem, GundeckAPIAccess, FederationConfigStore, Jwk, @@ -250,7 +252,6 @@ runBrigToIO e (AppT ma) = do . interpretJwk . interpretFederationDomainConfig e.casClient e.settings.federationStrategy (foldMap (remotesMapFromCfgFile . fmap (.federationDomainConfig)) e.settings.federationDomainConfigs) . runGundeckAPIAccess e.gundeckEndpoint - . runNotificationSubsystemGundeck (defaultNotificationSubsystemConfig e.requestId) . runInputConst (teamTemplatesNoLocale e) . runInputConst e.settings.allowlistEmailDomains . runInputConst (toLocalUnsafe e.settings.federationDomain ()) @@ -277,6 +278,8 @@ runBrigToIO e (AppT ma) = do . mapError authenticationSubsystemErrorToHttpError . mapError teamInvitationErrorToHttpError . mapError userSubsystemErrorToHttpError + . runInputSem (readChannel e.rabbitmqChannel) + . runNotificationSubsystemGundeck (defaultNotificationSubsystemConfig e.requestId) . runEvents . runDeleteQueue e.internalEvents . 
interpretPropertySubsystem propertySubsystemConfig diff --git a/services/brig/src/Brig/Federation/Client.hs b/services/brig/src/Brig/Federation/Client.hs index 9c5302689d2..637644928cf 100644 --- a/services/brig/src/Brig/Federation/Client.hs +++ b/services/brig/src/Brig/Federation/Client.hs @@ -141,16 +141,9 @@ notifyUserDeleted self remotes = do let remoteConnections = tUnqualified remotes let notif = UserDeletedConnectionsNotification (tUnqualified self) remoteConnections remoteDomain = tDomain remotes - asks (.rabbitmqChannel) >>= \case - Just chanVar -> do - enqueueNotification (tDomain self) remoteDomain Q.Persistent chanVar $ - fedQueueClient @'OnUserDeletedConnectionsTag notif - Nothing -> - Log.err $ - Log.msg ("Federation error while notifying remote backends of a user deletion." :: ByteString) - . Log.field "user_id" (show self) - . Log.field "domain" (domainText remoteDomain) - . Log.field "error" (show FederationNotConfigured) + chanVar <- asks (.rabbitmqChannel) + enqueueNotification (tDomain self) remoteDomain Q.Persistent chanVar $ + fedQueueClient @'OnUserDeletedConnectionsTag notif -- | Enqueues notifications in RabbitMQ. Retries 3 times with a delay of 1s. enqueueNotification :: (MonadIO m, MonadMask m, Log.MonadLogger m, MonadReader Env m) => Domain -> Domain -> Q.DeliveryMode -> MVar Q.Channel -> FedQueueClient c () -> m () diff --git a/services/brig/src/Brig/Options.hs b/services/brig/src/Brig/Options.hs index 04da3707661..1a81618e8b6 100644 --- a/services/brig/src/Brig/Options.hs +++ b/services/brig/src/Brig/Options.hs @@ -389,7 +389,7 @@ data Opts = Opts -- | SFT Federation multiSFT :: !(Maybe Bool), -- | RabbitMQ settings, required when federation is enabled. 
- rabbitmq :: !(Maybe AmqpEndpoint), + rabbitmq :: !AmqpEndpoint, -- | AWS settings aws :: !AWSOpts, -- | Enable Random Prekey Strategy diff --git a/services/brig/test/integration/API/User/Handles.hs b/services/brig/test/integration/API/User/Handles.hs index c505e9b824a..283c5abaa4b 100644 --- a/services/brig/test/integration/API/User/Handles.hs +++ b/services/brig/test/integration/API/User/Handles.hs @@ -316,7 +316,7 @@ testGetUserByQualifiedHandleFailure brig = do testGetUserByQualifiedHandleNoFederation :: Opt.Opts -> Brig -> Http () testGetUserByQualifiedHandleNoFederation opt brig = do - let newOpts = opt {Opt.federatorInternal = Nothing, Opt.rabbitmq = Nothing} + let newOpts = opt {Opt.federatorInternal = Nothing} someUser <- randomUser brig withSettingsOverrides newOpts $ get diff --git a/services/cannon/cannon.cabal b/services/cannon/cannon.cabal index 1eb1b4cdd26..4091540a846 100644 --- a/services/cannon/cannon.cabal +++ b/services/cannon/cannon.cabal @@ -23,6 +23,7 @@ library Cannon.App Cannon.Dict Cannon.Options + Cannon.RabbitMqConsumerApp Cannon.Run Cannon.Types Cannon.WS @@ -79,12 +80,14 @@ library build-depends: aeson >=2.0.1.0 + , amqp , api-field-json-th >=0.1.0.2 , async >=2.0 , base >=4.6 && <5 , bilge >=0.12 , bytestring >=0.10 , bytestring-conversion >=0.2 + , cassandra-util , conduit >=1.3.4.2 , data-timeout >=0.3 , exceptions >=0.6 @@ -95,6 +98,7 @@ library , hs-opentelemetry-sdk , http-types >=0.8 , imports + , kan-extensions , lens >=4.4 , lens-family-core >=1.1 , metrics-wai >=0.4 diff --git a/services/cannon/cannon.integration.yaml b/services/cannon/cannon.integration.yaml index e7e7985fea8..9aeca3249f5 100644 --- a/services/cannon/cannon.integration.yaml +++ b/services/cannon/cannon.integration.yaml @@ -12,10 +12,24 @@ cannon: externalHost: 127.0.0.1 #externalHostFile: /etc/wire/cannon/cannon-host.txt +cassandra: + endpoint: + host: 127.0.0.1 + port: 9042 + keyspace: gundeck_test + gundeck: host: 127.0.0.1 port: 8086 +rabbitmq: + host: 
127.0.0.1 + port: 5671 + vHost: / + enableTls: true + caCert: test/resources/rabbitmq-ca.pem + insecureSkipVerifyTls: false + drainOpts: gracePeriodSeconds: 1 millisecondsBetweenBatches: 500 diff --git a/services/cannon/cannon2.integration.yaml b/services/cannon/cannon2.integration.yaml index cb5fb6c371e..1fc81233c38 100644 --- a/services/cannon/cannon2.integration.yaml +++ b/services/cannon/cannon2.integration.yaml @@ -16,6 +16,14 @@ gundeck: host: 127.0.0.1 port: 8086 +rabbitmq: + host: 127.0.0.1 + port: 5671 + vHost: / + enableTls: true + caCert: test/resources/rabbitmq-ca.pem + insecureSkipVerifyTls: false + drainOpts: gracePeriodSeconds: 1 millisecondsBetweenBatches: 5 diff --git a/services/cannon/default.nix b/services/cannon/default.nix index c0e94ff02f7..80ad8b8e3ca 100644 --- a/services/cannon/default.nix +++ b/services/cannon/default.nix @@ -4,12 +4,14 @@ # dependencies are added or removed. { mkDerivation , aeson +, amqp , api-field-json-th , async , base , bilge , bytestring , bytestring-conversion +, cassandra-util , conduit , criterion , data-timeout @@ -22,6 +24,7 @@ , hs-opentelemetry-sdk , http-types , imports +, kan-extensions , lens , lens-family-core , lib @@ -61,12 +64,14 @@ mkDerivation { isExecutable = true; libraryHaskellDepends = [ aeson + amqp api-field-json-th async base bilge bytestring bytestring-conversion + cassandra-util conduit data-timeout exceptions @@ -77,6 +82,7 @@ mkDerivation { hs-opentelemetry-sdk http-types imports + kan-extensions lens lens-family-core metrics-wai diff --git a/services/cannon/src/Cannon/API/Public.hs b/services/cannon/src/Cannon/API/Public.hs index 4a559f9f17c..e895429ae8b 100644 --- a/services/cannon/src/Cannon/API/Public.hs +++ b/services/cannon/src/Cannon/API/Public.hs @@ -21,6 +21,7 @@ module Cannon.API.Public where import Cannon.App (wsapp) +import Cannon.RabbitMqConsumerApp (rabbitMQWebSocketApp) import Cannon.Types import Cannon.WS import Control.Monad.IO.Class @@ -32,9 +33,16 @@ import 
Wire.API.Routes.Named import Wire.API.Routes.Public.Cannon publicAPIServer :: ServerT CannonAPI Cannon -publicAPIServer = Named @"await-notifications" streamData +publicAPIServer = + Named @"await-notifications" streamData + :<|> Named @"consume-events" consumeEvents streamData :: UserId -> ConnId -> Maybe ClientId -> PendingConnection -> Cannon () streamData userId connId clientId con = do e <- wsenv liftIO $ wsapp (mkKey userId connId) clientId e con + +consumeEvents :: UserId -> ClientId -> PendingConnection -> Cannon () +consumeEvents userId clientId con = do + e <- wsenv + liftIO $ rabbitMQWebSocketApp userId clientId e con diff --git a/services/cannon/src/Cannon/App.hs b/services/cannon/src/Cannon/App.hs index 770bf0ff499..2ad956a087c 100644 --- a/services/cannon/src/Cannon/App.hs +++ b/services/cannon/src/Cannon/App.hs @@ -15,12 +15,7 @@ -- You should have received a copy of the GNU Affero General Public License along -- with this program. If not, see . -module Cannon.App - ( wsapp, - terminate, - maxPingInterval, - ) -where +module Cannon.App where import Cannon.WS import Control.Concurrent.Async diff --git a/services/cannon/src/Cannon/Options.hs b/services/cannon/src/Cannon/Options.hs index ae301862e1b..39d0848d2b5 100644 --- a/services/cannon/src/Cannon/Options.hs +++ b/services/cannon/src/Cannon/Options.hs @@ -30,6 +30,8 @@ module Cannon.Options logNetStrings, logFormat, drainOpts, + rabbitmq, + cassandraOpts, Opts, gracePeriodSeconds, millisecondsBetweenBatches, @@ -39,9 +41,12 @@ module Cannon.Options ) where +import Cassandra.Options (CassandraOpts) import Control.Lens (makeFields) +import Data.Aeson import Data.Aeson.APIFieldJsonTH import Imports +import Network.AMQP.Extended (AmqpEndpoint) import System.Logger.Extended (Level, LogFormat) import Wire.API.Routes.Version @@ -87,14 +92,27 @@ deriveApiFieldJSON ''DrainOpts data Opts = Opts { _optsCannon :: !Cannon, _optsGundeck :: !Gundeck, + _optsRabbitmq :: !AmqpEndpoint, _optsLogLevel :: !Level, 
_optsLogNetStrings :: !(Maybe (Last Bool)), _optsLogFormat :: !(Maybe (Last LogFormat)), _optsDrainOpts :: DrainOpts, - _optsDisabledAPIVersions :: !(Set VersionExp) + _optsDisabledAPIVersions :: !(Set VersionExp), + _optsCassandraOpts :: !CassandraOpts } - deriving (Eq, Show, Generic) + deriving (Show, Generic) makeFields ''Opts -deriveApiFieldJSON ''Opts +instance FromJSON Opts where + parseJSON = withObject "CannonOpts" $ \o -> + Opts + <$> o .: "cannon" + <*> o .: "gundeck" + <*> o .: "rabbitmq" + <*> o .: "logLevel" + <*> o .:? "logNetStrings" + <*> o .:? "logFormat" + <*> o .: "drainOpts" + <*> o .: "disabledAPIVersions" + <*> o .: "cassandra" diff --git a/services/cannon/src/Cannon/RabbitMqConsumerApp.hs b/services/cannon/src/Cannon/RabbitMqConsumerApp.hs new file mode 100644 index 00000000000..34af8c0530e --- /dev/null +++ b/services/cannon/src/Cannon/RabbitMqConsumerApp.hs @@ -0,0 +1,259 @@ +{-# LANGUAGE RecordWildCards #-} + +module Cannon.RabbitMqConsumerApp where + +import Cannon.App (rejectOnError) +import Cannon.WS hiding (env) +import Cassandra as C +import Control.Concurrent.Async +import Control.Exception (Handler (..), bracket, catch, catches, throwIO, try) +import Control.Monad.Codensity +import Data.Aeson +import Data.Id +import Imports +import Network.AMQP qualified as Q +import Network.AMQP.Extended (withConnection) +import Network.WebSockets +import Network.WebSockets qualified as WS +import System.Logger qualified as Log +import Wire.API.Event.WebSocketProtocol +import Wire.API.Notification + +rabbitMQWebSocketApp :: UserId -> ClientId -> Env -> ServerApp +rabbitMQWebSocketApp uid cid e pendingConn = do + wsVar <- newEmptyMVar + msgVar <- newEmptyMVar + + bracket (openWebSocket wsVar) closeWebSocket $ \(wsConn, _) -> + ( do + sendFullSyncMessageIfNeeded wsVar wsConn uid cid e + sendNotifications wsConn msgVar wsVar + ) + `catches` [ handleClientMisbehaving wsConn, + handleWebSocketExceptions wsConn + ] + where + logClient = + Log.field 
"user" (idToText uid) + . Log.field "client" (clientToText cid) + + openWebSocket wsVar = do + wsConn <- + acceptRequest pendingConn + `catch` rejectOnError pendingConn + -- start a reader thread for client messages + -- this needs to run asynchronously in order to promptly react to + -- client-side connection termination + a <- async $ forever $ do + catch + ( do + msg <- getClientMessage wsConn + putMVar wsVar (Right msg) + ) + $ \err -> putMVar wsVar (Left err) + pure (wsConn, a) + + -- this is only needed in case of asynchronous exceptions + closeWebSocket (wsConn, a) = do + cancel a + logCloseWebsocket + -- ignore any exceptions when sending the close message + void . try @SomeException $ WS.sendClose wsConn ("" :: ByteString) + + -- Create a rabbitmq consumer that receives messages and saves them into an MVar + createConsumer :: + Q.Channel -> + MVar (Either Q.AMQPException EventData) -> + IO Q.ConsumerTag + createConsumer chan msgVar = do + Q.consumeMsgs chan (clientNotificationQueueName uid cid) Q.Ack $ + \(msg, envelope) -> case eitherDecode @QueuedNotification msg.msgBody of + Left err -> do + logParseError err + -- This message cannot be parsed, make sure it doesn't requeue. There + -- is no need to throw an error and kill the websocket as this is + -- probably caused by a bug or someone messing with RabbitMQ. + -- + -- The bug case is slightly dangerous as it could drop a lot of events + -- en masse, if at some point we decide that Events should not be + -- pushed as JSONs, hopefully we think of the parsing side if/when + -- that happens. + Q.rejectEnv envelope False + Right notif -> + putMVar msgVar . Right $ + EventData notif envelope.envDeliveryTag + + handleWebSocketExceptions wsConn = + Handler $ + \(err :: WS.ConnectionException) -> do + case err of + CloseRequest code reason -> + Log.debug e.logg $ + Log.msg (Log.val "Client requested to close connection") + . Log.field "status_code" code + . Log.field "reason" reason + . 
logClient + ConnectionClosed -> + Log.info e.logg $ + Log.msg (Log.val "Client closed tcp connection abruptly") + . logClient + _ -> do + Log.info e.logg $ + Log.msg (Log.val "Failed to receive message, closing websocket") + . Log.field "error" (displayException err) + . logClient + WS.sendCloseCode wsConn 1003 ("websocket-failure" :: ByteString) + + handleClientMisbehaving wsConn = + Handler $ \(err :: WebSocketServerError) -> do + case err of + FailedToParseClientMessage _ -> do + Log.info e.logg $ + Log.msg (Log.val "Failed to parse received message, closing websocket") + . logClient + WS.sendCloseCode wsConn 1003 ("failed-to-parse" :: ByteString) + UnexpectedAck -> do + Log.info e.logg $ + Log.msg (Log.val "Client sent unexpected ack message") + . logClient + WS.sendCloseCode wsConn 1003 ("unexpected-ack" :: ByteString) + sendNotifications :: + WS.Connection -> + MVar (Either Q.AMQPException EventData) -> + MVar (Either ConnectionException MessageClientToServer) -> + IO () + sendNotifications wsConn msgVar wsVar = lowerCodensity $ do + -- create rabbitmq connection + conn <- Codensity $ withConnection e.logg e.rabbitmq + + -- create rabbitmq channel + amqpChan <- Codensity $ bracket (Q.openChannel conn) Q.closeChannel + + -- propagate rabbitmq connection failure + lift $ Q.addConnectionClosedHandler conn True $ do + putMVar msgVar $ + Left (Q.ConnectionClosedException Q.Normal "") + + -- register consumer that pushes rabbitmq messages into msgVar + void $ + Codensity $ + bracket + (createConsumer amqpChan msgVar) + (Q.cancelConsumer amqpChan) + + -- get data from msgVar and push to client + let consumeRabbitMq = forever $ do + eventData' <- takeMVar msgVar + either throwIO pure eventData' >>= \eventData -> do + logEvent eventData.event + catch (WS.sendBinaryData wsConn (encode (EventMessage eventData))) $ + \(err :: SomeException) -> do + logSendFailure err + throwIO err + + -- get ack from wsVar and forward to rabbitmq + let consumeWebsocket = forever $ do + v 
<- takeMVar wsVar + either throwIO pure v >>= \case + AckFullSync -> throwIO UnexpectedAck + AckMessage ackData -> do + logAckReceived ackData + void $ Q.ackMsg amqpChan ackData.deliveryTag ackData.multiple + + -- run both loops concurrently, so that + -- - notifications are delivered without having to wait for acks + -- - exceptions on either side do not cause a deadlock + lift $ concurrently_ consumeRabbitMq consumeWebsocket + + logParseError :: String -> IO () + logParseError err = + Log.err e.logg $ + Log.msg (Log.val "failed to decode event from the queue as a JSON") + . logClient + . Log.field "parse_error" err + + logEvent :: QueuedNotification -> IO () + logEvent event = + Log.debug e.logg $ + Log.msg (Log.val "got event") + . logClient + . Log.field "event" (encode event) + + logSendFailure :: SomeException -> IO () + logSendFailure err = + Log.err e.logg $ + Log.msg (Log.val "Pushing to WS failed, closing connection") + . Log.field "error" (displayException err) + . logClient + + logAckReceived :: AckData -> IO () + logAckReceived ackData = + Log.debug e.logg $ + Log.msg (Log.val "Received ACK") + . Log.field "delivery_tag" ackData.deliveryTag + . Log.field "multiple" ackData.multiple + . logClient + + logCloseWebsocket :: IO () + logCloseWebsocket = + Log.debug e.logg $ + Log.msg (Log.val "Closing the websocket") + . logClient + +-- | Check if client has missed messages. If so, send a full synchronisation +-- message and wait for the corresponding ack. 
+sendFullSyncMessageIfNeeded :: + MVar (Either ConnectionException MessageClientToServer) -> + WS.Connection -> + UserId -> + ClientId -> + Env -> + IO () +sendFullSyncMessageIfNeeded wsVar wsConn uid cid env = do + row <- C.runClient env.cassandra do + retry x5 $ query1 q (params LocalQuorum (uid, cid)) + for_ row $ \_ -> sendFullSyncMessage uid cid wsVar wsConn env + where + q :: PrepQuery R (UserId, ClientId) (Identity (Maybe UserId)) + q = + [sql| SELECT user_id FROM missed_notifications + WHERE user_id = ? and client_id = ? + |] + +sendFullSyncMessage :: + UserId -> + ClientId -> + MVar (Either ConnectionException MessageClientToServer) -> + WS.Connection -> + Env -> + IO () +sendFullSyncMessage uid cid wsVar wsConn env = do + let event = encode EventFullSync + WS.sendBinaryData wsConn event + res <- takeMVar wsVar >>= either throwIO pure + case res of + AckMessage _ -> throwIO UnexpectedAck + AckFullSync -> + C.runClient env.cassandra do + retry x1 $ write delete (params LocalQuorum (uid, cid)) + where + delete :: PrepQuery W (UserId, ClientId) () + delete = + [sql| + DELETE FROM missed_notifications + WHERE user_id = ? and client_id = ? 
+ |] + +getClientMessage :: WS.Connection -> IO MessageClientToServer +getClientMessage wsConn = do + msg <- WS.receiveData wsConn + case eitherDecode msg of + Left err -> throwIO (FailedToParseClientMessage err) + Right m -> pure m + +data WebSocketServerError + = FailedToParseClientMessage String + | UnexpectedAck + deriving (Show) + +instance Exception WebSocketServerError diff --git a/services/cannon/src/Cannon/Run.hs b/services/cannon/src/Cannon/Run.hs index eefd22f4af5..cfb5806a3ff 100644 --- a/services/cannon/src/Cannon/Run.hs +++ b/services/cannon/src/Cannon/Run.hs @@ -28,7 +28,8 @@ import Cannon.App (maxPingInterval) import Cannon.Dict qualified as D import Cannon.Options import Cannon.Types (Cannon, applog, clients, env, mkEnv, runCannon, runCannonToServant) -import Cannon.WS hiding (env) +import Cannon.WS hiding (drainOpts, env) +import Cassandra.Util (defInitCassandra) import Control.Concurrent import Control.Concurrent.Async qualified as Async import Control.Exception qualified as E @@ -72,12 +73,14 @@ run o = withTracer \tracer -> do error "drainOpts.gracePeriodSeconds must not be set to 0." ext <- loadExternal g <- L.mkLogger (o ^. logLevel) (o ^. logNetStrings) (o ^. logFormat) + cassandra <- defInitCassandra (o ^. cassandraOpts) g e <- - mkEnv ext o g + mkEnv ext o cassandra g <$> D.empty 128 <*> newManager defaultManagerSettings {managerConnCount = 128} <*> createSystemRandom <*> mkClock + <*> pure (o ^. Cannon.Options.rabbitmq) refreshMetricsThread <- Async.async $ runCannon e refreshMetrics s <- newSettings $ Server (o ^. cannon . host) (o ^. cannon . port) (applog e) (Just idleTimeout) diff --git a/services/cannon/src/Cannon/Types.hs b/services/cannon/src/Cannon/Types.hs index 6fa37b78a65..c37f2d9f68c 100644 --- a/services/cannon/src/Cannon/Types.hs +++ b/services/cannon/src/Cannon/Types.hs @@ -18,11 +18,7 @@ -- with this program. If not, see . 
module Cannon.Types - ( Env, - opts, - applog, - dict, - env, + ( Env (..), Cannon, mapConcurrentlyCannon, mkEnv, @@ -39,12 +35,14 @@ import Cannon.Dict (Dict) import Cannon.Options import Cannon.WS (Clock, Key, Websocket) import Cannon.WS qualified as WS +import Cassandra (ClientState) import Control.Concurrent.Async (mapConcurrently) import Control.Lens ((^.)) import Control.Monad.Catch import Data.Id import Data.Text.Encoding import Imports +import Network.AMQP.Extended (AmqpEndpoint) import Prometheus import Servant qualified import System.Logger qualified as Logger @@ -94,15 +92,17 @@ instance HasRequestId Cannon where mkEnv :: ByteString -> Opts -> + ClientState -> Logger -> Dict Key Websocket -> Manager -> GenIO -> Clock -> + AmqpEndpoint -> Env -mkEnv external o l d p g t = +mkEnv external o cs l d p g t rabbitmqOpts = Env o l d (RequestId defRequestId) $ - WS.env external (o ^. cannon . port) (encodeUtf8 $ o ^. gundeck . host) (o ^. gundeck . port) l p d g t (o ^. drainOpts) + WS.env external (o ^. cannon . port) (encodeUtf8 $ o ^. gundeck . host) (o ^. gundeck . port) l p d g t (o ^. drainOpts) rabbitmqOpts cs runCannon :: Env -> Cannon a -> IO a runCannon e c = runReaderT (unCannon c) e diff --git a/services/cannon/src/Cannon/WS.hs b/services/cannon/src/Cannon/WS.hs index ea106f4cf03..b6be5e5b2ba 100644 --- a/services/cannon/src/Cannon/WS.hs +++ b/services/cannon/src/Cannon/WS.hs @@ -18,7 +18,7 @@ -- with this program. If not, see . 
module Cannon.WS - ( Env, + ( Env (..), WS, env, runWS, @@ -52,6 +52,7 @@ import Bilge.Retry import Cannon.Dict (Dict) import Cannon.Dict qualified as D import Cannon.Options (DrainOpts, gracePeriodSeconds, millisecondsBetweenBatches, minBatchSize) +import Cassandra (ClientState) import Conduit import Control.Concurrent.Timeout import Control.Lens ((^.)) @@ -67,6 +68,7 @@ import Data.List.Extra (chunksOf) import Data.Text.Encoding (decodeUtf8) import Data.Timeout (TimeoutUnit (..), (#)) import Imports hiding (threadDelay) +import Network.AMQP.Extended import Network.HTTP.Types.Method import Network.HTTP.Types.Status import Network.Wai.Utilities.Error @@ -145,7 +147,9 @@ data Env = Env dict :: !(Dict Key Websocket), rand :: !GenIO, clock :: !Clock, - drainOpts :: DrainOpts + drainOpts :: DrainOpts, + rabbitmq :: !AmqpEndpoint, + cassandra :: ClientState } setRequestId :: RequestId -> Env -> Env @@ -191,8 +195,10 @@ env :: GenIO -> Clock -> DrainOpts -> + AmqpEndpoint -> + ClientState -> Env -env leh lp gh gp = Env leh lp (host gh . port gp $ empty) (RequestId defRequestId) +env leh lp gh gp = Env leh lp (Bilge.host gh . 
Bilge.port gp $ empty) (RequestId defRequestId) runWS :: (MonadIO m) => Env -> WS a -> m a runWS e m = liftIO $ runReaderT (_conn m) e diff --git a/services/cannon/test/resources/rabbitmq-ca.pem b/services/cannon/test/resources/rabbitmq-ca.pem new file mode 100644 index 00000000000..2aa8d89e4ac --- /dev/null +++ b/services/cannon/test/resources/rabbitmq-ca.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDJTCCAg2gAwIBAgIUBbMHNT+GZgCVyopxX3sciD+E5uowDQYJKoZIhvcNAQEL +BQAwIjEgMB4GA1UEAwwXcmFiYml0bXEuY2EuZXhhbXBsZS5jb20wHhcNMjQwOTAz +MTIwMzQwWhcNMzQwOTAxMTIwMzQwWjAiMSAwHgYDVQQDDBdyYWJiaXRtcS5jYS5l +eGFtcGxlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJP2rB1X +qxpRAE6hSYkYbfd/pdfOrVbSwsZqYj866ijrtZFh0+AWtjYUkiLFYGiXINZdns6S +LVP8afPCXKynjZ/2zzIpuvX51zhHtrulxBcKyN85gckm03KGLz5GNGFNl8CeYJfu +RWJSA+AOxkR28CkBBD5eR0cRc8j0E9buDtY36wmgqEtkDvAc4PvmgAIPL2KovmmM +ohGy2hHJZLKCA+QGzeLUqQx8MTF3RsajV8ttRg+wfUQM6wdMJbub93wmgLVfncaQ +dO9E/jEVr2kU0WeJ1kmxs40d1bd02U3/8omGyayRGX9qqfaF3g+oDzAoiF7LbDuC +7VNVEc8/PP1t6b0CAwEAAaNTMFEwHQYDVR0OBBYEFOv/4GK9l7p7p9nk2hf/59sD +PhEVMB8GA1UdIwQYMBaAFOv/4GK9l7p7p9nk2hf/59sDPhEVMA8GA1UdEwEB/wQF +MAMBAf8wDQYJKoZIhvcNAQELBQADggEBABt+JodEGOjnFA+VCnRWOGl1q4wlcEbl ++5mEuVwwWGzbispmJxIdf+FlOotonvhksGQUDZ3gr7FvLcsGy6OnOK2YBSLOcnRP +amKPaiQwB38VcxQEUOL+1ZqLsLTseGJUCkGk+OmfjInqCURS5jRUbVtYZiqkzD40 +7Rz5iyrXwv1vbuXpW2s/kUgD6dLrRwt1ydaxCbA3C92farZJFvpUwTyhAXUkKyPZ +Hgu5E/nppujH2h6nOJfHGcyaVHai7pDManjO1icWmfx+t2s94rdAEevvBu0k/qL4 +tXWWSh81MtGjLjQ88ozbmr7/LSo3KaAB7M/AnZdL3JjtmFy9eFhqQaY= +-----END CERTIFICATE----- diff --git a/services/galley/src/Galley/App.hs b/services/galley/src/Galley/App.hs index 6d29d6081ca..41dee1da8d2 100644 --- a/services/galley/src/Galley/App.hs +++ b/services/galley/src/Galley/App.hs @@ -292,7 +292,7 @@ evalGalley e = . interpretExternalAccess . runRpcWithHttp (e ^. manager) (e ^. reqId) . runGundeckAPIAccess (e ^. options . gundeck) - . runNotificationSubsystemGundeck (notificationSubssystemConfig e) + . 
runNotificationSubsystemGundeck (notificationSubsystemConfig e) . interpretSparAccess . interpretBrigAccess where diff --git a/services/galley/src/Galley/Env.hs b/services/galley/src/Galley/Env.hs index 9d88c703b86..d5c8bc23c67 100644 --- a/services/galley/src/Galley/Env.hs +++ b/services/galley/src/Galley/Env.hs @@ -109,8 +109,8 @@ currentFanoutLimit o = do let maxSize = fromIntegral (o ^. (O.settings . maxTeamSize)) unsafeRange (min maxSize optFanoutLimit) -notificationSubssystemConfig :: Env -> NotificationSubsystemConfig -notificationSubssystemConfig env = +notificationSubsystemConfig :: Env -> NotificationSubsystemConfig +notificationSubsystemConfig env = NotificationSubsystemConfig { chunkSize = defaultChunkSize, fanoutLimit = currentFanoutLimit env._options, diff --git a/services/gundeck/default.nix b/services/gundeck/default.nix index 7629d0712c5..9901752a620 100644 --- a/services/gundeck/default.nix +++ b/services/gundeck/default.nix @@ -9,6 +9,7 @@ , amazonka-core , amazonka-sns , amazonka-sqs +, amqp , async , attoparsec , auto-update @@ -22,6 +23,7 @@ , containers , criterion , crypton-x509-store +, data-timeout , errors , exceptions , extended @@ -69,6 +71,7 @@ , tasty-hunit , tasty-quickcheck , text +, these , time , tinylog , tls @@ -98,6 +101,7 @@ mkDerivation { amazonka-core amazonka-sns amazonka-sqs + amqp async attoparsec auto-update @@ -108,6 +112,7 @@ mkDerivation { cassandra-util containers crypton-x509-store + data-timeout errors exceptions extended @@ -135,6 +140,7 @@ mkDerivation { servant servant-server text + these time tinylog tls diff --git a/services/gundeck/gundeck.cabal b/services/gundeck/gundeck.cabal index 19ce66fb3e2..75b62eccb54 100644 --- a/services/gundeck/gundeck.cabal +++ b/services/gundeck/gundeck.cabal @@ -46,6 +46,7 @@ library Gundeck.Schema.V1 Gundeck.Schema.V10 Gundeck.Schema.V11 + Gundeck.Schema.V12 Gundeck.Schema.V2 Gundeck.Schema.V3 Gundeck.Schema.V4 @@ -116,6 +117,7 @@ library , amazonka-core >=2 , amazonka-sns >=2 , 
amazonka-sqs >=2 + , amqp , async >=2.0 , attoparsec >=0.10 , auto-update >=0.1 @@ -126,6 +128,7 @@ library , cassandra-util >=0.16.2 , containers >=0.5 , crypton-x509-store + , data-timeout , errors >=2.0 , exceptions >=0.4 , extended @@ -153,6 +156,7 @@ library , servant , servant-server , text >=1.1 + , these , time >=1.4 , tinylog >=0.10 , tls >=1.7.0 diff --git a/services/gundeck/gundeck.integration.yaml b/services/gundeck/gundeck.integration.yaml index adf2914f6aa..075ddccfc5c 100644 --- a/services/gundeck/gundeck.integration.yaml +++ b/services/gundeck/gundeck.integration.yaml @@ -34,6 +34,14 @@ aws: sqsEndpoint: http://localhost:4568 # https://sqs.eu-west-1.amazonaws.com snsEndpoint: http://localhost:4575 # https://sns.eu-west-1.amazonaws.com +rabbitmq: + host: 127.0.0.1 + port: 5671 + vHost: / + enableTls: true + caCert: test/resources/rabbitmq-ca.pem + insecureSkipVerifyTls: false + settings: httpPoolSize: 1024 notificationTTL: 24192200 diff --git a/services/gundeck/src/Gundeck/API/Internal.hs b/services/gundeck/src/Gundeck/API/Internal.hs index f0dfabe1d19..d97a0a695dd 100644 --- a/services/gundeck/src/Gundeck/API/Internal.hs +++ b/services/gundeck/src/Gundeck/API/Internal.hs @@ -24,6 +24,7 @@ where import Cassandra qualified import Control.Lens (view) import Data.Id +import Gundeck.Client import Gundeck.Client qualified as Client import Gundeck.Monad import Gundeck.Presence qualified as Presence @@ -49,6 +50,7 @@ servantSitemap = :<|> Named @"i-clients-delete" unregisterClientH :<|> Named @"i-user-delete" removeUserH :<|> Named @"i-push-tokens-get" getPushTokensH + :<|> Named @"i-reg-consumable-notifs" registerConsumableNotificationsClient statusH :: (Applicative m) => m NoContent statusH = pure NoContent @@ -64,3 +66,9 @@ removeUserH uid = NoContent <$ Client.removeUser uid getPushTokensH :: UserId -> Gundeck PushTok.PushTokenList getPushTokensH uid = PushTok.PushTokenList <$> (view PushTok.addrPushToken <$$> PushTok.lookup uid Cassandra.All) + 
+registerConsumableNotificationsClient :: UserId -> ClientId -> Gundeck NoContent +registerConsumableNotificationsClient uid cid = do + chan <- getRabbitMqChan + void . liftIO $ setupConsumableNotifications chan uid cid + pure NoContent diff --git a/services/gundeck/src/Gundeck/Client.hs b/services/gundeck/src/Gundeck/Client.hs index 43323ade9cc..486ab4b63c8 100644 --- a/services/gundeck/src/Gundeck/Client.hs +++ b/services/gundeck/src/Gundeck/Client.hs @@ -15,19 +15,20 @@ -- You should have received a copy of the GNU Affero General Public License along -- with this program. If not, see . -module Gundeck.Client - ( unregister, - removeUser, - ) -where +module Gundeck.Client where import Control.Lens (view) import Data.Id +import Data.Map qualified as Map +import Data.Text.Encoding (encodeUtf8) import Gundeck.Monad import Gundeck.Notification.Data qualified as Notifications import Gundeck.Push.Data qualified as Push import Gundeck.Push.Native import Imports +import Network.AMQP +import Network.AMQP.Types +import Wire.API.Notification unregister :: UserId -> ClientId -> Gundeck () unregister uid cid = do @@ -42,3 +43,32 @@ removeUser user = do deleteTokens toks Nothing Push.erase user Notifications.deleteAll user + +setupConsumableNotifications :: + Channel -> + UserId -> + ClientId -> + IO Text +setupConsumableNotifications chan uid cid = do + let qName = clientNotificationQueueName uid cid + void $ + declareQueue + chan + newQueue + { queueName = qName, + -- TODO: make this less ugly to read + queueHeaders = + FieldTable $ + Map.fromList + [ ( "x-dead-letter-exchange", + FVString $ + encodeUtf8 userNotificationDlxName + ), + ( "x-dead-letter-routing-key", + FVString $ encodeUtf8 userNotificationDlqName + ) + ] + } + for_ [userRoutingKey uid, clientRoutingKey uid cid] $ + bindQueue chan qName userNotificationExchangeName + pure qName diff --git a/services/gundeck/src/Gundeck/Env.hs b/services/gundeck/src/Gundeck/Env.hs index 2397005c68a..ed91feeaafc 100644 --- 
a/services/gundeck/src/Gundeck/Env.hs +++ b/services/gundeck/src/Gundeck/Env.hs @@ -41,6 +41,8 @@ import Gundeck.Redis qualified as Redis import Gundeck.Redis.HedisExtensions qualified as Redis import Gundeck.ThreadBudget import Imports +import Network.AMQP (Channel) +import Network.AMQP.Extended qualified as Q import Network.HTTP.Client (responseTimeoutMicro) import Network.HTTP.Client.TLS (tlsManagerSettings) import Network.TLS as TLS @@ -58,7 +60,8 @@ data Env = Env _rstateAdditionalWrite :: !(Maybe Redis.RobustConnection), _awsEnv :: !Aws.Env, _time :: !(IO Milliseconds), - _threadBudgetState :: !(Maybe ThreadBudgetState) + _threadBudgetState :: !(Maybe ThreadBudgetState), + _rabbitMqChannel :: MVar Channel } makeLenses ''Env @@ -101,7 +104,8 @@ createEnv o = do { updateAction = Ms . round . (* 1000) <$> getPOSIXTime } mtbs <- mkThreadBudgetState `mapM` (o ^. settings . maxConcurrentNativePushes) - pure $! (rThread : rAdditionalThreads,) $! Env (RequestId defRequestId) o l n p r rAdditional a io mtbs + rabbitMqChannelMVar <- Q.mkRabbitMqChannelMVar l (o ^. rabbitmq) + pure $! (rThread : rAdditionalThreads,) $! Env (RequestId defRequestId) o l n p r rAdditional a io mtbs rabbitMqChannelMVar reqIdMsg :: RequestId -> Logger.Msg -> Logger.Msg reqIdMsg = ("request" Logger..=) . 
unRequestId diff --git a/services/gundeck/src/Gundeck/Monad.hs b/services/gundeck/src/Gundeck/Monad.hs index 6d4147ea70a..3e1881dc2cc 100644 --- a/services/gundeck/src/Gundeck/Monad.hs +++ b/services/gundeck/src/Gundeck/Monad.hs @@ -32,6 +32,7 @@ module Gundeck.Monad runDirect, runGundeck, posixTime, + getRabbitMqChan, -- * Select which redis to target runWithDefaultRedis, @@ -53,11 +54,15 @@ import Database.Redis qualified as Redis import Gundeck.Env import Gundeck.Redis qualified as Redis import Imports +import Network.AMQP +import Network.HTTP.Types import Network.Wai +import Network.Wai.Utilities.Error import Prometheus -import System.Logger qualified as Log +import System.Logger (Logger) import System.Logger qualified as Logger -import System.Logger.Class +import System.Logger.Class qualified as Log +import System.Timeout import UnliftIO (async) -- | TODO: 'Client' already has an 'Env'. Why do we need two? How does this even work? We should @@ -99,7 +104,7 @@ newtype WithDefaultRedis a = WithDefaultRedis {runWithDefaultRedis :: Gundeck a} MonadReader Env, MonadClient, MonadUnliftIO, - MonadLogger + Log.MonadLogger ) instance Redis.MonadRedis WithDefaultRedis where @@ -128,7 +133,7 @@ newtype WithAdditionalRedis a = WithAdditionalRedis {runWithAdditionalRedis :: G MonadReader Env, MonadClient, MonadUnliftIO, - MonadLogger + Log.MonadLogger ) instance Redis.MonadRedis WithAdditionalRedis where @@ -148,7 +153,7 @@ instance Redis.RedisCtx WithAdditionalRedis (Either Redis.Reply) where returnDecode :: (Redis.RedisResult a) => Redis.Reply -> WithAdditionalRedis (Either Redis.Reply a) returnDecode = Redis.liftRedis . Redis.returnDecode -instance MonadLogger Gundeck where +instance Log.MonadLogger Gundeck where log l m = do e <- ask Logger.log (e ^. applog) l (reqIdMsg (e ^. reqId) . m) @@ -173,7 +178,7 @@ runDirect e m = `catch` ( \(exception :: SomeException) -> do case fromException exception of Nothing -> - Log.err (e ^. applog) $ + Logger.err (e ^. 
applog) $ Log.msg ("IO Exception occurred" :: ByteString) . Log.field "message" (displayException exception) . Log.field "request" (unRequestId (e ^. reqId)) @@ -186,16 +191,23 @@ lookupReqId l r = case lookup requestIdName (requestHeaders r) of Just rid -> pure $ RequestId rid Nothing -> do localRid <- RequestId . UUID.toASCIIBytes <$> UUID.nextRandom - Log.info l $ - "request-id" - .= localRid - ~~ "method" - .= requestMethod r - ~~ "path" - .= rawPathInfo r - ~~ msg (val "generated a new request id for local request") + Logger.info l $ + Log.field "request-id" localRid + . Log.field "method" (requestMethod r) + . Log.field "path" (rawPathInfo r) + . Log.msg (Log.val "generated a new request id for local request") pure localRid posixTime :: Gundeck Milliseconds posixTime = view time >>= liftIO {-# INLINE posixTime #-} + +getRabbitMqChan :: Gundeck Channel +getRabbitMqChan = do + chanMVar <- view rabbitMqChannel + mChan <- liftIO $ System.Timeout.timeout 1_000_000 $ readMVar chanMVar + case mChan of + Nothing -> do + Log.err $ Log.msg (Log.val "Could not retrieve RabbitMQ channel") + throwM $ mkError status500 "internal-server-error" "Could not retrieve RabbitMQ channel" + Just chan -> pure chan diff --git a/services/gundeck/src/Gundeck/Options.hs b/services/gundeck/src/Gundeck/Options.hs index 5f67081e178..f09b6177d19 100644 --- a/services/gundeck/src/Gundeck/Options.hs +++ b/services/gundeck/src/Gundeck/Options.hs @@ -25,6 +25,7 @@ import Data.Aeson.TH import Data.Yaml (FromJSON) import Gundeck.Aws.Arn import Imports +import Network.AMQP.Extended import System.Logger.Extended (Level, LogFormat) import Util.Options import Util.Options.Common @@ -133,6 +134,7 @@ data Opts = Opts _redis :: !RedisEndpoint, _redisAdditionalWrite :: !(Maybe RedisEndpoint), _aws :: !AWSOpts, + _rabbitmq :: !AmqpEndpoint, _discoUrl :: !(Maybe Text), _settings :: !Settings, -- Logging diff --git a/services/gundeck/src/Gundeck/Push.hs b/services/gundeck/src/Gundeck/Push.hs index 
098223a5547..994a8987eff 100644 --- a/services/gundeck/src/Gundeck/Push.hs +++ b/services/gundeck/src/Gundeck/Push.hs @@ -29,19 +29,25 @@ module Gundeck.Push ) where +import Bilge qualified +import Control.Arrow ((&&&)) import Control.Error import Control.Exception (ErrorCall (ErrorCall)) import Control.Lens (to, view, (.~), (^.)) import Control.Monad.Catch import Control.Monad.Except (throwError) -import Data.Aeson as Aeson (Object) +import Data.Aeson qualified as Aeson +import Data.ByteString.Conversion (toByteString') import Data.Id import Data.List.Extra qualified as List -import Data.List1 (List1, list1) +import Data.List1 (List1, list1, toNonEmpty) import Data.Map qualified as Map +import Data.Misc import Data.Range import Data.Set qualified as Set import Data.Text qualified as Text +import Data.These +import Data.Timeout import Data.UUID qualified as UUID import Gundeck.Aws (endpointUsers) import Gundeck.Aws qualified as Aws @@ -58,15 +64,22 @@ import Gundeck.Push.Websocket qualified as Web import Gundeck.ThreadBudget import Gundeck.Util import Imports +import Network.AMQP (Message (..)) +import Network.AMQP qualified as Q import Network.HTTP.Types import Network.Wai.Utilities import System.Logger.Class (msg, val, (+++), (.=), (~~)) import System.Logger.Class qualified as Log +import UnliftIO (pooledMapConcurrentlyN) +import Util.Options import Wire.API.Internal.Notification +import Wire.API.Notification import Wire.API.Presence (Presence (..)) import Wire.API.Presence qualified as Presence import Wire.API.Push.Token qualified as Public import Wire.API.Push.V2 +import Wire.API.User (UserSet (..)) +import Wire.API.User.Client (Client (..), ClientCapability (..), ClientCapabilityList (..), UserClientsFull (..)) push :: [Push] -> Gundeck () push ps = do @@ -84,6 +97,8 @@ class (MonadThrow m) => MonadPushAll m where mpaPushNative :: Notification -> Priority -> [Address] -> m () mpaForkIO :: m () -> m () mpaRunWithBudget :: Int -> a -> m a -> m a + mpaGetClients 
:: Set UserId -> m UserClientsFull + mpaPublishToRabbitMq :: Text -> Q.Message -> m () instance MonadPushAll Gundeck where mpaNotificationTTL = view (options . settings . notificationTTL) @@ -94,6 +109,13 @@ instance MonadPushAll Gundeck where mpaPushNative = pushNative mpaForkIO = void . forkIO mpaRunWithBudget = runWithBudget'' + mpaGetClients = getClients + mpaPublishToRabbitMq = publishToRabbitMq + +publishToRabbitMq :: Text -> Q.Message -> Gundeck () +publishToRabbitMq routingKey qMsg = do + chan <- getRabbitMqChan + void $ liftIO $ Q.publishMsg chan userNotificationExchangeName routingKey qMsg -- | Another layer of wrap around 'runWithBudget'. runWithBudget'' :: Int -> a -> Gundeck a -> Gundeck a @@ -123,10 +145,102 @@ instance MonadMapAsync Gundeck where Nothing -> mapAsync f l Just chunkSize -> concat <$> mapM (mapAsync f) (List.chunksOf chunkSize l) +splitPushes :: (MonadPushAll m) => [Push] -> m ([Push], [Push]) +splitPushes ps = do + allUserClients <- mpaGetClients (Set.unions $ map (\p -> Set.map (._recipientId) $ p._pushRecipients.fromRange) ps) + pure . partitionHereThere $ map (splitPush allUserClients) ps + +-- | Split a push into rabbitmq and legacy push. This code exists to help with +-- migration. Once it is completed and old APIs are not supported anymore we can +-- assume everything is meant for RabbtiMQ and stop splitting. +splitPush :: + UserClientsFull -> + Push -> + These Push Push +splitPush clientsFull p = do + let (rabbitmqRecipients, legacyRecipients) = + partitionHereThereRange . 
rcast @_ @_ @1024 $ + mapRange splitRecipient (rangeSetToList $ p._pushRecipients) + case (runcons rabbitmqRecipients, runcons legacyRecipients) of + (Nothing, _) -> (That p) + (_, Nothing) -> (This p) + (Just (rabbit0, rabbits), Just (legacy0, legacies)) -> + These + p {_pushRecipients = rangeListToSet $ rcons rabbit0 rabbits} + p {_pushRecipients = rangeListToSet $ rcons legacy0 legacies} + where + splitRecipient :: Recipient -> These Recipient Recipient + splitRecipient rcpt = do + let allClients = Map.findWithDefault mempty rcpt._recipientId $ clientsFull.userClientsFull + relevantClients = case rcpt._recipientClients of + RecipientClientsSome cs -> + Set.filter (\c -> c.clientId `elem` toList cs) allClients + RecipientClientsAll -> allClients + isClientForRabbitMq c = ClientSupportsConsumableNotifications `Set.member` c.clientCapabilities.fromClientCapabilityList + (rabbitmqClients, legacyClients) = Set.partition isClientForRabbitMq relevantClients + rabbitmqClientIds = (.clientId) <$> Set.toList rabbitmqClients + legacyClientIds = (.clientId) <$> Set.toList legacyClients + case (rabbitmqClientIds, legacyClientIds) of + ([], _) -> + -- Checking for rabbitmqClientIds first ensures that we fall back to + -- old behaviour even if legacyClientIds is empty too. This way we + -- won't break things before clients are ready for it. + (That rcpt) + (_, []) -> + (This rcpt) + (r : rs, l : ls) -> + These + rcpt {_recipientClients = RecipientClientsSome $ list1 r rs} + rcpt {_recipientClients = RecipientClientsSome $ list1 l ls} + + partitionHereThereRange :: Range 0 m [These a b] -> (Range 0 m [a], Range 0 m [b]) + partitionHereThereRange = + ((&&&) (rconcat . mapRange fst) (rconcat . mapRange snd)) + . mapRange partitionToRange + where + rsingleton0 :: forall x. x -> Range 0 1 [x] + rsingleton0 = rcast . rsingleton + + rnil1 :: forall x. 
Range 0 1 [x] + rnil1 = rcast rnil + + partitionToRange :: These a b -> (Range 0 1 [a], Range 0 1 [b]) + partitionToRange = \case + (This a) -> (rsingleton0 a, rnil1) + (That b) -> (rnil1, rsingleton0 b) + (These a b) -> (rsingleton0 a, rsingleton0 b) + +getClients :: Set UserId -> Gundeck UserClientsFull +getClients uids = do + fmap mconcat + . pooledMapConcurrentlyN 4 getBatch + . List.chunksOf 100 + $ Set.toList uids + where + getBatch :: [UserId] -> Gundeck UserClientsFull + getBatch uidsChunk = do + r <- do + Endpoint h p <- view $ options . brig + Bilge.post + ( Bilge.host (toByteString' h) + . Bilge.port p + . Bilge.path "/i/clients/full" + . Bilge.json (UserSet $ Set.fromList uidsChunk) + . Bilge.expect2xx + ) + Bilge.responseJsonError r + +pushAll :: (MonadPushAll m, MonadNativeTargets m, MonadMapAsync m, Log.MonadLogger m) => [Push] -> m () +pushAll pushes = do + Log.debug $ msg (val "pushing") . Log.field "pushes" (Aeson.encode pushes) + (rabbitmqPushes, legacyPushes) <- splitPushes pushes + pushAllLegacy legacyPushes + pushAllViaRabbitMq rabbitmqPushes + -- | Construct and send a single bulk push request to the client. Write the 'Notification's from -- the request to C*. Trigger native pushes for all delivery failures notifications. -pushAll :: (MonadPushAll m, MonadNativeTargets m, MonadMapAsync m) => [Push] -> m () -pushAll pushes = do +pushAllLegacy :: (MonadPushAll m, MonadNativeTargets m, MonadMapAsync m) => [Push] -> m () +pushAllLegacy pushes = do newNotifications <- mapM mkNewNotification pushes -- persist push request let cassandraTargets :: [CassandraTargets] @@ -153,6 +267,56 @@ pushAll pushes = do mpaRunWithBudget cost () $ mpaPushNative notif (psh ^. 
pushNativePriority) =<< nativeTargets psh rcps' alreadySent +pushAllViaRabbitMq :: (MonadPushAll m) => [Push] -> m () +pushAllViaRabbitMq pushes = + for_ pushes $ pushViaRabbitMq + +pushViaRabbitMq :: (MonadPushAll m) => Push -> m () +pushViaRabbitMq p = do + notifId <- mpaMkNotificationId + NotificationTTL ttl <- mpaNotificationTTL + let qMsg = + Q.newMsg + { msgBody = + Aeson.encode + . queuedNotification notifId + $ toNonEmpty p._pushPayload, + msgContentType = Just "application/json", + msgDeliveryMode = + -- Non-persistent messages never hit the disk and so do not + -- survive RabbitMQ node restarts, this is great for transient + -- notifications. + Just + ( if p._pushTransient + then Q.NonPersistent + else Q.Persistent + ), + msgExpiration = + Just + ( if p._pushTransient + then + ( -- Means that if there is no active consumer, this + -- message will never be delivered to anyone. It can + -- still take some time before RabbitMQ forgets about + -- this message because the expiration is only + -- considered for messages which are at the head of a + -- queue. See docs: https://www.rabbitmq.com/docs/ttl + "0" + ) + else showT $ fromIntegral ttl # Second #> MilliSecond + ) + } + routingKeys = + Set.unions $ + flip Set.map (fromRange p._pushRecipients) \r -> + case r._recipientClients of + RecipientClientsAll -> + Set.singleton $ userRoutingKey r._recipientId + RecipientClientsSome (toList -> cs) -> + Set.fromList $ map (clientRoutingKey r._recipientId) cs + for_ routingKeys $ \routingKey -> + mpaPublishToRabbitMq routingKey qMsg + -- | A new notification to be stored in C* and pushed over websockets data NewNotification = NewNotification { nnPush :: Push, @@ -251,7 +415,7 @@ shouldActuallyPush psh rcp pres = not isOrigin && okByPushAllowlist && okByRecip okByRecipientAllowlist :: Bool okByRecipientAllowlist = - case (rcp ^. recipientClients, clientId pres) of + case (rcp ^. 
recipientClients, pres.clientId) of (RecipientClientsSome cs, Just c) -> c `elem` cs _ -> True diff --git a/services/gundeck/src/Gundeck/Run.hs b/services/gundeck/src/Gundeck/Run.hs index b978b9d6b13..1f73490862b 100644 --- a/services/gundeck/src/Gundeck/Run.hs +++ b/services/gundeck/src/Gundeck/Run.hs @@ -42,6 +42,7 @@ import Gundeck.React import Gundeck.Schema.Run (lastSchemaVersion) import Gundeck.ThreadBudget import Imports +import Network.AMQP import Network.Wai as Wai import Network.Wai.Middleware.Gunzip qualified as GZip import Network.Wai.Middleware.Gzip qualified as GZip @@ -53,49 +54,73 @@ import OpenTelemetry.Trace qualified as Otel import Servant (Handler (Handler), (:<|>) (..)) import Servant qualified import System.Logger qualified as Log +import System.Logger.Class qualified as MonadLogger import UnliftIO.Async qualified as Async import Util.Options +import Wire.API.Notification import Wire.API.Routes.Public.Gundeck (GundeckAPI) import Wire.API.Routes.Version import Wire.API.Routes.Version.Wai import Wire.OpenTelemetry run :: Opts -> IO () -run o = withTracer \tracer -> do - (rThreads, e) <- createEnv o - runClient (e ^. cstate) $ +run opts = withTracer \tracer -> do + (rThreads, env) <- createEnv opts + let logger = env ^. applog + + runDirect env setUpRabbitMqExchangesAndQueues + + runClient (env ^. cstate) $ versionCheck lastSchemaVersion - let l = e ^. applog - s <- newSettings $ defaultServer (unpack . host $ o ^. gundeck) (port $ o ^. gundeck) l - let throttleMillis = fromMaybe defSqsThrottleMillis $ o ^. (settings . sqsThrottleMillis) + s <- newSettings $ defaultServer (unpack . host $ opts ^. gundeck) (port $ opts ^. gundeck) logger + let throttleMillis = fromMaybe defSqsThrottleMillis $ opts ^. (settings . sqsThrottleMillis) - lst <- Async.async $ Aws.execute (e ^. awsEnv) (Aws.listen throttleMillis (runDirect e . onEvent)) - wtbs <- forM (e ^. 
threadBudgetState) $ \tbs -> Async.async $ runDirect e $ watchThreadBudgetState tbs 10 - wCollectAuth <- Async.async (collectAuthMetrics (Aws._awsEnv (Env._awsEnv e))) + lst <- Async.async $ Aws.execute (env ^. awsEnv) (Aws.listen throttleMillis (runDirect env . onEvent)) + wtbs <- forM (env ^. threadBudgetState) $ \tbs -> Async.async $ runDirect env $ watchThreadBudgetState tbs 10 + wCollectAuth <- Async.async (collectAuthMetrics (Aws._awsEnv (Env._awsEnv env))) - app <- middleware e <*> pure (mkApp e) + app <- middleware env <*> pure (mkApp env) inSpan tracer "gundeck" defaultSpanArguments {kind = Otel.Server} (runSettingsWithShutdown s app Nothing) `finally` do - Log.info l $ Log.msg (Log.val "Shutting down ...") - shutdown (e ^. cstate) + Log.info logger $ Log.msg (Log.val "Shutting down ...") + shutdown (env ^. cstate) Async.cancel lst Async.cancel wCollectAuth forM_ wtbs Async.cancel forM_ rThreads Async.cancel - Redis.disconnect =<< takeMVar (e ^. rstate) - whenJust (e ^. rstateAdditionalWrite) $ (=<<) Redis.disconnect . takeMVar - Log.close (e ^. applog) + Redis.disconnect =<< takeMVar (env ^. rstate) + whenJust (env ^. rstateAdditionalWrite) $ (=<<) Redis.disconnect . takeMVar + Log.close (env ^. 
applog) where + setUpRabbitMqExchangesAndQueues :: Gundeck () + setUpRabbitMqExchangesAndQueues = do + chan <- getRabbitMqChan + MonadLogger.info $ Log.msg (Log.val "setting up RabbitMQ exchanges and queues") + liftIO $ createUserNotificationsExchange chan + liftIO $ createDeadUserNotificationsExchange chan + + createUserNotificationsExchange :: Channel -> IO () + createUserNotificationsExchange chan = do + declareExchange chan newExchange {exchangeName = userNotificationExchangeName, exchangeType = "direct"} + + createDeadUserNotificationsExchange :: Channel -> IO () + createDeadUserNotificationsExchange chan = do + declareExchange chan newExchange {exchangeName = userNotificationDlxName, exchangeType = "direct"} + + let routingKey = userNotificationDlqName + void $ declareQueue chan newQueue {queueName = userNotificationDlqName} + bindQueue chan userNotificationDlqName userNotificationDlxName routingKey + middleware :: Env -> IO Middleware - middleware e = do + middleware env = do otelMiddleWare <- newOpenTelemetryWaiMiddleware pure $ - versionMiddleware (foldMap expandVersionExp (o ^. settings . disabledAPIVersions)) + versionMiddleware (foldMap expandVersionExp (opts ^. settings . disabledAPIVersions)) . otelMiddleWare - . requestIdMiddleware (e ^. applog) defaultRequestIdHeaderName + . requestIdMiddleware (env ^. applog) defaultRequestIdHeaderName . Metrics.servantPrometheusMiddleware (Proxy @(GundeckAPI :<|> InternalAPI)) . GZip.gunzip . GZip.gzip GZip.def - . catchErrors (e ^. applog) defaultRequestIdHeaderName + . catchErrors (env ^. 
applog) defaultRequestIdHeaderName mkApp :: Env -> Wai.Application mkApp env0 req cont = do diff --git a/services/gundeck/src/Gundeck/Schema/Run.hs b/services/gundeck/src/Gundeck/Schema/Run.hs index 247f7a69488..34dfebd09df 100644 --- a/services/gundeck/src/Gundeck/Schema/Run.hs +++ b/services/gundeck/src/Gundeck/Schema/Run.hs @@ -23,6 +23,7 @@ import Control.Exception (finally) import Gundeck.Schema.V1 qualified as V1 import Gundeck.Schema.V10 qualified as V10 import Gundeck.Schema.V11 qualified as V11 +import Gundeck.Schema.V12 qualified as V12 import Gundeck.Schema.V2 qualified as V2 import Gundeck.Schema.V3 qualified as V3 import Gundeck.Schema.V4 qualified as V4 @@ -63,5 +64,6 @@ migrations = V8.migration, V9.migration, V10.migration, - V11.migration + V11.migration, + V12.migration ] diff --git a/services/gundeck/src/Gundeck/Schema/V12.hs b/services/gundeck/src/Gundeck/Schema/V12.hs new file mode 100644 index 00000000000..ad2a638f51a --- /dev/null +++ b/services/gundeck/src/Gundeck/Schema/V12.hs @@ -0,0 +1,33 @@ +-- This file is part of the Wire Server implementation. +-- +-- Copyright (C) 2022 Wire Swiss GmbH +-- +-- This program is free software: you can redistribute it and/or modify it under +-- the terms of the GNU Affero General Public License as published by the Free +-- Software Foundation, either version 3 of the License, or (at your option) any +-- later version. +-- +-- This program is distributed in the hope that it will be useful, but WITHOUT +-- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +-- FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more +-- details. +-- +-- You should have received a copy of the GNU Affero General Public License along +-- with this program. If not, see . 
+ +module Gundeck.Schema.V12 (migration) where + +import Cassandra.Schema +import Imports +import Text.RawString.QQ + +migration :: Migration +migration = Migration 12 "Create table `missed_notifications`" $ do + schema' + [r| + CREATE TABLE missed_notifications ( + user_id uuid, + client_id text, + PRIMARY KEY (user_id, client_id) + ); + |] diff --git a/services/gundeck/test/resources/rabbitmq-ca.pem b/services/gundeck/test/resources/rabbitmq-ca.pem new file mode 120000 index 00000000000..ca91c2c31bd --- /dev/null +++ b/services/gundeck/test/resources/rabbitmq-ca.pem @@ -0,0 +1 @@ +../../../../deploy/dockerephemeral/rabbitmq-config/certificates/ca.pem \ No newline at end of file diff --git a/services/gundeck/test/unit/MockGundeck.hs b/services/gundeck/test/unit/MockGundeck.hs index 10bc5806bb6..a747a6570a3 100644 --- a/services/gundeck/test/unit/MockGundeck.hs +++ b/services/gundeck/test/unit/MockGundeck.hs @@ -3,8 +3,6 @@ {-# LANGUAGE TemplateHaskell #-} {-# LANGUAGE TypeSynonymInstances #-} {-# OPTIONS_GHC -Wno-orphans #-} --- Disabling to stop warnings on HasCallStack -{-# OPTIONS_GHC -Wno-redundant-constraints #-} -- This file is part of the Wire Server implementation. -- @@ -204,7 +202,7 @@ genMockEnv :: (HasCallStack) => Gen MockEnv genMockEnv = do -- This function generates a 'ClientInfo' that corresponds to one of the -- four scenarios above - let genClientInfo :: (HasCallStack) => UserId -> ClientId -> Gen ClientInfo + let genClientInfo :: UserId -> ClientId -> Gen ClientInfo genClientInfo uid cid = do _ciNativeAddress <- QC.oneof @@ -251,7 +249,7 @@ genMockEnv = do validateMockEnv env & either error (const $ pure env) -- Try to shrink a 'MockEnv' by removing some users from '_meClientInfos'. -shrinkMockEnv :: (HasCallStack) => MockEnv -> [MockEnv] +shrinkMockEnv :: MockEnv -> [MockEnv] shrinkMockEnv (MockEnv cis) = MockEnv . Map.fromList <$> filter (not . 
null) (shrinkList (const []) (Map.toList cis)) @@ -291,7 +289,7 @@ genRecipient' env uid = do ] pure $ Recipient uid route cids -genRoute :: (HasCallStack) => Gen Route +genRoute :: Gen Route genRoute = QC.elements [minBound ..] genId :: Gen (Id a) @@ -302,7 +300,7 @@ genId = do genClientId :: Gen ClientId genClientId = ClientId <$> arbitrary -genProtoAddress :: (HasCallStack) => UserId -> ClientId -> Gen Address +genProtoAddress :: UserId -> ClientId -> Gen Address genProtoAddress _addrUser _addrClient = do _addrTransport :: Transport <- QC.elements [minBound ..] arnEpId :: Text <- arbitrary @@ -374,14 +372,14 @@ dropSomeDevices = RecipientClientsSome . unsafeList1 . take numdevs <$> QC.shuffle (toList cids) -shrinkPushes :: (HasCallStack) => [Push] -> [[Push]] +shrinkPushes :: [Push] -> [[Push]] shrinkPushes = shrinkList shrinkPush where - shrinkPush :: (HasCallStack) => Push -> [Push] + shrinkPush :: Push -> [Push] shrinkPush psh = (\rcps -> psh & pushRecipients .~ rcps) <$> shrinkRecipients (psh ^. pushRecipients) - shrinkRecipients :: (HasCallStack) => Range 1 1024 (Set Recipient) -> [Range 1 1024 (Set Recipient)] + shrinkRecipients :: Range 1 1024 (Set Recipient) -> [Range 1 1024 (Set Recipient)] shrinkRecipients = fmap unsafeRange . map Set.fromList . filter (not . null) . shrinkList shrinkRecipient . Set.toList . fromRange - shrinkRecipient :: (HasCallStack) => Recipient -> [Recipient] + shrinkRecipient :: Recipient -> [Recipient] shrinkRecipient _ = [] -- | See 'Payload'. @@ -401,7 +399,7 @@ genNotifs env = fmap uniqNotifs . listOf $ do where uniqNotifs = nubBy ((==) `on` (ntfId . 
fst)) -shrinkNotifs :: (HasCallStack) => [(Notification, [Presence])] -> [[(Notification, [Presence])]] +shrinkNotifs :: [(Notification, [Presence])] -> [[(Notification, [Presence])]] shrinkNotifs = shrinkList (\(notif, prcs) -> (notif,) <$> shrinkList (const []) prcs) ---------------------------------------------------------------------- @@ -430,6 +428,8 @@ instance MonadPushAll MockGundeck where -- doesn't, this is good enough for testing). mpaRunWithBudget _ _ = id -- no throttling needed as long as we don't overdo it in the tests... + mpaGetClients _ = pure mempty + mpaPublishToRabbitMq _ _ = pure () instance MonadNativeTargets MockGundeck where mntgtLogErr _ = pure () @@ -529,10 +529,7 @@ handlePushNative Push {..} = do -- | From a single 'Push', store only those notifications that real Gundeck would put into -- Cassandra. -handlePushCass :: - (HasCallStack, m ~ MockGundeck) => - Push -> - m () +handlePushCass :: Push -> MockGundeck () handlePushCass Push {..} -- Condition 1: transient pushes are not put into Cassandra. | _pushTransient = pure () @@ -547,15 +544,10 @@ handlePushCass Push {..} = do forM_ cids' $ \cid -> msCassQueue %= deliver (uid, cid) _pushPayload -mockMkNotificationId :: - (HasCallStack, m ~ MockGundeck) => - m NotificationId +mockMkNotificationId :: MockGundeck NotificationId mockMkNotificationId = Id <$> getRandom -mockListAllPresences :: - (HasCallStack, m ~ MockGundeck) => - [UserId] -> - m [[Presence]] +mockListAllPresences :: [UserId] -> MockGundeck [[Presence]] mockListAllPresences uids = asks $ fmap fakePresences . filter ((`elem` uids) . fst) . allRecipients @@ -584,12 +576,11 @@ mockBulkPush notifs = do -- | persisting notification is not needed for the tests at the moment, so we do nothing here. mockStreamAdd :: - (HasCallStack, m ~ MockGundeck) => NotificationId -> List1 NotificationTarget -> Payload -> NotificationTTL -> - m () + MockGundeck () mockStreamAdd _ (toList -> targets) pay _ = forM_ targets $ \tgt -> case tgt ^. 
targetClients of clients@(_ : _) -> forM_ clients $ \cid -> @@ -598,11 +589,10 @@ mockStreamAdd _ (toList -> targets) pay _ = msCassQueue %= deliver (tgt ^. targetUser, ClientId 0) pay mockPushNative :: - (HasCallStack, m ~ MockGundeck) => Notification -> Priority -> [Address] -> - m () + MockGundeck () mockPushNative (ntfPayload -> payload) _ addrs = do env <- ask forM_ addrs $ \addr -> do @@ -623,10 +613,9 @@ mockLookupAddresses uid = do pure . mapMaybe (^? ciNativeAddress . _Just . _1) $ cinfos mockBulkSend :: - (HasCallStack, m ~ MockGundeck) => URI -> BulkPushRequest -> - m (URI, Either SomeException BulkPushResponse) + MockGundeck (URI, Either SomeException BulkPushResponse) mockBulkSend uri notifs = do getstatus <- mkWSStatus let flat :: [(Notification, PushTarget)] @@ -652,7 +641,7 @@ newtype Pretty a = Pretty a instance (Aeson.ToJSON a) => Show (Pretty a) where show (Pretty a) = cs $ Aeson.encodePretty a -shrinkPretty :: (HasCallStack) => (a -> [a]) -> Pretty a -> [Pretty a] +shrinkPretty :: (a -> [a]) -> Pretty a -> [Pretty a] shrinkPretty shrnk (Pretty xs) = Pretty <$> shrnk xs sublist1Of :: (HasCallStack) => [a] -> Gen (List1 a)