diff --git a/.circleci/config.yml b/.circleci/config.yml index 1ac48a71ba..cf1989eff9 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -41,7 +41,7 @@ workflows: - dockerhubuploadlatest: filters: branches: - only: master + only: [ master, main ] commands: docker_prepare: diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml new file mode 100644 index 0000000000..a746ae6de3 --- /dev/null +++ b/.github/workflows/docs.yaml @@ -0,0 +1,31 @@ +name: Deploy the documentation + +on: + push: + branches: + - develop + + workflow_dispatch: + +jobs: + pages: + name: GitHub Pages + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Setup mdbook + uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14 + with: + mdbook-version: '0.4.9' + + - name: Build the documentation + run: mdbook build + + - name: Deploy latest documentation + uses: peaceiris/actions-gh-pages@068dc23d9710f1ba62e86896f84735d869951305 # v3.8.0 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + keep_files: true + publish_dir: ./book + destination_dir: ./develop diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index e7f3be1b4e..955beb4aa0 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -34,7 +34,13 @@ jobs: if: ${{ github.base_ref == 'develop' || contains(github.base_ref, 'release-') }} runs-on: ubuntu-latest steps: + # Note: This and the script can be simplified once we drop Buildkite. See: + # https://github.com/actions/checkout/issues/266#issuecomment-638346893 + # https://github.com/actions/checkout/issues/416 - uses: actions/checkout@v2 + with: + ref: ${{ github.event.pull_request.head.sha }} + fetch-depth: 0 - uses: actions/setup-python@v2 - run: pip install tox - name: Patch Buildkite-specific test script @@ -226,9 +232,9 @@ jobs: - name: Run SyTest run: /bootstrap.sh synapse working-directory: /src - - name: Dump results.tap + - name: Summarise results.tap if: ${{ always() }} - run: cat /logs/results.tap + run: /sytest/scripts/tap_to_gha.pl /logs/results.tap - name: Upload SyTest logs uses: actions/upload-artifact@v2 if: ${{ always() }} diff --git a/.gitignore b/.gitignore index 295a18b539..6b9257b5c9 100644 --- a/.gitignore +++ b/.gitignore @@ -46,3 +46,6 @@ __pycache__/ /docs/build/ /htmlcov /pip-wheel-metadata/ + +# docs +book/ diff --git a/CHANGES.md b/CHANGES.md index 04d260f8e5..0f9798a4d3 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,88 @@ +Synapse 1.36.0 (2021-06-15) +=========================== + +No significant changes. + + +Synapse 1.36.0rc2 (2021-06-11) +============================== + +Bugfixes +-------- + +- Fix a bug which caused presence updates to stop working some time after a restart, when using a presence writer worker. Broke in v1.33.0. ([\#10149](https://github.com/matrix-org/synapse/issues/10149)) +- Fix a bug when using federation sender worker where it would send out more presence updates than necessary, leading to high resource usage. Broke in v1.33.0. ([\#10163](https://github.com/matrix-org/synapse/issues/10163)) +- Fix a bug where Synapse could send the same presence update to a remote twice. ([\#10165](https://github.com/matrix-org/synapse/issues/10165)) + + +Synapse 1.36.0rc1 (2021-06-08) +============================== + +Features +-------- + +- Add new endpoint `/_matrix/client/r0/rooms/{roomId}/aliases` from Client-Server API r0.6.1 (previously [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432)). 
([\#9224](https://github.com/matrix-org/synapse/issues/9224)) +- Improve performance of incoming federation transactions in large rooms. ([\#9953](https://github.com/matrix-org/synapse/issues/9953), [\#9973](https://github.com/matrix-org/synapse/issues/9973)) +- Rewrite logic around verifying JSON object and fetching server keys to be more performant and use less memory. ([\#10035](https://github.com/matrix-org/synapse/issues/10035)) +- Add new admin APIs for unprotecting local media from quarantine. Contributed by @dklimpel. ([\#10040](https://github.com/matrix-org/synapse/issues/10040)) +- Add new admin APIs to remove media by media ID from quarantine. Contributed by @dklimpel. ([\#10044](https://github.com/matrix-org/synapse/issues/10044)) +- Make reason and score parameters optional for reporting content. Implements [MSC2414](https://github.com/matrix-org/matrix-doc/pull/2414). Contributed by Callum Brown. ([\#10077](https://github.com/matrix-org/synapse/issues/10077)) +- Add support for routing more requests to workers. ([\#10084](https://github.com/matrix-org/synapse/issues/10084)) +- Report OpenTracing spans for database activity. ([\#10113](https://github.com/matrix-org/synapse/issues/10113), [\#10136](https://github.com/matrix-org/synapse/issues/10136), [\#10141](https://github.com/matrix-org/synapse/issues/10141)) +- Significantly reduce memory usage of joining large remote rooms. ([\#10117](https://github.com/matrix-org/synapse/issues/10117)) + + +Bugfixes +-------- + +- Fixed a bug causing replication requests to fail when receiving a lot of events via federation. ([\#10082](https://github.com/matrix-org/synapse/issues/10082)) +- Fix a bug in the `force_tracing_for_users` option introduced in Synapse v1.35 which meant that the OpenTracing spans produced were missing most tags. ([\#10092](https://github.com/matrix-org/synapse/issues/10092)) +- Fixed a bug that could cause Synapse to stop notifying application services. Contributed by Willem Mulder. ([\#10107](https://github.com/matrix-org/synapse/issues/10107)) +- Fix bug where the server would attempt to fetch the same history in the room from a remote server multiple times in parallel. ([\#10116](https://github.com/matrix-org/synapse/issues/10116)) +- Fix a bug introduced in Synapse 1.33.0 which caused replication requests to fail when receiving a lot of very large events via federation. ([\#10118](https://github.com/matrix-org/synapse/issues/10118)) +- Fix bug when using workers where pagination requests failed if a remote server returned zero events from `/backfill`. Introduced in 1.35.0. ([\#10133](https://github.com/matrix-org/synapse/issues/10133)) + + +Improved Documentation +---------------------- + +- Clarify security note regarding hosting Synapse on the same domain as other web applications. ([\#9221](https://github.com/matrix-org/synapse/issues/9221)) +- Update CAPTCHA documentation to mention turning off the verify origin feature. Contributed by @aaronraimist. ([\#10046](https://github.com/matrix-org/synapse/issues/10046)) +- Tweak wording of database recommendation in `INSTALL.md`. Contributed by @aaronraimist. ([\#10057](https://github.com/matrix-org/synapse/issues/10057)) +- Add initial infrastructure for rendering Synapse documentation with mdbook. ([\#10086](https://github.com/matrix-org/synapse/issues/10086)) +- Convert the remaining Admin API documentation files to markdown. ([\#10089](https://github.com/matrix-org/synapse/issues/10089)) +- Make a link in docs use HTTPS. Contributed by @RhnSharma. 
([\#10130](https://github.com/matrix-org/synapse/issues/10130)) +- Fix broken link in Docker docs. ([\#10132](https://github.com/matrix-org/synapse/issues/10132)) + + +Deprecations and Removals +------------------------- + +- Remove the experimental `spaces_enabled` flag. The spaces features are always available now. ([\#10063](https://github.com/matrix-org/synapse/issues/10063)) + + +Internal Changes +---------------- + +- Tell CircleCI to build Docker images from `main` branch. ([\#9906](https://github.com/matrix-org/synapse/issues/9906)) +- Simplify naming convention for release branches to only include the major and minor version numbers. ([\#10013](https://github.com/matrix-org/synapse/issues/10013)) +- Add `parse_strings_from_args` for parsing an array from query parameters. ([\#10048](https://github.com/matrix-org/synapse/issues/10048), [\#10137](https://github.com/matrix-org/synapse/issues/10137)) +- Remove some dead code regarding TLS certificate handling. ([\#10054](https://github.com/matrix-org/synapse/issues/10054)) +- Remove redundant, unmaintained `convert_server_keys` script. ([\#10055](https://github.com/matrix-org/synapse/issues/10055)) +- Improve the error message printed by synctl when synapse fails to start. ([\#10059](https://github.com/matrix-org/synapse/issues/10059)) +- Fix GitHub Actions lint for newsfragments. ([\#10069](https://github.com/matrix-org/synapse/issues/10069)) +- Update opentracing to inject the right context into the carrier. ([\#10074](https://github.com/matrix-org/synapse/issues/10074)) +- Fix up `BatchingQueue` implementation. ([\#10078](https://github.com/matrix-org/synapse/issues/10078)) +- Log method and path when dropping request due to size limit. ([\#10091](https://github.com/matrix-org/synapse/issues/10091)) +- In Github Actions workflows, summarize the Sytest results in an easy-to-read format. ([\#10094](https://github.com/matrix-org/synapse/issues/10094)) +- Make `/sync` do fewer state resolutions. ([\#10102](https://github.com/matrix-org/synapse/issues/10102)) +- Add missing type hints to the admin API servlets. ([\#10105](https://github.com/matrix-org/synapse/issues/10105)) +- Improve opentracing annotations for `Notifier`. ([\#10111](https://github.com/matrix-org/synapse/issues/10111)) +- Enable Prometheus metrics for the jaeger client library. ([\#10112](https://github.com/matrix-org/synapse/issues/10112)) +- Work to improve the responsiveness of `/sync` requests. ([\#10124](https://github.com/matrix-org/synapse/issues/10124)) +- OpenTracing: use a consistent name for background processes. ([\#10135](https://github.com/matrix-org/synapse/issues/10135)) + + Synapse 1.35.1 (2021-06-03) =========================== diff --git a/INSTALL.md b/INSTALL.md index 7b40689234..3c498edd29 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -399,11 +399,9 @@ Once you have installed synapse as above, you will need to configure it. ### Using PostgreSQL -By default Synapse uses [SQLite](https://sqlite.org/) and in doing so trades performance for convenience. -SQLite is only recommended in Synapse for testing purposes or for servers with -very light workloads. - -Almost all installations should opt to use [PostgreSQL](https://www.postgresql.org). Advantages include: +By default Synapse uses an [SQLite](https://sqlite.org/) database and in doing so trades +performance for convenience. Almost all installations should opt to use [PostgreSQL](https://www.postgresql.org) +instead. 
Advantages include: - significant performance improvements due to the superior threading and caching model, smarter query optimiser @@ -412,6 +410,10 @@ Almost all installations should opt to use [PostgreSQL](https://www.postgresql.o For information on how to install and use PostgreSQL in Synapse, please see [docs/postgres.md](docs/postgres.md) +SQLite is only acceptable for testing purposes. SQLite should not be used in +a production server. Synapse will perform poorly when using +SQLite, especially when participating in large rooms. + ### TLS certificates The default configuration exposes a single HTTP port on the local diff --git a/MANIFEST.in b/MANIFEST.in index 09bef29705..298597db18 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -41,6 +41,7 @@ exclude mypy.ini exclude sytest-blacklist exclude test_postgresql.sh +include book.toml include pyproject.toml recursive-include changelog.d * diff --git a/README.rst b/README.rst index 1a5503572e..a14a687fd1 100644 --- a/README.rst +++ b/README.rst @@ -149,21 +149,45 @@ For details on having Synapse manage your federation TLS certificates automatically, please see ``_. -Security Note +Security note ============= -Matrix serves raw user generated data in some APIs - specifically the `content -repository endpoints `_. +Matrix serves raw, user-supplied data in some APIs -- specifically the `content +repository endpoints`_. -Whilst we have tried to mitigate against possible XSS attacks (e.g. -https://github.com/matrix-org/synapse/pull/1021) we recommend running -matrix homeservers on a dedicated domain name, to limit any malicious user generated -content served to web browsers a matrix API from being able to attack webapps hosted -on the same domain. This is particularly true of sharing a matrix webclient and -server on the same domain. +.. _content repository endpoints: https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid -See https://github.com/vector-im/riot-web/issues/1977 and -https://developer.github.com/changes/2014-04-25-user-content-security for more details. +Whilst we make a reasonable effort to mitigate against XSS attacks (for +instance, by using `CSP`_), a Matrix homeserver should not be hosted on a +domain hosting other web applications. This especially applies to sharing +the domain with Matrix web clients and other sensitive applications like +webmail. See +https://developer.github.com/changes/2014-04-25-user-content-security for more +information. + +.. _CSP: https://github.com/matrix-org/synapse/pull/1021 + +Ideally, the homeserver should not simply be on a different subdomain, but on +a completely different `registered domain`_ (also known as top-level site or +eTLD+1). This is because `some attacks`_ are still possible as long as the two +applications share the same registered domain. + +.. _registered domain: https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-03#section-2.3 + +.. _some attacks: https://en.wikipedia.org/wiki/Session_fixation#Attacks_using_cross-subdomain_cookie + +To illustrate this with an example, if your Element Web or other sensitive web +application is hosted on ``A.example1.com``, you should ideally host Synapse on +``example2.com``. Some amount of protection is offered by hosting on +``B.example1.com`` instead, so this is also acceptable in some scenarios. +However, you should *not* host your Synapse on ``A.example1.com``. + +Note that all of the above refers exclusively to the domain used in Synapse's +``public_baseurl`` setting. 
In particular, it has no bearing on the domain +mentioned in MXIDs hosted on that server. + +Following this advice ensures that even if an XSS is found in Synapse, the +impact to other applications will be minimal. Upgrading an existing Synapse diff --git a/book.toml b/book.toml new file mode 100644 index 0000000000..fa83d86ffc --- /dev/null +++ b/book.toml @@ -0,0 +1,39 @@ +# Documentation for possible options in this file is at +# https://rust-lang.github.io/mdBook/format/config.html +[book] +title = "Synapse" +authors = ["The Matrix.org Foundation C.I.C."] +language = "en" +multilingual = false + +# The directory that documentation files are stored in +src = "docs" + +[build] +# Prevent markdown pages from being automatically generated when they're +# linked to in SUMMARY.md +create-missing = false + +[output.html] +# The URL visitors will be directed to when they try to edit a page +edit-url-template = "https://github.com/matrix-org/synapse/edit/develop/{path}" + +# Remove the numbers that appear before each item in the sidebar, as they can +# get quite messy as we nest deeper +no-section-label = true + +# The source code URL of the repository +git-repository-url = "https://github.com/matrix-org/synapse" + +# The path that the docs are hosted on +site-url = "/synapse/" + +# Additional HTML, JS, CSS that's injected into each page of the book. +# More information available in docs/website_files/README.md +additional-css = [ + "docs/website_files/table-of-contents.css", + "docs/website_files/remove-nav-buttons.css", + "docs/website_files/indent-section-headers.css", +] +additional-js = ["docs/website_files/table-of-contents.js"] +theme = "docs/website_files/theme" \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index 084e878def..e640dadde9 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.36.0) stable; urgency=medium + + * New synapse release 1.36.0. + + -- Synapse Packaging team Tue, 15 Jun 2021 15:41:53 +0100 + matrix-synapse-py3 (1.35.1) stable; urgency=medium * New synapse release 1.35.1. diff --git a/docker/README.md b/docker/README.md index c8d3c4b3da..3f28cdada3 100644 --- a/docker/README.md +++ b/docker/README.md @@ -226,4 +226,4 @@ healthcheck: ## Using jemalloc Jemalloc is embedded in the image and will be used instead of the default allocator. -You can read about jemalloc by reading the Synapse [README](../README.md). +You can read about jemalloc by reading the Synapse [README](../README.rst). diff --git a/docs/CAPTCHA_SETUP.md b/docs/CAPTCHA_SETUP.md index 331e5d059a..fabdd7b726 100644 --- a/docs/CAPTCHA_SETUP.md +++ b/docs/CAPTCHA_SETUP.md @@ -1,31 +1,37 @@ # Overview -Captcha can be enabled for this home server. This file explains how to do that. -The captcha mechanism used is Google's ReCaptcha. This requires API keys from Google. - -## Getting keys - -Requires a site/secret key pair from: - - - -Must be a reCAPTCHA v2 key using the "I'm not a robot" Checkbox option - -## Setting ReCaptcha Keys - -The keys are a config option on the home server config. If they are not -visible, you can generate them via `--generate-config`. Set the following value: - +A captcha can be enabled on your homeserver to help prevent bots from registering +accounts. Synapse currently uses Google's reCAPTCHA service which requires API keys +from Google. + +## Getting API keys + +1. Create a new site at +1. Set the label to anything you want +1. Set the type to reCAPTCHA v2 using the "I'm not a robot" Checkbox option. 
+This is the only type of captcha that works with Synapse. +1. Add the public hostname for your server, as set in `public_baseurl` +in `homeserver.yaml`, to the list of authorized domains. If you have not set +`public_baseurl`, use `server_name`. +1. Agree to the terms of service and submit. +1. Copy your site key and secret key and add them to your `homeserver.yaml` +configuration file + ``` recaptcha_public_key: YOUR_SITE_KEY recaptcha_private_key: YOUR_SECRET_KEY - -In addition, you MUST enable captchas via: - + ``` +1. Enable the CAPTCHA for new registrations + ``` enable_registration_captcha: true + ``` +1. Go to the settings page for the CAPTCHA you just created +1. Uncheck the "Verify the origin of reCAPTCHA solutions" checkbox so that the +captcha can be displayed in any client. If you do not disable this option then you +must specify the domains of every client that is allowed to display the CAPTCHA. ## Configuring IP used for auth -The ReCaptcha API requires that the IP address of the user who solved the -captcha is sent. If the client is connecting through a proxy or load balancer, +The reCAPTCHA API requires that the IP address of the user who solved the +CAPTCHA is sent. If the client is connecting through a proxy or load balancer, it may be required to use the `X-Forwarded-For` (XFF) header instead of the origin IP address. This can be configured using the `x_forwarded` directive in the -listeners section of the homeserver.yaml configuration file. +listeners section of the `homeserver.yaml` configuration file. diff --git a/docs/README.md b/docs/README.md index 3c6ea48c66..e113f55d2a 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,7 +1,72 @@ # Synapse Documentation -This directory contains documentation specific to the `synapse` homeserver. +**The documentation is currently hosted [here](https://matrix-org.github.io/synapse).** +Please update any links to point to the new website instead. -All matrix-generic documentation now lives in its own project, located at [matrix-org/matrix-doc](https://github.com/matrix-org/matrix-doc) +## About -(Note: some items here may be moved to [matrix-org/matrix-doc](https://github.com/matrix-org/matrix-doc) at some point in the future.) +This directory currently holds a series of markdown files documenting how to install, use +and develop Synapse, the reference Matrix homeserver. The documentation is readable directly +from this repository, but it is recommended to instead browse through the +[website](https://matrix-org.github.io/synapse) for easier discoverability. + +## Adding to the documentation + +Most of the documentation currently exists as top-level files, as when organising them into +a structured website, these files were kept in place so that existing links would not break. +The rest of the documentation is stored in folders, such as `setup`, `usage`, and `development` +etc. **All new documentation files should be placed in structured folders.** For example: + +To create a new user-facing documentation page about a new Single Sign-On protocol named +"MyCoolProtocol", one should create a new file with a relevant name, such as "my_cool_protocol.md". +This file might fit into the documentation structure at: + +- Usage + - Configuration + - User Authentication + - Single Sign-On + - **My Cool Protocol** + +Given that, one would place the new file under +`usage/configuration/user_authentication/single_sign_on/my_cool_protocol.md`. 
+ +Note that the structure of the documentation (and thus the left sidebar on the website) is determined +by the list in [SUMMARY.md](SUMMARY.md). The final thing to do when adding a new page is to add a new +line linking to the new documentation file: + +```markdown +- [My Cool Protocol](usage/configuration/user_authentication/single_sign_on/my_cool_protocol.md) +``` + +## Building the documentation + +The documentation is built with [mdbook](https://rust-lang.github.io/mdBook/), and the outline of the +documentation is determined by the structure of [SUMMARY.md](SUMMARY.md). + +First, [get mdbook](https://github.com/rust-lang/mdBook#installation). Then, **from the root of the repository**, +build the documentation with: + +```sh +mdbook build +``` + +The rendered contents will be outputted to a new `book/` directory at the root of the repository. You can +browse the book by opening `book/index.html` in a web browser. + +You can also have mdbook host the docs on a local webserver with hot-reload functionality via: + +```sh +mdbook serve +``` + +The URL at which the docs can be viewed at will be logged. + +## Configuration and theming + +The look and behaviour of the website is configured by the [book.toml](../book.toml) file +at the root of the repository. See +[mdbook's documentation on configuration](https://rust-lang.github.io/mdBook/format/config.html) +for available options. + +The site can be themed and additionally extended with extra UI and features. See +[website_files/README.md](website_files/README.md) for details. diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md new file mode 100644 index 0000000000..8f39ae0270 --- /dev/null +++ b/docs/SUMMARY.md @@ -0,0 +1,87 @@ +# Summary + +# Introduction +- [Welcome and Overview](welcome_and_overview.md) + +# Setup + - [Installation](setup/installation.md) + - [Using Postgres](postgres.md) + - [Configuring a Reverse Proxy](reverse_proxy.md) + - [Configuring a Turn Server](turn-howto.md) + - [Delegation](delegate.md) + +# Upgrading + - [Upgrading between Synapse Versions](upgrading/README.md) + - [Upgrading from pre-Synapse 1.0](MSC1711_certificates_FAQ.md) + +# Usage + - [Federation](federate.md) + - [Configuration](usage/configuration/README.md) + - [Homeserver Sample Config File](usage/configuration/homeserver_sample_config.md) + - [Logging Sample Config File](usage/configuration/logging_sample_config.md) + - [Structured Logging](structured_logging.md) + - [User Authentication](usage/configuration/user_authentication/README.md) + - [Single-Sign On]() + - [OpenID Connect](openid.md) + - [SAML]() + - [CAS]() + - [SSO Mapping Providers](sso_mapping_providers.md) + - [Password Auth Providers](password_auth_providers.md) + - [JSON Web Tokens](jwt.md) + - [Registration Captcha](CAPTCHA_SETUP.md) + - [Application Services](application_services.md) + - [Server Notices](server_notices.md) + - [Consent Tracking](consent_tracking.md) + - [URL Previews](url_previews.md) + - [User Directory](user_directory.md) + - [Message Retention Policies](message_retention_policies.md) + - [Pluggable Modules]() + - [Third Party Rules]() + - [Spam Checker](spam_checker.md) + - [Presence Router](presence_router_module.md) + - [Media Storage Providers]() + - [Workers](workers.md) + - [Using `synctl` with Workers](synctl_workers.md) + - [Systemd](systemd-with-workers/README.md) + - [Administration](usage/administration/README.md) + - [Admin API](usage/administration/admin_api/README.md) + - [Account Validity](admin_api/account_validity.md) + - [Delete 
Group](admin_api/delete_group.md) + - [Event Reports](admin_api/event_reports.md) + - [Media](admin_api/media_admin_api.md) + - [Purge History](admin_api/purge_history_api.md) + - [Purge Rooms](admin_api/purge_room.md) + - [Register Users](admin_api/register_api.md) + - [Manipulate Room Membership](admin_api/room_membership.md) + - [Rooms](admin_api/rooms.md) + - [Server Notices](admin_api/server_notices.md) + - [Shutdown Room](admin_api/shutdown_room.md) + - [Statistics](admin_api/statistics.md) + - [Users](admin_api/user_admin_api.md) + - [Server Version](admin_api/version_api.md) + - [Manhole](manhole.md) + - [Monitoring](metrics-howto.md) + - [Scripts]() + +# Development + - [Contributing Guide](development/contributing_guide.md) + - [Code Style](code_style.md) + - [Git Usage](dev/git.md) + - [Testing]() + - [OpenTracing](opentracing.md) + - [Synapse Architecture]() + - [Log Contexts](log_contexts.md) + - [Replication](replication.md) + - [TCP Replication](tcp_replication.md) + - [Internal Documentation](development/internal_documentation/README.md) + - [Single Sign-On]() + - [SAML](dev/saml.md) + - [CAS](dev/cas.md) + - [State Resolution]() + - [The Auth Chain Difference Algorithm](auth_chain_difference_algorithm.md) + - [Media Repository](media_repository.md) + - [Room and User Statistics](room_and_user_statistics.md) + - [Scripts]() + +# Other + - [Dependency Deprecation Policy](deprecation_policy.md) \ No newline at end of file diff --git a/docs/admin_api/README.rst b/docs/admin_api/README.rst index 9587bee0ce..37cee87d32 100644 --- a/docs/admin_api/README.rst +++ b/docs/admin_api/README.rst @@ -1,28 +1,14 @@ Admin APIs ========== -This directory includes documentation for the various synapse specific admin -APIs available. - -Authenticating as a server admin --------------------------------- - -Many of the API calls in the admin api will require an `access_token` for a -server admin. (Note that a server admin is distinct from a room admin.) - -A user can be marked as a server admin by updating the database directly, e.g.: - -.. code-block:: sql +**Note**: The latest documentation can be viewed `here `_. +See `docs/README.md <../docs/README.md>`_ for more information. - UPDATE users SET admin = 1 WHERE name = '@foo:bar.com'; +**Please update links to point to the website instead.** Existing files in this directory +are preserved to maintain historical links, but may be moved in the future. -A new server admin user can also be created using the -``register_new_matrix_user`` script. - -Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings. - -Once you have your `access_token`, to include it in a request, the best option is to add the token to a request header: - -``curl --header "Authorization: Bearer " `` +This directory includes documentation for the various synapse specific admin +APIs available. Updates to the existing Admin API documentation should still +be made to these files, but any new documentation files should instead be placed under +`docs/usage/administration/admin_api <../docs/usage/administration/admin_api>`_. -Fore more details, please refer to the complete `matrix spec documentation `_. diff --git a/docs/admin_api/account_validity.md b/docs/admin_api/account_validity.md new file mode 100644 index 0000000000..b74b5d0c1a --- /dev/null +++ b/docs/admin_api/account_validity.md @@ -0,0 +1,42 @@ +# Account validity API + +This API allows a server administrator to manage the validity of an account. 
To +use it, you must enable the account validity feature (under +`account_validity`) in Synapse's configuration. + +## Renew account + +This API extends the validity of an account by as much time as configured in the +`period` parameter from the `account_validity` configuration. + +The API is: + +``` +POST /_synapse/admin/v1/account_validity/validity +``` + +with the following body: + +```json +{ + "user_id": "", + "expiration_ts": 0, + "enable_renewal_emails": true +} +``` + + +`expiration_ts` is an optional parameter and overrides the expiration date, +which otherwise defaults to now + validity period. + +`enable_renewal_emails` is also an optional parameter and enables/disables +sending renewal emails to the user. Defaults to true. + +The API returns with the new expiration date for this account, as a timestamp in +milliseconds since epoch: + +```json +{ + "expiration_ts": 0 +} +``` diff --git a/docs/admin_api/account_validity.rst b/docs/admin_api/account_validity.rst deleted file mode 100644 index 7559de4c57..0000000000 --- a/docs/admin_api/account_validity.rst +++ /dev/null @@ -1,42 +0,0 @@ -Account validity API -==================== - -This API allows a server administrator to manage the validity of an account. To -use it, you must enable the account validity feature (under -``account_validity``) in Synapse's configuration. - -Renew account -------------- - -This API extends the validity of an account by as much time as configured in the -``period`` parameter from the ``account_validity`` configuration. - -The API is:: - - POST /_synapse/admin/v1/account_validity/validity - -with the following body: - -.. code:: json - - { - "user_id": "", - "expiration_ts": 0, - "enable_renewal_emails": true - } - - -``expiration_ts`` is an optional parameter and overrides the expiration date, -which otherwise defaults to now + validity period. - -``enable_renewal_emails`` is also an optional parameter and enables/disables -sending renewal emails to the user. Defaults to true. - -The API returns with the new expiration date for this account, as a timestamp in -milliseconds since epoch: - -.. code:: json - - { - "expiration_ts": 0 - } diff --git a/docs/admin_api/delete_group.md b/docs/admin_api/delete_group.md index c061678e75..9c335ff759 100644 --- a/docs/admin_api/delete_group.md +++ b/docs/admin_api/delete_group.md @@ -11,4 +11,4 @@ POST /_synapse/admin/v1/delete_group/ ``` To use it, you will need to authenticate by providing an `access_token` for a -server admin: see [README.rst](README.rst). +server admin: see [Admin API](../../usage/administration/admin_api). diff --git a/docs/admin_api/event_reports.md b/docs/admin_api/event_reports.md index 0159098138..186139185e 100644 --- a/docs/admin_api/event_reports.md +++ b/docs/admin_api/event_reports.md @@ -7,7 +7,7 @@ The api is: GET /_synapse/admin/v1/event_reports?from=0&limit=10 ``` To use it, you will need to authenticate by providing an `access_token` for a -server admin: see [README.rst](README.rst). +server admin: see [Admin API](../../usage/administration/admin_api). It returns a JSON body like the following: @@ -75,9 +75,9 @@ The following fields are returned in the JSON response body: * `name`: string - The name of the room. * `event_id`: string - The ID of the reported event. * `user_id`: string - This is the user who reported the event and wrote the reason. -* `reason`: string - Comment made by the `user_id` in this report. May be blank. +* `reason`: string - Comment made by the `user_id` in this report. May be blank or `null`. 
* `score`: integer - Content is reported based upon a negative score, where -100 is - "most offensive" and 0 is "inoffensive". + "most offensive" and 0 is "inoffensive". May be `null`. * `sender`: string - This is the ID of the user who sent the original message/event that was reported. * `canonical_alias`: string - The canonical alias of the room. `null` if the room does not @@ -95,7 +95,7 @@ The api is: GET /_synapse/admin/v1/event_reports/ ``` To use it, you will need to authenticate by providing an `access_token` for a -server admin: see [README.rst](README.rst). +server admin: see [Admin API](../../usage/administration/admin_api). It returns a JSON body like the following: diff --git a/docs/admin_api/media_admin_api.md b/docs/admin_api/media_admin_api.md index 9dbec68c19..9ab5269881 100644 --- a/docs/admin_api/media_admin_api.md +++ b/docs/admin_api/media_admin_api.md @@ -4,9 +4,11 @@ * [List all media uploaded by a user](#list-all-media-uploaded-by-a-user) - [Quarantine media](#quarantine-media) * [Quarantining media by ID](#quarantining-media-by-id) + * [Remove media from quarantine by ID](#remove-media-from-quarantine-by-id) * [Quarantining media in a room](#quarantining-media-in-a-room) * [Quarantining all media of a user](#quarantining-all-media-of-a-user) * [Protecting media from being quarantined](#protecting-media-from-being-quarantined) + * [Unprotecting media from being quarantined](#unprotecting-media-from-being-quarantined) - [Delete local media](#delete-local-media) * [Delete a specific local media](#delete-a-specific-local-media) * [Delete local media by date or size](#delete-local-media-by-date-or-size) @@ -26,7 +28,7 @@ The API is: GET /_synapse/admin/v1/room//media ``` To use it, you will need to authenticate by providing an `access_token` for a -server admin: see [README.rst](README.rst). +server admin: see [Admin API](../../usage/administration/admin_api). The API returns a JSON body like the following: ```json @@ -76,6 +78,27 @@ Response: {} ``` +## Remove media from quarantine by ID + +This API removes a single piece of local or remote media from quarantine. + +Request: + +``` +POST /_synapse/admin/v1/media/unquarantine// + +{} +``` + +Where `server_name` is in the form of `example.org`, and `media_id` is in the +form of `abcdefg12345...`. + +Response: + +```json +{} +``` + ## Quarantining media in a room This API quarantines all local and remote media in a room. @@ -159,6 +182,26 @@ Response: {} ``` +## Unprotecting media from being quarantined + +This API reverts the protection of a media. + +Request: + +``` +POST /_synapse/admin/v1/media/unprotect/ + +{} +``` + +Where `media_id` is in the form of `abcdefg12345...`. + +Response: + +```json +{} +``` + # Delete local media This API deletes the *local* media from the disk of your own server. This includes any local thumbnails and copies of media downloaded from @@ -268,7 +311,7 @@ The following fields are returned in the JSON response body: * `deleted`: integer - The number of media items successfully deleted To use it, you will need to authenticate by providing an `access_token` for a -server admin: see [README.rst](README.rst). +server admin: see [Admin API](../../usage/administration/admin_api). If the user re-requests purged remote media, synapse will re-request the media from the originating server. 
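As a concrete illustration of calling one of the quarantine endpoints documented above, the following minimal sketch removes a single media item from quarantine using Python and the `requests` library. The homeserver URL, admin access token, server name and media ID are placeholders, and the use of `requests` is an assumption made purely for illustration; any HTTP client works.

```python
import requests

# Placeholder values for illustration only.
HOMESERVER = "http://localhost:8008"
ADMIN_TOKEN = "<admin_access_token>"
SERVER_NAME = "example.org"
MEDIA_ID = "abcdefg12345"

# POST /_synapse/admin/v1/media/unquarantine/<server_name>/<media_id>
# with an empty JSON body, authenticated as a server admin.
resp = requests.post(
    f"{HOMESERVER}/_synapse/admin/v1/media/unquarantine/{SERVER_NAME}/{MEDIA_ID}",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    json={},
)
resp.raise_for_status()
print(resp.json())  # an empty JSON object on success
```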
diff --git a/docs/admin_api/purge_history_api.rst b/docs/admin_api/purge_history_api.md similarity index 56% rename from docs/admin_api/purge_history_api.rst rename to docs/admin_api/purge_history_api.md index 92cd05f2a0..25decc3e61 100644 --- a/docs/admin_api/purge_history_api.rst +++ b/docs/admin_api/purge_history_api.md @@ -1,5 +1,4 @@ -Purge History API -================= +# Purge History API The purge history API allows server admins to purge historic events from their database, reclaiming disk space. @@ -13,10 +12,12 @@ delete the last message in a room. The API is: -``POST /_synapse/admin/v1/purge_history/[/]`` +``` +POST /_synapse/admin/v1/purge_history/[/] +``` -To use it, you will need to authenticate by providing an ``access_token`` for a -server admin: see `README.rst `_. +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](../../usage/administration/admin_api) By default, events sent by local users are not deleted, as they may represent the only copies of this content in existence. (Events sent by remote users are @@ -24,54 +25,54 @@ deleted.) Room state data (such as joins, leaves, topic) is always preserved. -To delete local message events as well, set ``delete_local_events`` in the body: +To delete local message events as well, set `delete_local_events` in the body: -.. code:: json - - { - "delete_local_events": true - } +``` +{ + "delete_local_events": true +} +``` The caller must specify the point in the room to purge up to. This can be specified by including an event_id in the URI, or by setting a -``purge_up_to_event_id`` or ``purge_up_to_ts`` in the request body. If an event +`purge_up_to_event_id` or `purge_up_to_ts` in the request body. If an event id is given, that event (and others at the same graph depth) will be retained. -If ``purge_up_to_ts`` is given, it should be a timestamp since the unix epoch, +If `purge_up_to_ts` is given, it should be a timestamp since the unix epoch, in milliseconds. The API starts the purge running, and returns immediately with a JSON body with a purge id: -.. code:: json - - { - "purge_id": "" - } +```json +{ + "purge_id": "" +} +``` -Purge status query ------------------- +## Purge status query It is possible to poll for updates on recent purges with a second API; -``GET /_synapse/admin/v1/purge_history_status/`` +``` +GET /_synapse/admin/v1/purge_history_status/ +``` -Again, you will need to authenticate by providing an ``access_token`` for a +Again, you will need to authenticate by providing an `access_token` for a server admin. This API returns a JSON body like the following: -.. code:: json - - { - "status": "active" - } +```json +{ + "status": "active" +} +``` -The status will be one of ``active``, ``complete``, or ``failed``. +The status will be one of `active`, `complete`, or `failed`. -Reclaim disk space (Postgres) ------------------------------ +## Reclaim disk space (Postgres) To reclaim the disk space and return it to the operating system, you need to run `VACUUM FULL;` on the database. -https://www.postgresql.org/docs/current/sql-vacuum.html + diff --git a/docs/admin_api/register_api.md b/docs/admin_api/register_api.md new file mode 100644 index 0000000000..c346090bb1 --- /dev/null +++ b/docs/admin_api/register_api.md @@ -0,0 +1,73 @@ +# Shared-Secret Registration + +This API allows for the creation of users in an administrative and +non-interactive way. This is generally used for bootstrapping a Synapse +instance with administrator accounts. 
+ +To authenticate yourself to the server, you will need both the shared secret +(`registration_shared_secret` in the homeserver configuration), and a +one-time nonce. If the registration shared secret is not configured, this API +is not enabled. + +To fetch the nonce, you need to request one from the API: + +``` +> GET /_synapse/admin/v1/register + +< {"nonce": "thisisanonce"} +``` + +Once you have the nonce, you can make a `POST` to the same URL with a JSON +body containing the nonce, username, password, whether they are an admin +(optional, False by default), and a HMAC digest of the content. Also you can +set the displayname (optional, `username` by default). + +As an example: + +``` +> POST /_synapse/admin/v1/register +> { + "nonce": "thisisanonce", + "username": "pepper_roni", + "displayname": "Pepper Roni", + "password": "pizza", + "admin": true, + "mac": "mac_digest_here" + } + +< { + "access_token": "token_here", + "user_id": "@pepper_roni:localhost", + "home_server": "test", + "device_id": "device_id_here" + } +``` + +The MAC is the hex digest output of the HMAC-SHA1 algorithm, with the key being +the shared secret and the content being the nonce, user, password, either the +string "admin" or "notadmin", and optionally the user_type +each separated by NULs. For an example of generation in Python: + +```python +import hmac, hashlib + +def generate_mac(nonce, user, password, admin=False, user_type=None): + + mac = hmac.new( + key=shared_secret, + digestmod=hashlib.sha1, + ) + + mac.update(nonce.encode('utf8')) + mac.update(b"\x00") + mac.update(user.encode('utf8')) + mac.update(b"\x00") + mac.update(password.encode('utf8')) + mac.update(b"\x00") + mac.update(b"admin" if admin else b"notadmin") + if user_type: + mac.update(b"\x00") + mac.update(user_type.encode('utf8')) + + return mac.hexdigest() +``` \ No newline at end of file diff --git a/docs/admin_api/register_api.rst b/docs/admin_api/register_api.rst deleted file mode 100644 index c3057b204b..0000000000 --- a/docs/admin_api/register_api.rst +++ /dev/null @@ -1,68 +0,0 @@ -Shared-Secret Registration -========================== - -This API allows for the creation of users in an administrative and -non-interactive way. This is generally used for bootstrapping a Synapse -instance with administrator accounts. - -To authenticate yourself to the server, you will need both the shared secret -(``registration_shared_secret`` in the homeserver configuration), and a -one-time nonce. If the registration shared secret is not configured, this API -is not enabled. - -To fetch the nonce, you need to request one from the API:: - - > GET /_synapse/admin/v1/register - - < {"nonce": "thisisanonce"} - -Once you have the nonce, you can make a ``POST`` to the same URL with a JSON -body containing the nonce, username, password, whether they are an admin -(optional, False by default), and a HMAC digest of the content. Also you can -set the displayname (optional, ``username`` by default). 
- -As an example:: - - > POST /_synapse/admin/v1/register - > { - "nonce": "thisisanonce", - "username": "pepper_roni", - "displayname": "Pepper Roni", - "password": "pizza", - "admin": true, - "mac": "mac_digest_here" - } - - < { - "access_token": "token_here", - "user_id": "@pepper_roni:localhost", - "home_server": "test", - "device_id": "device_id_here" - } - -The MAC is the hex digest output of the HMAC-SHA1 algorithm, with the key being -the shared secret and the content being the nonce, user, password, either the -string "admin" or "notadmin", and optionally the user_type -each separated by NULs. For an example of generation in Python:: - - import hmac, hashlib - - def generate_mac(nonce, user, password, admin=False, user_type=None): - - mac = hmac.new( - key=shared_secret, - digestmod=hashlib.sha1, - ) - - mac.update(nonce.encode('utf8')) - mac.update(b"\x00") - mac.update(user.encode('utf8')) - mac.update(b"\x00") - mac.update(password.encode('utf8')) - mac.update(b"\x00") - mac.update(b"admin" if admin else b"notadmin") - if user_type: - mac.update(b"\x00") - mac.update(user_type.encode('utf8')) - - return mac.hexdigest() diff --git a/docs/admin_api/room_membership.md b/docs/admin_api/room_membership.md index b6746ff5e4..ed40366099 100644 --- a/docs/admin_api/room_membership.md +++ b/docs/admin_api/room_membership.md @@ -24,7 +24,7 @@ POST /_synapse/admin/v1/join/ ``` To use it, you will need to authenticate by providing an `access_token` for a -server admin: see [README.rst](README.rst). +server admin: see [Admin API](../../usage/administration/admin_api). Response: diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md index 5721210fee..dc007fa00e 100644 --- a/docs/admin_api/rooms.md +++ b/docs/admin_api/rooms.md @@ -443,7 +443,7 @@ with a body of: ``` To use it, you will need to authenticate by providing an ``access_token`` for a -server admin: see [README.rst](README.rst). +server admin: see [Admin API](../../usage/administration/admin_api). A response body like the following is returned: diff --git a/docs/admin_api/statistics.md b/docs/admin_api/statistics.md index d398a120fb..d93d52a3ac 100644 --- a/docs/admin_api/statistics.md +++ b/docs/admin_api/statistics.md @@ -10,7 +10,7 @@ GET /_synapse/admin/v1/statistics/users/media ``` To use it, you will need to authenticate by providing an `access_token` -for a server admin: see [README.rst](README.rst). +for a server admin: see [Admin API](../../usage/administration/admin_api). A response body like the following is returned: diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md new file mode 100644 index 0000000000..c835e4a0cd --- /dev/null +++ b/docs/admin_api/user_admin_api.md @@ -0,0 +1,1001 @@ +# User Admin API + +## Query User Account + +This API returns information about a specific user account. 
+ +The api is: + +``` +GET /_synapse/admin/v2/users/ +``` + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](../../usage/administration/admin_api) + +It returns a JSON body like the following: + +```json +{ + "displayname": "User", + "threepids": [ + { + "medium": "email", + "address": "" + }, + { + "medium": "email", + "address": "" + } + ], + "avatar_url": "", + "admin": 0, + "deactivated": 0, + "shadow_banned": 0, + "password_hash": "$2b$12$p9B4GkqYdRTPGD", + "creation_ts": 1560432506, + "appservice_id": null, + "consent_server_notice_sent": null, + "consent_version": null +} +``` + +URL parameters: + +- `user_id`: fully-qualified user id: for example, `@user:server.com`. + +## Create or modify Account + +This API allows an administrator to create or modify a user account with a +specific `user_id`. + +This api is: + +``` +PUT /_synapse/admin/v2/users/ +``` + +with a body of: + +```json +{ + "password": "user_password", + "displayname": "User", + "threepids": [ + { + "medium": "email", + "address": "" + }, + { + "medium": "email", + "address": "" + } + ], + "avatar_url": "", + "admin": false, + "deactivated": false +} +``` + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](../../usage/administration/admin_api) + +URL parameters: + +- `user_id`: fully-qualified user id: for example, `@user:server.com`. + +Body parameters: + +- `password`, optional. If provided, the user's password is updated and all + devices are logged out. + +- `displayname`, optional, defaults to the value of `user_id`. + +- `threepids`, optional, allows setting the third-party IDs (email, msisdn) + belonging to a user. + +- `avatar_url`, optional, must be a + [MXC URI](https://matrix.org/docs/spec/client_server/r0.6.0#matrix-content-mxc-uris). + +- `admin`, optional, defaults to `false`. + +- `deactivated`, optional. If unspecified, deactivation state will be left + unchanged on existing accounts and set to `false` for new accounts. + A user cannot be erased by deactivating with this API. For details on + deactivating users see [Deactivate Account](#deactivate-account). + +If the user already exists then optional parameters default to the current value. + +In order to re-activate an account `deactivated` must be set to `false`. If +users do not login via single-sign-on, a new `password` must be provided. + +## List Accounts + +This API returns all local user accounts. +By default, the response is ordered by ascending user ID. + +``` +GET /_synapse/admin/v2/users?from=0&limit=10&guests=false +``` + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](../../usage/administration/admin_api) + +A response body like the following is returned: + +```json +{ + "users": [ + { + "name": "", + "is_guest": 0, + "admin": 0, + "user_type": null, + "deactivated": 0, + "shadow_banned": 0, + "displayname": "", + "avatar_url": null + }, { + "name": "", + "is_guest": 0, + "admin": 1, + "user_type": null, + "deactivated": 0, + "shadow_banned": 0, + "displayname": "", + "avatar_url": "" + } + ], + "next_token": "100", + "total": 200 +} +``` + +To paginate, check for `next_token` and if present, call the endpoint again +with `from` set to the value of `next_token`. This will return a new page. + +If the endpoint does not return a `next_token` then there are no more users +to paginate through. 
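The pagination behaviour described above can be driven by a small loop that follows `next_token`. Below is a minimal sketch, assuming the `requests` library and placeholder homeserver and token values; it is an illustration only, not part of the API itself.

```python
import requests

# Placeholder values for illustration only.
HOMESERVER = "http://localhost:8008"
ADMIN_TOKEN = "<admin_access_token>"
HEADERS = {"Authorization": f"Bearer {ADMIN_TOKEN}"}

users = []
params = {"from": 0, "limit": 10, "guests": "false"}
while True:
    resp = requests.get(
        f"{HOMESERVER}/_synapse/admin/v2/users", headers=HEADERS, params=params
    )
    resp.raise_for_status()
    body = resp.json()
    users.extend(body["users"])
    # No next_token in the response means there are no more users to paginate through.
    if "next_token" not in body:
        break
    params["from"] = body["next_token"]

print(f"Fetched {len(users)} users in total")
```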
+ +**Parameters** + +The following parameters should be set in the URL: + +- `user_id` - Is optional and filters to only return users with user IDs + that contain this value. This parameter is ignored when using the `name` parameter. +- `name` - Is optional and filters to only return users with user ID localparts + **or** displaynames that contain this value. +- `guests` - string representing a bool - Is optional and if `false` will **exclude** guest users. + Defaults to `true` to include guest users. +- `deactivated` - string representing a bool - Is optional and if `true` will **include** deactivated users. + Defaults to `false` to exclude deactivated users. +- `limit` - string representing a positive integer - Is optional but is used for pagination, + denoting the maximum number of items to return in this call. Defaults to `100`. +- `from` - string representing a positive integer - Is optional but used for pagination, + denoting the offset in the returned results. This should be treated as an opaque value and + not explicitly set to anything other than the return value of `next_token` from a previous call. + Defaults to `0`. +- `order_by` - The method by which to sort the returned list of users. + If the ordered field has duplicates, the second order is always by ascending `name`, + which guarantees a stable ordering. Valid values are: + + - `name` - Users are ordered alphabetically by `name`. This is the default. + - `is_guest` - Users are ordered by `is_guest` status. + - `admin` - Users are ordered by `admin` status. + - `user_type` - Users are ordered alphabetically by `user_type`. + - `deactivated` - Users are ordered by `deactivated` status. + - `shadow_banned` - Users are ordered by `shadow_banned` status. + - `displayname` - Users are ordered alphabetically by `displayname`. + - `avatar_url` - Users are ordered alphabetically by avatar URL. + +- `dir` - Direction of media order. Either `f` for forwards or `b` for backwards. + Setting this value to `b` will reverse the above sort order. Defaults to `f`. + +Caution. The database only has indexes on the columns `name` and `created_ts`. +This means that if a different sort order is used (`is_guest`, `admin`, +`user_type`, `deactivated`, `shadow_banned`, `avatar_url` or `displayname`), +this can cause a large load on the database, especially for large environments. + +**Response** + +The following fields are returned in the JSON response body: + +- `users` - An array of objects, each containing information about an user. + User objects contain the following fields: + + - `name` - string - Fully-qualified user ID (ex. `@user:server.com`). + - `is_guest` - bool - Status if that user is a guest account. + - `admin` - bool - Status if that user is a server administrator. + - `user_type` - string - Type of the user. Normal users are type `None`. + This allows user type specific behaviour. There are also types `support` and `bot`. + - `deactivated` - bool - Status if that user has been marked as deactivated. + - `shadow_banned` - bool - Status if that user has been marked as shadow banned. + - `displayname` - string - The user's display name if they have set one. + - `avatar_url` - string - The user's avatar URL if they have set one. + +- `next_token`: string representing a positive integer - Indication for pagination. See above. +- `total` - integer - Total number of media. + + +## Query current sessions for a user + +This API returns information about the active sessions for a specific user. 
+ +The endpoints are: + +``` +GET /_synapse/admin/v1/whois/ +``` + +and: + +``` +GET /_matrix/client/r0/admin/whois/ +``` + +See also: [Client Server +API Whois](https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid). + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](../../usage/administration/admin_api) + +It returns a JSON body like the following: + +```json +{ + "user_id": "", + "devices": { + "": { + "sessions": [ + { + "connections": [ + { + "ip": "1.2.3.4", + "last_seen": 1417222374433, + "user_agent": "Mozilla/5.0 ..." + }, + { + "ip": "1.2.3.10", + "last_seen": 1417222374500, + "user_agent": "Dalvik/2.1.0 ..." + } + ] + } + ] + } + } +} +``` + +`last_seen` is measured in milliseconds since the Unix epoch. + +## Deactivate Account + +This API deactivates an account. It removes active access tokens, resets the +password, and deletes third-party IDs (to prevent the user requesting a +password reset). + +It can also mark the user as GDPR-erased. This means messages sent by the +user will still be visible by anyone that was in the room when these messages +were sent, but hidden from users joining the room afterwards. + +The api is: + +``` +POST /_synapse/admin/v1/deactivate/ +``` + +with a body of: + +```json +{ + "erase": true +} +``` + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](../../usage/administration/admin_api) + +The erase parameter is optional and defaults to `false`. +An empty body may be passed for backwards compatibility. + +The following actions are performed when deactivating an user: + +- Try to unpind 3PIDs from the identity server +- Remove all 3PIDs from the homeserver +- Delete all devices and E2EE keys +- Delete all access tokens +- Delete the password hash +- Removal from all rooms the user is a member of +- Remove the user from the user directory +- Reject all pending invites +- Remove all account validity information related to the user + +The following additional actions are performed during deactivation if `erase` +is set to `true`: + +- Remove the user's display name +- Remove the user's avatar URL +- Mark the user as erased + + +## Reset password + +Changes the password of another user. This will automatically log the user out of all their devices. + +The api is: + +``` +POST /_synapse/admin/v1/reset_password/ +``` + +with a body of: + +```json +{ + "new_password": "", + "logout_devices": true +} +``` + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](../../usage/administration/admin_api) + +The parameter `new_password` is required. +The parameter `logout_devices` is optional and defaults to `true`. + + +## Get whether a user is a server administrator or not + +The api is: + +``` +GET /_synapse/admin/v1/users//admin +``` + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](../../usage/administration/admin_api) + +A response body like the following is returned: + +```json +{ + "admin": true +} +``` + + +## Change whether a user is a server administrator or not + +Note that you cannot demote yourself. 
+ +The api is: + +``` +PUT /_synapse/admin/v1/users//admin +``` + +with a body of: + +```json +{ + "admin": true +} +``` + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](../../usage/administration/admin_api) + + +## List room memberships of a user + +Gets a list of all `room_id` that a specific `user_id` is member. + +The API is: + +``` +GET /_synapse/admin/v1/users//joined_rooms +``` + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](../../usage/administration/admin_api) + +A response body like the following is returned: + +```json + { + "joined_rooms": [ + "!DuGcnbhHGaSZQoNQR:matrix.org", + "!ZtSaPCawyWtxfWiIy:matrix.org" + ], + "total": 2 + } +``` + +The server returns the list of rooms of which the user and the server +are member. If the user is local, all the rooms of which the user is +member are returned. + +**Parameters** + +The following parameters should be set in the URL: + +- `user_id` - fully qualified: for example, `@user:server.com`. + +**Response** + +The following fields are returned in the JSON response body: + +- `joined_rooms` - An array of `room_id`. +- `total` - Number of rooms. + + +## List media of a user +Gets a list of all local media that a specific `user_id` has created. +By default, the response is ordered by descending creation date and ascending media ID. +The newest media is on top. You can change the order with parameters +`order_by` and `dir`. + +The API is: + +``` +GET /_synapse/admin/v1/users//media +``` + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](../../usage/administration/admin_api) + +A response body like the following is returned: + +```json +{ + "media": [ + { + "created_ts": 100400, + "last_access_ts": null, + "media_id": "qXhyRzulkwLsNHTbpHreuEgo", + "media_length": 67, + "media_type": "image/png", + "quarantined_by": null, + "safe_from_quarantine": false, + "upload_name": "test1.png" + }, + { + "created_ts": 200400, + "last_access_ts": null, + "media_id": "FHfiSnzoINDatrXHQIXBtahw", + "media_length": 67, + "media_type": "image/png", + "quarantined_by": null, + "safe_from_quarantine": false, + "upload_name": "test2.png" + } + ], + "next_token": 3, + "total": 2 +} +``` + +To paginate, check for `next_token` and if present, call the endpoint again +with `from` set to the value of `next_token`. This will return a new page. + +If the endpoint does not return a `next_token` then there are no more +reports to paginate through. + +**Parameters** + +The following parameters should be set in the URL: + +- `user_id` - string - fully qualified: for example, `@user:server.com`. +- `limit`: string representing a positive integer - Is optional but is used for pagination, + denoting the maximum number of items to return in this call. Defaults to `100`. +- `from`: string representing a positive integer - Is optional but used for pagination, + denoting the offset in the returned results. This should be treated as an opaque value and + not explicitly set to anything other than the return value of `next_token` from a previous call. + Defaults to `0`. +- `order_by` - The method by which to sort the returned list of media. + If the ordered field has duplicates, the second order is always by ascending `media_id`, + which guarantees a stable ordering. Valid values are: + + - `media_id` - Media are ordered alphabetically by `media_id`. 
+
+
+## List media of a user
+Gets a list of all local media that a specific `user_id` has created.
+By default, the response is ordered by descending creation date and ascending media ID.
+The newest media is on top. You can change the order with parameters
+`order_by` and `dir`.
+
+The API is:
+
+```
+GET /_synapse/admin/v1/users/<user_id>/media
+```
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](../../usage/administration/admin_api)
+
+A response body like the following is returned:
+
+```json
+{
+    "media": [
+        {
+            "created_ts": 100400,
+            "last_access_ts": null,
+            "media_id": "qXhyRzulkwLsNHTbpHreuEgo",
+            "media_length": 67,
+            "media_type": "image/png",
+            "quarantined_by": null,
+            "safe_from_quarantine": false,
+            "upload_name": "test1.png"
+        },
+        {
+            "created_ts": 200400,
+            "last_access_ts": null,
+            "media_id": "FHfiSnzoINDatrXHQIXBtahw",
+            "media_length": 67,
+            "media_type": "image/png",
+            "quarantined_by": null,
+            "safe_from_quarantine": false,
+            "upload_name": "test2.png"
+        }
+    ],
+    "next_token": 3,
+    "total": 2
+}
+```
+
+To paginate, check for `next_token` and if present, call the endpoint again
+with `from` set to the value of `next_token`. This will return a new page.
+
+If the endpoint does not return a `next_token` then there are no more
+media items to paginate through.
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `user_id` - string - fully qualified: for example, `@user:server.com`.
+- `limit`: string representing a positive integer - Is optional but is used for pagination,
+  denoting the maximum number of items to return in this call. Defaults to `100`.
+- `from`: string representing a positive integer - Is optional but used for pagination,
+  denoting the offset in the returned results. This should be treated as an opaque value and
+  not explicitly set to anything other than the return value of `next_token` from a previous call.
+  Defaults to `0`.
+- `order_by` - The method by which to sort the returned list of media.
+  If the ordered field has duplicates, the second order is always by ascending `media_id`,
+  which guarantees a stable ordering. Valid values are:
+
+  - `media_id` - Media are ordered alphabetically by `media_id`.
+  - `upload_name` - Media are ordered alphabetically by the name the media was uploaded with.
+  - `created_ts` - Media are ordered by when the content was uploaded in ms.
+    Smallest to largest. This is the default.
+  - `last_access_ts` - Media are ordered by when the content was last accessed in ms.
+    Smallest to largest.
+  - `media_length` - Media are ordered by length of the media in bytes.
+    Smallest to largest.
+  - `media_type` - Media are ordered alphabetically by MIME-type.
+  - `quarantined_by` - Media are ordered alphabetically by the user ID that
+    initiated the quarantine request for this media.
+  - `safe_from_quarantine` - Media are ordered by whether the media is safe
+    from quarantining.
+
+- `dir` - Direction of media order. Either `f` for forwards or `b` for backwards.
+  Setting this value to `b` will reverse the above sort order. Defaults to `f`.
+
+If neither `order_by` nor `dir` is set, the default order is newest media on top
+(corresponds to `order_by` = `created_ts` and `dir` = `b`).
+
+Caution. The database only has indexes on the columns `media_id`,
+`user_id` and `created_ts`. This means that if a different sort order is used
+(`upload_name`, `last_access_ts`, `media_length`, `media_type`,
+`quarantined_by` or `safe_from_quarantine`), this can cause a large load on the
+database, especially for large environments.
+
+**Response**
+
+The following fields are returned in the JSON response body:
+
+- `media` - An array of objects, each containing information about a media item.
+  Media objects contain the following fields:
+
+  - `created_ts` - integer - Timestamp when the content was uploaded in ms.
+  - `last_access_ts` - integer - Timestamp when the content was last accessed in ms.
+  - `media_id` - string - The id used to refer to the media.
+  - `media_length` - integer - Length of the media in bytes.
+  - `media_type` - string - The MIME-type of the media.
+  - `quarantined_by` - string - The user ID that initiated the quarantine request
+    for this media.
+  - `safe_from_quarantine` - bool - Whether this media is safe from quarantining.
+  - `upload_name` - string - The name the media was uploaded with.
+
+- `next_token`: integer - Indication for pagination. See above.
+- `total` - integer - Total number of media.
+
+## Login as a user
+
+Get an access token that can be used to authenticate as that user. Useful for
+when admins wish to do actions on behalf of a user.
+
+The API is:
+
+```
+POST /_synapse/admin/v1/users/<user_id>/login
+{}
+```
+
+An optional `valid_until_ms` field can be specified in the request body as an
+integer timestamp that specifies when the token should expire. By default tokens
+do not expire.
+
+A response body like the following is returned:
+
+```json
+{
+    "access_token": "<opaque_access_token_string>"
+}
+```
+
+This API does *not* generate a new device for the user, and so will not appear
+in their `/devices` list, and in general the target user should not be able to
+tell that someone has logged in as them.
+
+To expire the token, call the standard `/logout` API with the token.
+
+Note: The token will expire if the *admin* user calls `/logout/all` from any
+of their devices, but the token will *not* expire if the target user does the
+same.
+
+
+## User devices
+
+### List all devices
+Gets information about all devices for a specific `user_id`.
+ +The API is: + +``` +GET /_synapse/admin/v2/users//devices +``` + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](../../usage/administration/admin_api) + +A response body like the following is returned: + +```json +{ + "devices": [ + { + "device_id": "QBUAZIFURK", + "display_name": "android", + "last_seen_ip": "1.2.3.4", + "last_seen_ts": 1474491775024, + "user_id": "" + }, + { + "device_id": "AUIECTSRND", + "display_name": "ios", + "last_seen_ip": "1.2.3.5", + "last_seen_ts": 1474491775025, + "user_id": "" + } + ], + "total": 2 +} +``` + +**Parameters** + +The following parameters should be set in the URL: + +- `user_id` - fully qualified: for example, `@user:server.com`. + +**Response** + +The following fields are returned in the JSON response body: + +- `devices` - An array of objects, each containing information about a device. + Device objects contain the following fields: + + - `device_id` - Identifier of device. + - `display_name` - Display name set by the user for this device. + Absent if no name has been set. + - `last_seen_ip` - The IP address where this device was last seen. + (May be a few minutes out of date, for efficiency reasons). + - `last_seen_ts` - The timestamp (in milliseconds since the unix epoch) when this + devices was last seen. (May be a few minutes out of date, for efficiency reasons). + - `user_id` - Owner of device. + +- `total` - Total number of user's devices. + +### Delete multiple devices +Deletes the given devices for a specific `user_id`, and invalidates +any access token associated with them. + +The API is: + +``` +POST /_synapse/admin/v2/users//delete_devices + +{ + "devices": [ + "QBUAZIFURK", + "AUIECTSRND" + ], +} +``` + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](../../usage/administration/admin_api) + +An empty JSON dict is returned. + +**Parameters** + +The following parameters should be set in the URL: + +- `user_id` - fully qualified: for example, `@user:server.com`. + +The following fields are required in the JSON request body: + +- `devices` - The list of device IDs to delete. + +### Show a device +Gets information on a single device, by `device_id` for a specific `user_id`. + +The API is: + +``` +GET /_synapse/admin/v2/users//devices/ +``` + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](../../usage/administration/admin_api) + +A response body like the following is returned: + +```json +{ + "device_id": "", + "display_name": "android", + "last_seen_ip": "1.2.3.4", + "last_seen_ts": 1474491775024, + "user_id": "" +} +``` + +**Parameters** + +The following parameters should be set in the URL: + +- `user_id` - fully qualified: for example, `@user:server.com`. +- `device_id` - The device to retrieve. + +**Response** + +The following fields are returned in the JSON response body: + +- `device_id` - Identifier of device. +- `display_name` - Display name set by the user for this device. + Absent if no name has been set. +- `last_seen_ip` - The IP address where this device was last seen. + (May be a few minutes out of date, for efficiency reasons). +- `last_seen_ts` - The timestamp (in milliseconds since the unix epoch) when this + devices was last seen. (May be a few minutes out of date, for efficiency reasons). +- `user_id` - Owner of device. + +### Update a device +Updates the metadata on the given `device_id` for a specific `user_id`. 
+ +The API is: + +``` +PUT /_synapse/admin/v2/users//devices/ + +{ + "display_name": "My other phone" +} +``` + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](../../usage/administration/admin_api) + +An empty JSON dict is returned. + +**Parameters** + +The following parameters should be set in the URL: + +- `user_id` - fully qualified: for example, `@user:server.com`. +- `device_id` - The device to update. + +The following fields are required in the JSON request body: + +- `display_name` - The new display name for this device. If not given, + the display name is unchanged. + +### Delete a device +Deletes the given `device_id` for a specific `user_id`, +and invalidates any access token associated with it. + +The API is: + +``` +DELETE /_synapse/admin/v2/users//devices/ + +{} +``` + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](../../usage/administration/admin_api) + +An empty JSON dict is returned. + +**Parameters** + +The following parameters should be set in the URL: + +- `user_id` - fully qualified: for example, `@user:server.com`. +- `device_id` - The device to delete. + +## List all pushers +Gets information about all pushers for a specific `user_id`. + +The API is: + +``` +GET /_synapse/admin/v1/users//pushers +``` + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](../../usage/administration/admin_api) + +A response body like the following is returned: + +```json +{ + "pushers": [ + { + "app_display_name":"HTTP Push Notifications", + "app_id":"m.http", + "data": { + "url":"example.com" + }, + "device_display_name":"pushy push", + "kind":"http", + "lang":"None", + "profile_tag":"", + "pushkey":"a@example.com" + } + ], + "total": 1 +} +``` + +**Parameters** + +The following parameters should be set in the URL: + +- `user_id` - fully qualified: for example, `@user:server.com`. + +**Response** + +The following fields are returned in the JSON response body: + +- `pushers` - An array containing the current pushers for the user + + - `app_display_name` - string - A string that will allow the user to identify + what application owns this pusher. + + - `app_id` - string - This is a reverse-DNS style identifier for the application. + Max length, 64 chars. + + - `data` - A dictionary of information for the pusher implementation itself. + + - `url` - string - Required if `kind` is `http`. The URL to use to send + notifications to. + + - `format` - string - The format to use when sending notifications to the + Push Gateway. + + - `device_display_name` - string - A string that will allow the user to identify + what device owns this pusher. + + - `profile_tag` - string - This string determines which set of device specific rules + this pusher executes. + + - `kind` - string - The kind of pusher. "http" is a pusher that sends HTTP pokes. + - `lang` - string - The preferred language for receiving notifications + (e.g. 'en' or 'en-US') + + - `profile_tag` - string - This string determines which set of device specific rules + this pusher executes. + + - `pushkey` - string - This is a unique identifier for this pusher. + Max length, 512 bytes. + +- `total` - integer - Number of pushers. + +See also the +[Client-Server API Spec on pushers](https://matrix.org/docs/spec/client_server/latest#get-matrix-client-r0-pushers). + +## Shadow-banning users + +Shadow-banning is a useful tool for moderating malicious or egregiously abusive users. 
+A shadow-banned user receives successful responses to their client-server API requests,
+but the events are not propagated into rooms. This can be an effective tool as it
+(hopefully) takes longer for the user to realise they are being moderated before
+pivoting to another account.
+
+Shadow-banning a user should be used as a tool of last resort and may lead to confusing
+or broken behaviour for the client. A shadow-banned user will not receive any
+notification and it is generally more appropriate to ban or kick abusive users.
+A shadow-banned user will be unable to contact anyone on the server.
+
+The API is:
+
+```
+POST /_synapse/admin/v1/users/<user_id>/shadow_ban
+```
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](../../usage/administration/admin_api)
+
+An empty JSON dict is returned.
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `user_id` - The fully qualified MXID: for example, `@user:server.com`. The user must
+  be local.
+
+## Override ratelimiting for users
+
+This API allows you to override or disable ratelimiting for a specific user.
+There are specific APIs to set, get and delete a ratelimit.
+
+### Get status of ratelimit
+
+The API is:
+
+```
+GET /_synapse/admin/v1/users/<user_id>/override_ratelimit
+```
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](../../usage/administration/admin_api)
+
+A response body like the following is returned:
+
+```json
+{
+    "messages_per_second": 0,
+    "burst_count": 0
+}
+```
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `user_id` - The fully qualified MXID: for example, `@user:server.com`. The user must
+  be local.
+
+**Response**
+
+The following fields are returned in the JSON response body:
+
+- `messages_per_second` - integer - The number of actions that can
+  be performed in a second. `0` means that ratelimiting is disabled for this user.
+- `burst_count` - integer - How many actions can be performed before
+  being limited.
+
+If **no** custom ratelimit is set, an empty JSON dict is returned.
+
+```json
+{}
+```
+
+### Set ratelimit
+
+The API is:
+
+```
+POST /_synapse/admin/v1/users/<user_id>/override_ratelimit
+```
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](../../usage/administration/admin_api)
+
+A response body like the following is returned:
+
+```json
+{
+    "messages_per_second": 0,
+    "burst_count": 0
+}
+```
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `user_id` - The fully qualified MXID: for example, `@user:server.com`. The user must
+  be local.
+
+Body parameters:
+
+- `messages_per_second` - positive integer, optional. The number of actions that can
+  be performed in a second. Defaults to `0`.
+- `burst_count` - positive integer, optional. How many actions can be performed
+  before being limited. Defaults to `0`.
+
+To disable ratelimiting for a user, set both values to `0`, as in the sketch below.
+
+**Response**
+
+The following fields are returned in the JSON response body:
+
+- `messages_per_second` - integer - The number of actions that can
+  be performed in a second.
+- `burst_count` - integer - How many actions can be performed before
+  being limited.
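+
+For instance, ratelimiting could be disabled entirely for a user with a request
+along these lines. The server name `matrix.example.com` and the `$TOKEN`
+variable holding an admin access token are placeholders, not values defined by
+this API.
+
+```sh
+# Illustrative only: setting both values to 0 disables ratelimiting for the user.
+curl --request POST \
+    --header "Authorization: Bearer $TOKEN" \
+    --data '{"messages_per_second": 0, "burst_count": 0}' \
+    "https://matrix.example.com/_synapse/admin/v1/users/@user:server.com/override_ratelimit"
+```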
+ +### Delete ratelimit + +The API is: + +``` +DELETE /_synapse/admin/v1/users//override_ratelimit +``` + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](../../usage/administration/admin_api) + +An empty JSON dict is returned. + +```json +{} +``` + +**Parameters** + +The following parameters should be set in the URL: + +- `user_id` - The fully qualified MXID: for example, `@user:server.com`. The user must + be local. + diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst deleted file mode 100644 index dbce9c90b6..0000000000 --- a/docs/admin_api/user_admin_api.rst +++ /dev/null @@ -1,981 +0,0 @@ -.. contents:: - -Query User Account -================== - -This API returns information about a specific user account. - -The api is:: - - GET /_synapse/admin/v2/users/ - -To use it, you will need to authenticate by providing an ``access_token`` for a -server admin: see `README.rst `_. - -It returns a JSON body like the following: - -.. code:: json - - { - "displayname": "User", - "threepids": [ - { - "medium": "email", - "address": "" - }, - { - "medium": "email", - "address": "" - } - ], - "avatar_url": "", - "admin": 0, - "deactivated": 0, - "shadow_banned": 0, - "password_hash": "$2b$12$p9B4GkqYdRTPGD", - "creation_ts": 1560432506, - "appservice_id": null, - "consent_server_notice_sent": null, - "consent_version": null - } - -URL parameters: - -- ``user_id``: fully-qualified user id: for example, ``@user:server.com``. - -Create or modify Account -======================== - -This API allows an administrator to create or modify a user account with a -specific ``user_id``. - -This api is:: - - PUT /_synapse/admin/v2/users/ - -with a body of: - -.. code:: json - - { - "password": "user_password", - "displayname": "User", - "threepids": [ - { - "medium": "email", - "address": "" - }, - { - "medium": "email", - "address": "" - } - ], - "avatar_url": "", - "admin": false, - "deactivated": false - } - -To use it, you will need to authenticate by providing an ``access_token`` for a -server admin: see `README.rst `_. - -URL parameters: - -- ``user_id``: fully-qualified user id: for example, ``@user:server.com``. - -Body parameters: - -- ``password``, optional. If provided, the user's password is updated and all - devices are logged out. - -- ``displayname``, optional, defaults to the value of ``user_id``. - -- ``threepids``, optional, allows setting the third-party IDs (email, msisdn) - belonging to a user. - -- ``avatar_url``, optional, must be a - `MXC URI `_. - -- ``admin``, optional, defaults to ``false``. - -- ``deactivated``, optional. If unspecified, deactivation state will be left - unchanged on existing accounts and set to ``false`` for new accounts. - A user cannot be erased by deactivating with this API. For details on deactivating users see - `Deactivate Account <#deactivate-account>`_. - -If the user already exists then optional parameters default to the current value. - -In order to re-activate an account ``deactivated`` must be set to ``false``. If -users do not login via single-sign-on, a new ``password`` must be provided. - -List Accounts -============= - -This API returns all local user accounts. -By default, the response is ordered by ascending user ID. - -The API is:: - - GET /_synapse/admin/v2/users?from=0&limit=10&guests=false - -To use it, you will need to authenticate by providing an ``access_token`` for a -server admin: see `README.rst `_. 
- -A response body like the following is returned: - -.. code:: json - - { - "users": [ - { - "name": "", - "is_guest": 0, - "admin": 0, - "user_type": null, - "deactivated": 0, - "shadow_banned": 0, - "displayname": "", - "avatar_url": null - }, { - "name": "", - "is_guest": 0, - "admin": 1, - "user_type": null, - "deactivated": 0, - "shadow_banned": 0, - "displayname": "", - "avatar_url": "" - } - ], - "next_token": "100", - "total": 200 - } - -To paginate, check for ``next_token`` and if present, call the endpoint again -with ``from`` set to the value of ``next_token``. This will return a new page. - -If the endpoint does not return a ``next_token`` then there are no more users -to paginate through. - -**Parameters** - -The following parameters should be set in the URL: - -- ``user_id`` - Is optional and filters to only return users with user IDs - that contain this value. This parameter is ignored when using the ``name`` parameter. -- ``name`` - Is optional and filters to only return users with user ID localparts - **or** displaynames that contain this value. -- ``guests`` - string representing a bool - Is optional and if ``false`` will **exclude** guest users. - Defaults to ``true`` to include guest users. -- ``deactivated`` - string representing a bool - Is optional and if ``true`` will **include** deactivated users. - Defaults to ``false`` to exclude deactivated users. -- ``limit`` - string representing a positive integer - Is optional but is used for pagination, - denoting the maximum number of items to return in this call. Defaults to ``100``. -- ``from`` - string representing a positive integer - Is optional but used for pagination, - denoting the offset in the returned results. This should be treated as an opaque value and - not explicitly set to anything other than the return value of ``next_token`` from a previous call. - Defaults to ``0``. -- ``order_by`` - The method by which to sort the returned list of users. - If the ordered field has duplicates, the second order is always by ascending ``name``, - which guarantees a stable ordering. Valid values are: - - - ``name`` - Users are ordered alphabetically by ``name``. This is the default. - - ``is_guest`` - Users are ordered by ``is_guest`` status. - - ``admin`` - Users are ordered by ``admin`` status. - - ``user_type`` - Users are ordered alphabetically by ``user_type``. - - ``deactivated`` - Users are ordered by ``deactivated`` status. - - ``shadow_banned`` - Users are ordered by ``shadow_banned`` status. - - ``displayname`` - Users are ordered alphabetically by ``displayname``. - - ``avatar_url`` - Users are ordered alphabetically by avatar URL. - -- ``dir`` - Direction of media order. Either ``f`` for forwards or ``b`` for backwards. - Setting this value to ``b`` will reverse the above sort order. Defaults to ``f``. - -Caution. The database only has indexes on the columns ``name`` and ``created_ts``. -This means that if a different sort order is used (``is_guest``, ``admin``, -``user_type``, ``deactivated``, ``shadow_banned``, ``avatar_url`` or ``displayname``), -this can cause a large load on the database, especially for large environments. - -**Response** - -The following fields are returned in the JSON response body: - -- ``users`` - An array of objects, each containing information about an user. - User objects contain the following fields: - - - ``name`` - string - Fully-qualified user ID (ex. ``@user:server.com``). - - ``is_guest`` - bool - Status if that user is a guest account. 
- - ``admin`` - bool - Status if that user is a server administrator. - - ``user_type`` - string - Type of the user. Normal users are type ``None``. - This allows user type specific behaviour. There are also types ``support`` and ``bot``. - - ``deactivated`` - bool - Status if that user has been marked as deactivated. - - ``shadow_banned`` - bool - Status if that user has been marked as shadow banned. - - ``displayname`` - string - The user's display name if they have set one. - - ``avatar_url`` - string - The user's avatar URL if they have set one. - -- ``next_token``: string representing a positive integer - Indication for pagination. See above. -- ``total`` - integer - Total number of media. - - -Query current sessions for a user -================================= - -This API returns information about the active sessions for a specific user. - -The api is:: - - GET /_synapse/admin/v1/whois/ - -and:: - - GET /_matrix/client/r0/admin/whois/ - -See also: `Client Server API Whois -`_ - -To use it, you will need to authenticate by providing an ``access_token`` for a -server admin: see `README.rst `_. - -It returns a JSON body like the following: - -.. code:: json - - { - "user_id": "", - "devices": { - "": { - "sessions": [ - { - "connections": [ - { - "ip": "1.2.3.4", - "last_seen": 1417222374433, - "user_agent": "Mozilla/5.0 ..." - }, - { - "ip": "1.2.3.10", - "last_seen": 1417222374500, - "user_agent": "Dalvik/2.1.0 ..." - } - ] - } - ] - } - } - } - -``last_seen`` is measured in milliseconds since the Unix epoch. - -Deactivate Account -================== - -This API deactivates an account. It removes active access tokens, resets the -password, and deletes third-party IDs (to prevent the user requesting a -password reset). - -It can also mark the user as GDPR-erased. This means messages sent by the -user will still be visible by anyone that was in the room when these messages -were sent, but hidden from users joining the room afterwards. - -The api is:: - - POST /_synapse/admin/v1/deactivate/ - -with a body of: - -.. code:: json - - { - "erase": true - } - -To use it, you will need to authenticate by providing an ``access_token`` for a -server admin: see `README.rst `_. - -The erase parameter is optional and defaults to ``false``. -An empty body may be passed for backwards compatibility. - -The following actions are performed when deactivating an user: - -- Try to unpind 3PIDs from the identity server -- Remove all 3PIDs from the homeserver -- Delete all devices and E2EE keys -- Delete all access tokens -- Delete the password hash -- Removal from all rooms the user is a member of -- Remove the user from the user directory -- Reject all pending invites -- Remove all account validity information related to the user - -The following additional actions are performed during deactivation if ``erase`` -is set to ``true``: - -- Remove the user's display name -- Remove the user's avatar URL -- Mark the user as erased - - -Reset password -============== - -Changes the password of another user. This will automatically log the user out of all their devices. - -The api is:: - - POST /_synapse/admin/v1/reset_password/ - -with a body of: - -.. code:: json - - { - "new_password": "", - "logout_devices": true - } - -To use it, you will need to authenticate by providing an ``access_token`` for a -server admin: see `README.rst `_. - -The parameter ``new_password`` is required. -The parameter ``logout_devices`` is optional and defaults to ``true``. 
- -Get whether a user is a server administrator or not -=================================================== - - -The api is:: - - GET /_synapse/admin/v1/users//admin - -To use it, you will need to authenticate by providing an ``access_token`` for a -server admin: see `README.rst `_. - -A response body like the following is returned: - -.. code:: json - - { - "admin": true - } - - -Change whether a user is a server administrator or not -====================================================== - -Note that you cannot demote yourself. - -The api is:: - - PUT /_synapse/admin/v1/users//admin - -with a body of: - -.. code:: json - - { - "admin": true - } - -To use it, you will need to authenticate by providing an ``access_token`` for a -server admin: see `README.rst `_. - - -List room memberships of an user -================================ -Gets a list of all ``room_id`` that a specific ``user_id`` is member. - -The API is:: - - GET /_synapse/admin/v1/users//joined_rooms - -To use it, you will need to authenticate by providing an ``access_token`` for a -server admin: see `README.rst `_. - -A response body like the following is returned: - -.. code:: json - - { - "joined_rooms": [ - "!DuGcnbhHGaSZQoNQR:matrix.org", - "!ZtSaPCawyWtxfWiIy:matrix.org" - ], - "total": 2 - } - -The server returns the list of rooms of which the user and the server -are member. If the user is local, all the rooms of which the user is -member are returned. - -**Parameters** - -The following parameters should be set in the URL: - -- ``user_id`` - fully qualified: for example, ``@user:server.com``. - -**Response** - -The following fields are returned in the JSON response body: - -- ``joined_rooms`` - An array of ``room_id``. -- ``total`` - Number of rooms. - - -List media of a user -==================== -Gets a list of all local media that a specific ``user_id`` has created. -By default, the response is ordered by descending creation date and ascending media ID. -The newest media is on top. You can change the order with parameters -``order_by`` and ``dir``. - -The API is:: - - GET /_synapse/admin/v1/users//media - -To use it, you will need to authenticate by providing an ``access_token`` for a -server admin: see `README.rst `_. - -A response body like the following is returned: - -.. code:: json - - { - "media": [ - { - "created_ts": 100400, - "last_access_ts": null, - "media_id": "qXhyRzulkwLsNHTbpHreuEgo", - "media_length": 67, - "media_type": "image/png", - "quarantined_by": null, - "safe_from_quarantine": false, - "upload_name": "test1.png" - }, - { - "created_ts": 200400, - "last_access_ts": null, - "media_id": "FHfiSnzoINDatrXHQIXBtahw", - "media_length": 67, - "media_type": "image/png", - "quarantined_by": null, - "safe_from_quarantine": false, - "upload_name": "test2.png" - } - ], - "next_token": 3, - "total": 2 - } - -To paginate, check for ``next_token`` and if present, call the endpoint again -with ``from`` set to the value of ``next_token``. This will return a new page. - -If the endpoint does not return a ``next_token`` then there are no more -reports to paginate through. - -**Parameters** - -The following parameters should be set in the URL: - -- ``user_id`` - string - fully qualified: for example, ``@user:server.com``. -- ``limit``: string representing a positive integer - Is optional but is used for pagination, - denoting the maximum number of items to return in this call. Defaults to ``100``. 
-- ``from``: string representing a positive integer - Is optional but used for pagination, - denoting the offset in the returned results. This should be treated as an opaque value and - not explicitly set to anything other than the return value of ``next_token`` from a previous call. - Defaults to ``0``. -- ``order_by`` - The method by which to sort the returned list of media. - If the ordered field has duplicates, the second order is always by ascending ``media_id``, - which guarantees a stable ordering. Valid values are: - - - ``media_id`` - Media are ordered alphabetically by ``media_id``. - - ``upload_name`` - Media are ordered alphabetically by name the media was uploaded with. - - ``created_ts`` - Media are ordered by when the content was uploaded in ms. - Smallest to largest. This is the default. - - ``last_access_ts`` - Media are ordered by when the content was last accessed in ms. - Smallest to largest. - - ``media_length`` - Media are ordered by length of the media in bytes. - Smallest to largest. - - ``media_type`` - Media are ordered alphabetically by MIME-type. - - ``quarantined_by`` - Media are ordered alphabetically by the user ID that - initiated the quarantine request for this media. - - ``safe_from_quarantine`` - Media are ordered by the status if this media is safe - from quarantining. - -- ``dir`` - Direction of media order. Either ``f`` for forwards or ``b`` for backwards. - Setting this value to ``b`` will reverse the above sort order. Defaults to ``f``. - -If neither ``order_by`` nor ``dir`` is set, the default order is newest media on top -(corresponds to ``order_by`` = ``created_ts`` and ``dir`` = ``b``). - -Caution. The database only has indexes on the columns ``media_id``, -``user_id`` and ``created_ts``. This means that if a different sort order is used -(``upload_name``, ``last_access_ts``, ``media_length``, ``media_type``, -``quarantined_by`` or ``safe_from_quarantine``), this can cause a large load on the -database, especially for large environments. - -**Response** - -The following fields are returned in the JSON response body: - -- ``media`` - An array of objects, each containing information about a media. - Media objects contain the following fields: - - - ``created_ts`` - integer - Timestamp when the content was uploaded in ms. - - ``last_access_ts`` - integer - Timestamp when the content was last accessed in ms. - - ``media_id`` - string - The id used to refer to the media. - - ``media_length`` - integer - Length of the media in bytes. - - ``media_type`` - string - The MIME-type of the media. - - ``quarantined_by`` - string - The user ID that initiated the quarantine request - for this media. - - - ``safe_from_quarantine`` - bool - Status if this media is safe from quarantining. - - ``upload_name`` - string - The name the media was uploaded with. - -- ``next_token``: integer - Indication for pagination. See above. -- ``total`` - integer - Total number of media. - -Login as a user -=============== - -Get an access token that can be used to authenticate as that user. Useful for -when admins wish to do actions on behalf of a user. - -The API is:: - - POST /_synapse/admin/v1/users//login - {} - -An optional ``valid_until_ms`` field can be specified in the request body as an -integer timestamp that specifies when the token should expire. By default tokens -do not expire. - -A response body like the following is returned: - -.. 
code:: json - - { - "access_token": "" - } - - -This API does *not* generate a new device for the user, and so will not appear -their ``/devices`` list, and in general the target user should not be able to -tell they have been logged in as. - -To expire the token call the standard ``/logout`` API with the token. - -Note: The token will expire if the *admin* user calls ``/logout/all`` from any -of their devices, but the token will *not* expire if the target user does the -same. - - -User devices -============ - -List all devices ----------------- -Gets information about all devices for a specific ``user_id``. - -The API is:: - - GET /_synapse/admin/v2/users//devices - -To use it, you will need to authenticate by providing an ``access_token`` for a -server admin: see `README.rst `_. - -A response body like the following is returned: - -.. code:: json - - { - "devices": [ - { - "device_id": "QBUAZIFURK", - "display_name": "android", - "last_seen_ip": "1.2.3.4", - "last_seen_ts": 1474491775024, - "user_id": "" - }, - { - "device_id": "AUIECTSRND", - "display_name": "ios", - "last_seen_ip": "1.2.3.5", - "last_seen_ts": 1474491775025, - "user_id": "" - } - ], - "total": 2 - } - -**Parameters** - -The following parameters should be set in the URL: - -- ``user_id`` - fully qualified: for example, ``@user:server.com``. - -**Response** - -The following fields are returned in the JSON response body: - -- ``devices`` - An array of objects, each containing information about a device. - Device objects contain the following fields: - - - ``device_id`` - Identifier of device. - - ``display_name`` - Display name set by the user for this device. - Absent if no name has been set. - - ``last_seen_ip`` - The IP address where this device was last seen. - (May be a few minutes out of date, for efficiency reasons). - - ``last_seen_ts`` - The timestamp (in milliseconds since the unix epoch) when this - devices was last seen. (May be a few minutes out of date, for efficiency reasons). - - ``user_id`` - Owner of device. - -- ``total`` - Total number of user's devices. - -Delete multiple devices ------------------- -Deletes the given devices for a specific ``user_id``, and invalidates -any access token associated with them. - -The API is:: - - POST /_synapse/admin/v2/users//delete_devices - - { - "devices": [ - "QBUAZIFURK", - "AUIECTSRND" - ], - } - -To use it, you will need to authenticate by providing an ``access_token`` for a -server admin: see `README.rst `_. - -An empty JSON dict is returned. - -**Parameters** - -The following parameters should be set in the URL: - -- ``user_id`` - fully qualified: for example, ``@user:server.com``. - -The following fields are required in the JSON request body: - -- ``devices`` - The list of device IDs to delete. - -Show a device ---------------- -Gets information on a single device, by ``device_id`` for a specific ``user_id``. - -The API is:: - - GET /_synapse/admin/v2/users//devices/ - -To use it, you will need to authenticate by providing an ``access_token`` for a -server admin: see `README.rst `_. - -A response body like the following is returned: - -.. code:: json - - { - "device_id": "", - "display_name": "android", - "last_seen_ip": "1.2.3.4", - "last_seen_ts": 1474491775024, - "user_id": "" - } - -**Parameters** - -The following parameters should be set in the URL: - -- ``user_id`` - fully qualified: for example, ``@user:server.com``. -- ``device_id`` - The device to retrieve. 
- -**Response** - -The following fields are returned in the JSON response body: - -- ``device_id`` - Identifier of device. -- ``display_name`` - Display name set by the user for this device. - Absent if no name has been set. -- ``last_seen_ip`` - The IP address where this device was last seen. - (May be a few minutes out of date, for efficiency reasons). -- ``last_seen_ts`` - The timestamp (in milliseconds since the unix epoch) when this - devices was last seen. (May be a few minutes out of date, for efficiency reasons). -- ``user_id`` - Owner of device. - -Update a device ---------------- -Updates the metadata on the given ``device_id`` for a specific ``user_id``. - -The API is:: - - PUT /_synapse/admin/v2/users//devices/ - - { - "display_name": "My other phone" - } - -To use it, you will need to authenticate by providing an ``access_token`` for a -server admin: see `README.rst `_. - -An empty JSON dict is returned. - -**Parameters** - -The following parameters should be set in the URL: - -- ``user_id`` - fully qualified: for example, ``@user:server.com``. -- ``device_id`` - The device to update. - -The following fields are required in the JSON request body: - -- ``display_name`` - The new display name for this device. If not given, - the display name is unchanged. - -Delete a device ---------------- -Deletes the given ``device_id`` for a specific ``user_id``, -and invalidates any access token associated with it. - -The API is:: - - DELETE /_synapse/admin/v2/users//devices/ - - {} - -To use it, you will need to authenticate by providing an ``access_token`` for a -server admin: see `README.rst `_. - -An empty JSON dict is returned. - -**Parameters** - -The following parameters should be set in the URL: - -- ``user_id`` - fully qualified: for example, ``@user:server.com``. -- ``device_id`` - The device to delete. - -List all pushers -================ -Gets information about all pushers for a specific ``user_id``. - -The API is:: - - GET /_synapse/admin/v1/users//pushers - -To use it, you will need to authenticate by providing an ``access_token`` for a -server admin: see `README.rst `_. - -A response body like the following is returned: - -.. code:: json - - { - "pushers": [ - { - "app_display_name":"HTTP Push Notifications", - "app_id":"m.http", - "data": { - "url":"example.com" - }, - "device_display_name":"pushy push", - "kind":"http", - "lang":"None", - "profile_tag":"", - "pushkey":"a@example.com" - } - ], - "total": 1 - } - -**Parameters** - -The following parameters should be set in the URL: - -- ``user_id`` - fully qualified: for example, ``@user:server.com``. - -**Response** - -The following fields are returned in the JSON response body: - -- ``pushers`` - An array containing the current pushers for the user - - - ``app_display_name`` - string - A string that will allow the user to identify - what application owns this pusher. - - - ``app_id`` - string - This is a reverse-DNS style identifier for the application. - Max length, 64 chars. - - - ``data`` - A dictionary of information for the pusher implementation itself. - - - ``url`` - string - Required if ``kind`` is ``http``. The URL to use to send - notifications to. - - - ``format`` - string - The format to use when sending notifications to the - Push Gateway. - - - ``device_display_name`` - string - A string that will allow the user to identify - what device owns this pusher. - - - ``profile_tag`` - string - This string determines which set of device specific rules - this pusher executes. 
- - - ``kind`` - string - The kind of pusher. "http" is a pusher that sends HTTP pokes. - - ``lang`` - string - The preferred language for receiving notifications - (e.g. 'en' or 'en-US') - - - ``profile_tag`` - string - This string determines which set of device specific rules - this pusher executes. - - - ``pushkey`` - string - This is a unique identifier for this pusher. - Max length, 512 bytes. - -- ``total`` - integer - Number of pushers. - -See also `Client-Server API Spec `_ - -Shadow-banning users -==================== - -Shadow-banning is a useful tool for moderating malicious or egregiously abusive users. -A shadow-banned users receives successful responses to their client-server API requests, -but the events are not propagated into rooms. This can be an effective tool as it -(hopefully) takes longer for the user to realise they are being moderated before -pivoting to another account. - -Shadow-banning a user should be used as a tool of last resort and may lead to confusing -or broken behaviour for the client. A shadow-banned user will not receive any -notification and it is generally more appropriate to ban or kick abusive users. -A shadow-banned user will be unable to contact anyone on the server. - -The API is:: - - POST /_synapse/admin/v1/users//shadow_ban - -To use it, you will need to authenticate by providing an ``access_token`` for a -server admin: see `README.rst `_. - -An empty JSON dict is returned. - -**Parameters** - -The following parameters should be set in the URL: - -- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must - be local. - -Override ratelimiting for users -=============================== - -This API allows to override or disable ratelimiting for a specific user. -There are specific APIs to set, get and delete a ratelimit. - -Get status of ratelimit ------------------------ - -The API is:: - - GET /_synapse/admin/v1/users//override_ratelimit - -To use it, you will need to authenticate by providing an ``access_token`` for a -server admin: see `README.rst `_. - -A response body like the following is returned: - -.. code:: json - - { - "messages_per_second": 0, - "burst_count": 0 - } - -**Parameters** - -The following parameters should be set in the URL: - -- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must - be local. - -**Response** - -The following fields are returned in the JSON response body: - -- ``messages_per_second`` - integer - The number of actions that can - be performed in a second. `0` mean that ratelimiting is disabled for this user. -- ``burst_count`` - integer - How many actions that can be performed before - being limited. - -If **no** custom ratelimit is set, an empty JSON dict is returned. - -.. code:: json - - {} - -Set ratelimit -------------- - -The API is:: - - POST /_synapse/admin/v1/users//override_ratelimit - -To use it, you will need to authenticate by providing an ``access_token`` for a -server admin: see `README.rst `_. - -A response body like the following is returned: - -.. code:: json - - { - "messages_per_second": 0, - "burst_count": 0 - } - -**Parameters** - -The following parameters should be set in the URL: - -- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must - be local. - -Body parameters: - -- ``messages_per_second`` - positive integer, optional. The number of actions that can - be performed in a second. Defaults to ``0``. -- ``burst_count`` - positive integer, optional. 
How many actions that can be performed - before being limited. Defaults to ``0``. - -To disable users' ratelimit set both values to ``0``. - -**Response** - -The following fields are returned in the JSON response body: - -- ``messages_per_second`` - integer - The number of actions that can - be performed in a second. -- ``burst_count`` - integer - How many actions that can be performed before - being limited. - -Delete ratelimit ----------------- - -The API is:: - - DELETE /_synapse/admin/v1/users//override_ratelimit - -To use it, you will need to authenticate by providing an ``access_token`` for a -server admin: see `README.rst `_. - -An empty JSON dict is returned. - -.. code:: json - - {} - -**Parameters** - -The following parameters should be set in the URL: - -- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must - be local. - diff --git a/docs/admin_api/version_api.rst b/docs/admin_api/version_api.md similarity index 59% rename from docs/admin_api/version_api.rst rename to docs/admin_api/version_api.md index 833d9028be..efb4a0c0f7 100644 --- a/docs/admin_api/version_api.rst +++ b/docs/admin_api/version_api.md @@ -1,20 +1,21 @@ -Version API -=========== +# Version API This API returns the running Synapse version and the Python version on which Synapse is being run. This is useful when a Synapse instance is behind a proxy that does not forward the 'Server' header (which also contains Synapse version information). -The api is:: +The api is: - GET /_synapse/admin/v1/server_version +``` +GET /_synapse/admin/v1/server_version +``` It returns a JSON body like the following: -.. code:: json - - { - "server_version": "0.99.2rc1 (b=develop, abcdef123)", - "python_version": "3.6.8" - } +```json +{ + "server_version": "0.99.2rc1 (b=develop, abcdef123)", + "python_version": "3.6.8" +} +``` diff --git a/docs/dev/git.md b/docs/dev/git.md index b747ff20c9..87950f07b2 100644 --- a/docs/dev/git.md +++ b/docs/dev/git.md @@ -122,15 +122,15 @@ So, what counts as a more- or less-stable branch? A little reflection will show that our active branches are ordered thus, from more-stable to less-stable: * `master` (tracks our last release). - * `release-vX.Y.Z` (the branch where we prepare the next release)[3](#f3). * PR branches which are targeting the release. * `develop` (our "mainline" branch containing our bleeding-edge). * regular PR branches. The corollary is: if you have a bugfix that needs to land in both -`release-vX.Y.Z` *and* `develop`, then you should base your PR on -`release-vX.Y.Z`, get it merged there, and then merge from `release-vX.Y.Z` to +`release-vX.Y` *and* `develop`, then you should base your PR on +`release-vX.Y`, get it merged there, and then merge from `release-vX.Y` to `develop`. (If a fix lands in `develop` and we later need it in a release-branch, we can of course cherry-pick it, but landing it in the release branch first helps reduce the chance of annoying conflicts.) @@ -145,4 +145,4 @@ most intuitive name. [^](#a1) [3]: Very, very occasionally (I think this has happened once in the history of Synapse), we've had two releases in flight at once. Obviously, -`release-v1.2.3` is more-stable than `release-v1.3.0`. [^](#a3) +`release-v1.2` is more-stable than `release-v1.3`. 
[^](#a3) diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md new file mode 100644 index 0000000000..ddf0887123 --- /dev/null +++ b/docs/development/contributing_guide.md @@ -0,0 +1,7 @@ + +# Contributing + +{{#include ../../CONTRIBUTING.md}} diff --git a/docs/development/internal_documentation/README.md b/docs/development/internal_documentation/README.md new file mode 100644 index 0000000000..51c5fb94d5 --- /dev/null +++ b/docs/development/internal_documentation/README.md @@ -0,0 +1,12 @@ +# Internal Documentation + +This section covers implementation documentation for various parts of Synapse. + +If a developer is planning to make a change to a feature of Synapse, it can be useful for +general documentation of how that feature is implemented to be available. This saves the +developer time in place of needing to understand how the feature works by reading the +code. + +Documentation that would be more useful for the perspective of a system administrator, +rather than a developer who's intending to change to code, should instead be placed +under the Usage section of the documentation. \ No newline at end of file diff --git a/docs/favicon.png b/docs/favicon.png new file mode 100644 index 0000000000..5f18bf641f Binary files /dev/null and b/docs/favicon.png differ diff --git a/docs/favicon.svg b/docs/favicon.svg new file mode 100644 index 0000000000..e571aeb3ed --- /dev/null +++ b/docs/favicon.svg @@ -0,0 +1,58 @@ + + + + + + image/svg+xml + + + + + + + + + diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 085053127b..59a1a76fe9 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -3163,18 +3163,3 @@ redis: # Optional password if configured on the Redis instance # #password: - - -# Enable experimental features in Synapse. -# -# Experimental features might break or be removed without a deprecation -# period. -# -experimental_features: - # Support for Spaces (MSC1772), it enables the following: - # - # * The Spaces Summary API (MSC2946). - # * Restricting room membership based on space membership (MSC3083). - # - # Uncomment to disable support for Spaces. - #spaces_enabled: false diff --git a/docs/setup/installation.md b/docs/setup/installation.md new file mode 100644 index 0000000000..8bb1cffd3d --- /dev/null +++ b/docs/setup/installation.md @@ -0,0 +1,7 @@ + +{{#include ../../INSTALL.md}} \ No newline at end of file diff --git a/docs/turn-howto.md b/docs/turn-howto.md index 41738bbe69..6433446c2a 100644 --- a/docs/turn-howto.md +++ b/docs/turn-howto.md @@ -4,7 +4,7 @@ This document explains how to enable VoIP relaying on your Home Server with TURN. The synapse Matrix Home Server supports integration with TURN server via the -[TURN server REST API](). This +[TURN server REST API](). This allows the Home Server to generate credentials that are valid for use on the TURN server through the use of a secret shared between the Home Server and the TURN server. diff --git a/docs/upgrading/README.md b/docs/upgrading/README.md new file mode 100644 index 0000000000..258e58cf15 --- /dev/null +++ b/docs/upgrading/README.md @@ -0,0 +1,7 @@ + +{{#include ../../UPGRADE.rst}} \ No newline at end of file diff --git a/docs/usage/administration/README.md b/docs/usage/administration/README.md new file mode 100644 index 0000000000..e1e57546ab --- /dev/null +++ b/docs/usage/administration/README.md @@ -0,0 +1,7 @@ +# Administration + +This section contains information on managing your Synapse homeserver. 
This includes: + +* Managing users, rooms and media via the Admin API. +* Setting up metrics and monitoring to give you insight into your homeserver's health. +* Configuring structured logging. \ No newline at end of file diff --git a/docs/usage/administration/admin_api/README.md b/docs/usage/administration/admin_api/README.md new file mode 100644 index 0000000000..2fca96f8be --- /dev/null +++ b/docs/usage/administration/admin_api/README.md @@ -0,0 +1,29 @@ +# The Admin API + +## Authenticate as a server admin + +Many of the API calls in the admin api will require an `access_token` for a +server admin. (Note that a server admin is distinct from a room admin.) + +A user can be marked as a server admin by updating the database directly, e.g.: + +```sql +UPDATE users SET admin = 1 WHERE name = '@foo:bar.com'; +``` + +A new server admin user can also be created using the `register_new_matrix_user` +command. This is a script that is located in the `scripts/` directory, or possibly +already on your `$PATH` depending on how Synapse was installed. + +Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings. + +## Making an Admin API request +Once you have your `access_token`, you will need to authenticate each request to an Admin API endpoint by +providing the token as either a query parameter or a request header. To add it as a request header in cURL: + +```sh +curl --header "Authorization: Bearer " +``` + +For more details on access tokens in Matrix, please refer to the complete +[matrix spec documentation](https://matrix.org/docs/spec/client_server/r0.6.1#using-access-tokens). diff --git a/docs/usage/configuration/README.md b/docs/usage/configuration/README.md new file mode 100644 index 0000000000..41d41167c6 --- /dev/null +++ b/docs/usage/configuration/README.md @@ -0,0 +1,4 @@ +# Configuration + +This section contains information on tweaking Synapse via the various options in the configuration file. A configuration +file should have been generated when you [installed Synapse](../../setup/installation.html). diff --git a/docs/usage/configuration/homeserver_sample_config.md b/docs/usage/configuration/homeserver_sample_config.md new file mode 100644 index 0000000000..11e806998d --- /dev/null +++ b/docs/usage/configuration/homeserver_sample_config.md @@ -0,0 +1,14 @@ +# Homeserver Sample Configuration File + +Below is a sample homeserver configuration file. The homeserver configuration file +can be tweaked to change the behaviour of your homeserver. A restart of the server is +generally required to apply any changes made to this file. + +Note that the contents below are *not* intended to be copied and used as the basis for +a real homeserver.yaml. Instead, if you are starting from scratch, please generate +a fresh config using Synapse by following the instructions in +[Installation](../../setup/installation.md). + +```yaml +{{#include ../../sample_config.yaml}} +``` diff --git a/docs/usage/configuration/logging_sample_config.md b/docs/usage/configuration/logging_sample_config.md new file mode 100644 index 0000000000..4c4bc6fc16 --- /dev/null +++ b/docs/usage/configuration/logging_sample_config.md @@ -0,0 +1,14 @@ +# Logging Sample Configuration File + +Below is a sample logging configuration file. This file can be tweaked to control how your +homeserver will output logs. A restart of the server is generally required to apply any +changes made to this file. 
+ +Note that the contents below are *not* intended to be copied and used as the basis for +a real homeserver.yaml. Instead, if you are starting from scratch, please generate +a fresh config using Synapse by following the instructions in +[Installation](../../setup/installation.md). + +```yaml +{{#include ../../sample_log_config.yaml}} +``__` \ No newline at end of file diff --git a/docs/usage/configuration/user_authentication/README.md b/docs/usage/configuration/user_authentication/README.md new file mode 100644 index 0000000000..087ae053cf --- /dev/null +++ b/docs/usage/configuration/user_authentication/README.md @@ -0,0 +1,15 @@ +# User Authentication + +Synapse supports multiple methods of authenticating users, either out-of-the-box or through custom pluggable +authentication modules. + +Included in Synapse is support for authenticating users via: + +* A username and password. +* An email address and password. +* Single Sign-On through the SAML, Open ID Connect or CAS protocols. +* JSON Web Tokens. +* An administrator's shared secret. + +Synapse can additionally be extended to support custom authentication schemes through optional "password auth provider" +modules. \ No newline at end of file diff --git a/docs/website_files/README.md b/docs/website_files/README.md new file mode 100644 index 0000000000..04d191479b --- /dev/null +++ b/docs/website_files/README.md @@ -0,0 +1,30 @@ +# Documentation Website Files and Assets + +This directory contains extra files for modifying the look and functionality of +[mdbook](https://github.com/rust-lang/mdBook), the documentation software that's +used to generate Synapse's documentation website. + +The configuration options in the `output.html` section of [book.toml](../../book.toml) +point to additional JS/CSS in this directory that are added on each page load. In +addition, the `theme` directory contains files that overwrite their counterparts in +each of the default themes included with mdbook. + +Currently we use these files to generate a floating Table of Contents panel. The code for +which was partially taken from +[JorelAli/mdBook-pagetoc](https://github.com/JorelAli/mdBook-pagetoc/) +before being modified such that it scrolls with the content of the page. This is handled +by the `table-of-contents.js/css` files. The table of contents panel only appears on pages +that have more than one header, as well as only appearing on desktop-sized monitors. + +We remove the navigation arrows which typically appear on the left and right side of the +screen on desktop as they interfere with the table of contents. This is handled by +the `remove-nav-buttons.css` file. + +Finally, we also stylise the chapter titles in the left sidebar by indenting them +slightly so that they are more visually distinguishable from the section headers +(the bold titles). This is done through the `indent-section-headers.css` file. + +More information can be found in mdbook's official documentation for +[injecting page JS/CSS](https://rust-lang.github.io/mdBook/format/config.html) +and +[customising the default themes](https://rust-lang.github.io/mdBook/format/theme/index.html). \ No newline at end of file diff --git a/docs/website_files/indent-section-headers.css b/docs/website_files/indent-section-headers.css new file mode 100644 index 0000000000..f9b3c82ca6 --- /dev/null +++ b/docs/website_files/indent-section-headers.css @@ -0,0 +1,7 @@ +/* + * Indents each chapter title in the left sidebar so that they aren't + * at the same level as the section headers. 
+ */ +.chapter-item { + margin-left: 1em; +} \ No newline at end of file diff --git a/docs/website_files/remove-nav-buttons.css b/docs/website_files/remove-nav-buttons.css new file mode 100644 index 0000000000..4b280794ea --- /dev/null +++ b/docs/website_files/remove-nav-buttons.css @@ -0,0 +1,8 @@ +/* Remove the prev, next chapter buttons as they interfere with the + * table of contents. + * Note that the table of contents only appears on desktop, thus we + * only remove the desktop (wide) chapter buttons. + */ +.nav-wide-wrapper { + display: none +} \ No newline at end of file diff --git a/docs/website_files/table-of-contents.css b/docs/website_files/table-of-contents.css new file mode 100644 index 0000000000..d16bb3b988 --- /dev/null +++ b/docs/website_files/table-of-contents.css @@ -0,0 +1,42 @@ +@media only screen and (max-width:1439px) { + .sidetoc { + display: none; + } +} + +@media only screen and (min-width:1440px) { + main { + position: relative; + margin-left: 100px !important; + } + .sidetoc { + margin-left: auto; + margin-right: auto; + left: calc(100% + (var(--content-max-width))/4 - 140px); + position: absolute; + text-align: right; + } + .pagetoc { + position: fixed; + width: 250px; + overflow: auto; + right: 20px; + height: calc(100% - var(--menu-bar-height)); + } + .pagetoc a { + color: var(--fg) !important; + display: block; + padding: 5px 15px 5px 10px; + text-align: left; + text-decoration: none; + } + .pagetoc a:hover, + .pagetoc a.active { + background: var(--sidebar-bg) !important; + color: var(--sidebar-fg) !important; + } + .pagetoc .active { + background: var(--sidebar-bg); + color: var(--sidebar-fg); + } +} diff --git a/docs/website_files/table-of-contents.js b/docs/website_files/table-of-contents.js new file mode 100644 index 0000000000..0de5960b22 --- /dev/null +++ b/docs/website_files/table-of-contents.js @@ -0,0 +1,134 @@ +const getPageToc = () => document.getElementsByClassName('pagetoc')[0]; + +const pageToc = getPageToc(); +const pageTocChildren = [...pageToc.children]; +const headers = [...document.getElementsByClassName('header')]; + + +// Select highlighted item in ToC when clicking an item +pageTocChildren.forEach(child => { + child.addEventHandler('click', () => { + pageTocChildren.forEach(child => { + child.classList.remove('active'); + }); + child.classList.add('active'); + }); +}); + + +/** + * Test whether a node is in the viewport + */ +function isInViewport(node) { + const rect = node.getBoundingClientRect(); + return rect.top >= 0 && rect.left >= 0 && rect.bottom <= (window.innerHeight || document.documentElement.clientHeight) && rect.right <= (window.innerWidth || document.documentElement.clientWidth); +} + + +/** + * Set a new ToC entry. + * Clear any previously highlighted ToC items, set the new one, + * and adjust the ToC scroll position. 
+ */ +function setTocEntry() { + let activeEntry; + const pageTocChildren = [...getPageToc().children]; + + // Calculate which header is the current one at the top of screen + headers.forEach(header => { + if (window.pageYOffset >= header.offsetTop) { + activeEntry = header; + } + }); + + // Update selected item in ToC when scrolling + pageTocChildren.forEach(child => { + if (activeEntry.href.localeCompare(child.href) === 0) { + child.classList.add('active'); + } else { + child.classList.remove('active'); + } + }); + + let tocEntryForLocation = document.querySelector(`nav a[href="${activeEntry.href}"]`); + if (tocEntryForLocation) { + const headingForLocation = document.querySelector(activeEntry.hash); + if (headingForLocation && isInViewport(headingForLocation)) { + // Update ToC scroll + const nav = getPageToc(); + const content = document.querySelector('html'); + if (content.scrollTop !== 0) { + nav.scrollTo({ + top: tocEntryForLocation.offsetTop - 100, + left: 0, + behavior: 'smooth', + }); + } else { + nav.scrollTop = 0; + } + } + } +} + + +/** + * Populate sidebar on load + */ +window.addEventListener('load', () => { + // Only create table of contents if there is more than one header on the page + if (headers.length <= 1) { + return; + } + + // Create an entry in the page table of contents for each header in the document + headers.forEach((header, index) => { + const link = document.createElement('a'); + + // Indent shows hierarchy + let indent = '0px'; + switch (header.parentElement.tagName) { + case 'H1': + indent = '5px'; + break; + case 'H2': + indent = '20px'; + break; + case 'H3': + indent = '30px'; + break; + case 'H4': + indent = '40px'; + break; + case 'H5': + indent = '50px'; + break; + case 'H6': + indent = '60px'; + break; + default: + break; + } + + let tocEntry; + if (index == 0) { + // Create a bolded title for the first element + tocEntry = document.createElement("strong"); + tocEntry.innerHTML = header.text; + } else { + // All other elements are non-bold + tocEntry = document.createTextNode(header.text); + } + link.appendChild(tocEntry); + + link.style.paddingLeft = indent; + link.href = header.href; + pageToc.appendChild(link); + }); + setTocEntry.call(); +}); + + +// Handle active headers on scroll, if there is more than one header on the page +if (headers.length > 1) { + window.addEventListener('scroll', setTocEntry); +} diff --git a/docs/website_files/theme/index.hbs b/docs/website_files/theme/index.hbs new file mode 100644 index 0000000000..3b7a5b6163 --- /dev/null +++ b/docs/website_files/theme/index.hbs @@ -0,0 +1,312 @@ + + + + + + {{ title }} + {{#if is_print }} + + {{/if}} + {{#if base_url}} + + {{/if}} + + + + {{> head}} + + + + + + + {{#if favicon_svg}} + + {{/if}} + {{#if favicon_png}} + + {{/if}} + + + + {{#if print_enable}} + + {{/if}} + + + + {{#if copy_fonts}} + + {{/if}} + + + + + + + + {{#each additional_css}} + + {{/each}} + + {{#if mathjax_support}} + + + {{/if}} + + + + + + + + + + + + + + + + +
+ +
+ {{> header}} + + + + {{#if search_enabled}} + + {{/if}} + + + + +
+
+ +
+ +
+ + {{{ content }}} +
+ + +
+
+ + + +
+ + {{#if livereload}} + + + {{/if}} + + {{#if google_analytics}} + + + {{/if}} + + {{#if playground_line_numbers}} + + {{/if}} + + {{#if playground_copyable}} + + {{/if}} + + {{#if playground_js}} + + + + + + {{/if}} + + {{#if search_js}} + + + + {{/if}} + + + + + + + {{#each additional_js}} + + {{/each}} + + {{#if is_print}} + {{#if mathjax_support}} + + {{else}} + + {{/if}} + {{/if}} + + + \ No newline at end of file diff --git a/docs/welcome_and_overview.md b/docs/welcome_and_overview.md new file mode 100644 index 0000000000..30e75984d1 --- /dev/null +++ b/docs/welcome_and_overview.md @@ -0,0 +1,4 @@ +# Introduction + +Welcome to the documentation repository for Synapse, the reference +[Matrix](https://matrix.org) homeserver implementation. \ No newline at end of file diff --git a/docs/workers.md b/docs/workers.md index c6282165b0..46b5e4b737 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -228,6 +228,9 @@ expressions: ^/_matrix/client/(api/v1|r0|unstable)/joined_groups$ ^/_matrix/client/(api/v1|r0|unstable)/publicised_groups$ ^/_matrix/client/(api/v1|r0|unstable)/publicised_groups/ + ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/event/ + ^/_matrix/client/(api/v1|r0|unstable)/joined_rooms$ + ^/_matrix/client/(api/v1|r0|unstable)/search$ # Registration/login requests ^/_matrix/client/(api/v1|r0|unstable)/login$ diff --git a/mypy.ini b/mypy.ini index 062872020e..1ab9001831 100644 --- a/mypy.ini +++ b/mypy.ini @@ -32,6 +32,7 @@ files = synapse/http/federation/matrix_federation_agent.py, synapse/http/federation/well_known_resolver.py, synapse/http/matrixfederationclient.py, + synapse/http/servlet.py, synapse/http/server.py, synapse/http/site.py, synapse/logging, @@ -130,7 +131,7 @@ ignore_missing_imports = True [mypy-canonicaljson] ignore_missing_imports = True -[mypy-jaeger_client] +[mypy-jaeger_client.*] ignore_missing_imports = True [mypy-jsonschema] diff --git a/scripts-dev/convert_server_keys.py b/scripts-dev/convert_server_keys.py deleted file mode 100644 index d4314a054c..0000000000 --- a/scripts-dev/convert_server_keys.py +++ /dev/null @@ -1,108 +0,0 @@ -import json -import sys -import time - -import psycopg2 -import yaml -from canonicaljson import encode_canonical_json -from signedjson.key import read_signing_keys -from signedjson.sign import sign_json -from unpaddedbase64 import encode_base64 - -db_binary_type = memoryview - - -def select_v1_keys(connection): - cursor = connection.cursor() - cursor.execute("SELECT server_name, key_id, verify_key FROM server_signature_keys") - rows = cursor.fetchall() - cursor.close() - results = {} - for server_name, key_id, verify_key in rows: - results.setdefault(server_name, {})[key_id] = encode_base64(verify_key) - return results - - -def select_v1_certs(connection): - cursor = connection.cursor() - cursor.execute("SELECT server_name, tls_certificate FROM server_tls_certificates") - rows = cursor.fetchall() - cursor.close() - results = {} - for server_name, tls_certificate in rows: - results[server_name] = tls_certificate - return results - - -def select_v2_json(connection): - cursor = connection.cursor() - cursor.execute("SELECT server_name, key_id, key_json FROM server_keys_json") - rows = cursor.fetchall() - cursor.close() - results = {} - for server_name, key_id, key_json in rows: - results.setdefault(server_name, {})[key_id] = json.loads( - str(key_json).decode("utf-8") - ) - return results - - -def convert_v1_to_v2(server_name, valid_until, keys, certificate): - return { - "old_verify_keys": {}, - "server_name": server_name, - 
"verify_keys": {key_id: {"key": key} for key_id, key in keys.items()}, - "valid_until_ts": valid_until, - } - - -def rows_v2(server, json): - valid_until = json["valid_until_ts"] - key_json = encode_canonical_json(json) - for key_id in json["verify_keys"]: - yield (server, key_id, "-", valid_until, valid_until, db_binary_type(key_json)) - - -def main(): - config = yaml.safe_load(open(sys.argv[1])) - valid_until = int(time.time() / (3600 * 24)) * 1000 * 3600 * 24 - - server_name = config["server_name"] - signing_key = read_signing_keys(open(config["signing_key_path"]))[0] - - database = config["database"] - assert database["name"] == "psycopg2", "Can only convert for postgresql" - args = database["args"] - args.pop("cp_max") - args.pop("cp_min") - connection = psycopg2.connect(**args) - keys = select_v1_keys(connection) - certificates = select_v1_certs(connection) - json = select_v2_json(connection) - - result = {} - for server in keys: - if server not in json: - v2_json = convert_v1_to_v2( - server, valid_until, keys[server], certificates[server] - ) - v2_json = sign_json(v2_json, server_name, signing_key) - result[server] = v2_json - - yaml.safe_dump(result, sys.stdout, default_flow_style=False) - - rows = [row for server, json in result.items() for row in rows_v2(server, json)] - - cursor = connection.cursor() - cursor.executemany( - "INSERT INTO server_keys_json (" - " server_name, key_id, from_server," - " ts_added_ms, ts_valid_until_ms, key_json" - ") VALUES (%s, %s, %s, %s, %s, %s)", - rows, - ) - connection.commit() - - -if __name__ == "__main__": - main() diff --git a/scripts-dev/release.py b/scripts-dev/release.py index 1042fa48bc..fc3df9071c 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -139,7 +139,7 @@ def run(): click.get_current_context().abort() # Switch to the release branch. 
- release_branch_name = f"release-v{base_version}" + release_branch_name = f"release-v{current_version.major}.{current_version.minor}" release_branch = find_ref(repo, release_branch_name) if release_branch: if release_branch.is_remote(): diff --git a/synapse/__init__.py b/synapse/__init__.py index 445e8a5cad..c3016fc6ed 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.35.1" +__version__ = "1.36.0" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when diff --git a/synapse/api/auth.py b/synapse/api/auth.py index b2e60c6aa7..a67624f230 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -206,11 +206,11 @@ async def get_user_by_req( requester = create_requester(user_id, app_service=app_service) request.requester = user_id + if user_id in self._force_tracing_for_users: + opentracing.set_tag(opentracing.tags.SAMPLING_PRIORITY, 1) opentracing.set_tag("authenticated_entity", user_id) opentracing.set_tag("user_id", user_id) opentracing.set_tag("appservice_id", app_service.id) - if user_id in self._force_tracing_for_users: - opentracing.set_tag(opentracing.tags.SAMPLING_PRIORITY, 1) return requester @@ -259,12 +259,12 @@ async def get_user_by_req( ) request.requester = requester + if user_info.token_owner in self._force_tracing_for_users: + opentracing.set_tag(opentracing.tags.SAMPLING_PRIORITY, 1) opentracing.set_tag("authenticated_entity", user_info.token_owner) opentracing.set_tag("user_id", user_info.user_id) if device_id: opentracing.set_tag("device_id", device_id) - if user_info.token_owner in self._force_tracing_for_users: - opentracing.set_tag(opentracing.tags.SAMPLING_PRIORITY, 1) return requester except KeyError: diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index 2c0ead80f5..1e78a69a64 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -205,6 +205,6 @@ class RoomVersions: RoomVersions.V6, RoomVersions.V7, RoomVersions.MSC2176, + RoomVersions.MSC3083, ) - # Note that we do not include MSC3083 here unless it is enabled in the config. } # type: Dict[str, RoomVersion] diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 59918d789e..1329af2e2b 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -261,13 +261,10 @@ def refresh_certificate(hs): Refresh the TLS certificates that Synapse is using by re-reading them from disk and updating the TLS context factories to use them. 
""" - if not hs.config.has_tls_listener(): - # attempt to reload the certs for the good of the tls_fingerprints - hs.config.read_certificate_from_disk(require_cert_and_key=False) return - hs.config.read_certificate_from_disk(require_cert_and_key=True) + hs.config.read_certificate_from_disk() hs.tls_server_context_factory = context_factory.ServerContextFactory(hs.config) if hs._listening_services: diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 91ad326f19..57c2fc2e88 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -109,7 +109,7 @@ MonthlyActiveUsersWorkerStore, ) from synapse.storage.databases.main.presence import PresenceStore -from synapse.storage.databases.main.search import SearchWorkerStore +from synapse.storage.databases.main.search import SearchStore from synapse.storage.databases.main.stats import StatsStore from synapse.storage.databases.main.transactions import TransactionWorkerStore from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore @@ -242,7 +242,7 @@ class GenericWorkerSlavedStore( MonthlyActiveUsersWorkerStore, MediaRepositoryStore, ServerMetricsStore, - SearchWorkerStore, + SearchStore, TransactionWorkerStore, BaseSlavedStore, ): diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index ea692f699d..763de0c3c5 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions from synapse.config._base import Config from synapse.types import JsonDict @@ -34,27 +33,5 @@ def read_config(self, config: JsonDict, **kwargs): # MSC2858 (multiple SSO identity providers) self.msc2858_enabled = experimental.get("msc2858_enabled", False) # type: bool - # Spaces (MSC1772, MSC2946, MSC3083, etc) - self.spaces_enabled = experimental.get("spaces_enabled", True) # type: bool - if self.spaces_enabled: - KNOWN_ROOM_VERSIONS[RoomVersions.MSC3083.identifier] = RoomVersions.MSC3083 - # MSC3026 (busy presence state) self.msc3026_enabled = experimental.get("msc3026_enabled", False) # type: bool - - def generate_config_section(self, **kwargs): - return """\ - # Enable experimental features in Synapse. - # - # Experimental features might break or be removed without a deprecation - # period. - # - experimental_features: - # Support for Spaces (MSC1772), it enables the following: - # - # * The Spaces Summary API (MSC2946). - # * Restricting room membership based on space membership (MSC3083). - # - # Uncomment to disable support for Spaces. - #spaces_enabled: false - """ diff --git a/synapse/config/tls.py b/synapse/config/tls.py index 26f1150ca5..0e9bba53c9 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -215,28 +215,12 @@ def is_disk_cert_valid(self, allow_self_signed=True): days_remaining = (expires_on - now).days return days_remaining - def read_certificate_from_disk(self, require_cert_and_key: bool): + def read_certificate_from_disk(self): """ Read the certificates and private key from disk. - - Args: - require_cert_and_key: set to True to throw an error if the certificate - and key file are not given """ - if require_cert_and_key: - self.tls_private_key = self.read_tls_private_key() - self.tls_certificate = self.read_tls_certificate() - elif self.tls_certificate_file: - # we only need the certificate for the tls_fingerprints. 
Reload it if we - # can, but it's not a fatal error if we can't. - try: - self.tls_certificate = self.read_tls_certificate() - except Exception as e: - logger.info( - "Unable to read TLS certificate (%s). Ignoring as no " - "tls listeners enabled.", - e, - ) + self.tls_private_key = self.read_tls_private_key() + self.tls_certificate = self.read_tls_certificate() def generate_config_section( self, diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 6fc0712978..e5a4685ed4 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -16,8 +16,7 @@ import abc import logging import urllib -from collections import defaultdict -from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Set, Tuple +from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Tuple import attr from signedjson.key import ( @@ -44,17 +43,12 @@ from synapse.config.key import TrustedKeyServer from synapse.events import EventBase from synapse.events.utils import prune_event_dict -from synapse.logging.context import ( - PreserveLoggingContext, - make_deferred_yieldable, - preserve_fn, - run_in_background, -) +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.storage.keys import FetchKeyResult from synapse.types import JsonDict from synapse.util import unwrapFirstError from synapse.util.async_helpers import yieldable_gather_results -from synapse.util.metrics import Measure +from synapse.util.batching_queue import BatchingQueue from synapse.util.retryutils import NotRetryingDestination if TYPE_CHECKING: @@ -80,32 +74,19 @@ class VerifyJsonRequest: minimum_valid_until_ts: time at which we require the signing key to be valid. (0 implies we don't care) - request_name: The name of the request. - key_ids: The set of key_ids to that could be used to verify the JSON object - - key_ready (Deferred[str, str, nacl.signing.VerifyKey]): - A deferred (server_name, key_id, verify_key) tuple that resolves when - a verify key has been fetched. The deferreds' callbacks are run with no - logcontext. - - If we are unable to find a key which satisfies the request, the deferred - errbacks with an M_UNAUTHORIZED SynapseError. """ server_name = attr.ib(type=str) get_json_object = attr.ib(type=Callable[[], JsonDict]) minimum_valid_until_ts = attr.ib(type=int) - request_name = attr.ib(type=str) key_ids = attr.ib(type=List[str]) - key_ready = attr.ib(default=attr.Factory(defer.Deferred), type=defer.Deferred) @staticmethod def from_json_object( server_name: str, json_object: JsonDict, minimum_valid_until_ms: int, - request_name: str, ): """Create a VerifyJsonRequest to verify all signatures on a signed JSON object for the given server. @@ -115,7 +96,6 @@ def from_json_object( server_name, lambda: json_object, minimum_valid_until_ms, - request_name=request_name, key_ids=key_ids, ) @@ -135,16 +115,48 @@ def from_event( # memory than the Event object itself. lambda: prune_event_dict(event.room_version, event.get_pdu_json()), minimum_valid_until_ms, - request_name=event.event_id, key_ids=key_ids, ) + def to_fetch_key_request(self) -> "_FetchKeyRequest": + """Create a key fetch request for all keys needed to satisfy the + verification request. + """ + return _FetchKeyRequest( + server_name=self.server_name, + minimum_valid_until_ts=self.minimum_valid_until_ts, + key_ids=self.key_ids, + ) + class KeyLookupError(ValueError): pass +@attr.s(slots=True) +class _FetchKeyRequest: + """A request for keys for a given server. 
+ + We will continue to try and fetch until we have all the keys listed under + `key_ids` (with an appropriate `valid_until_ts` property) or we run out of + places to fetch keys from. + + Attributes: + server_name: The name of the server that owns the keys. + minimum_valid_until_ts: The timestamp which the keys must be valid until. + key_ids: The IDs of the keys to attempt to fetch + """ + + server_name = attr.ib(type=str) + minimum_valid_until_ts = attr.ib(type=int) + key_ids = attr.ib(type=List[str]) + + class Keyring: + """Handles verifying signed JSON objects and fetching the keys needed to do + so. + """ + def __init__( self, hs: "HomeServer", key_fetchers: "Optional[Iterable[KeyFetcher]]" = None ): @@ -158,22 +170,22 @@ def __init__( ) self._key_fetchers = key_fetchers - # map from server name to Deferred. Has an entry for each server with - # an ongoing key download; the Deferred completes once the download - # completes. - # - # These are regular, logcontext-agnostic Deferreds. - self.key_downloads = {} # type: Dict[str, defer.Deferred] + self._server_queue = BatchingQueue( + "keyring_server", + clock=hs.get_clock(), + process_batch_callback=self._inner_fetch_key_requests, + ) # type: BatchingQueue[_FetchKeyRequest, Dict[str, Dict[str, FetchKeyResult]]] - def verify_json_for_server( + async def verify_json_for_server( self, server_name: str, json_object: JsonDict, validity_time: int, - request_name: str, - ) -> defer.Deferred: + ) -> None: """Verify that a JSON object has been signed by a given server + Completes if the the object was correctly signed, otherwise raises. + Args: server_name: name of the server which must have signed this object @@ -181,392 +193,265 @@ def verify_json_for_server( validity_time: timestamp at which we require the signing key to be valid. (0 implies we don't care) - - request_name: an identifier for this json object (eg, an event id) - for logging. - - Returns: - Deferred[None]: completes if the the object was correctly signed, otherwise - errbacks with an error """ request = VerifyJsonRequest.from_json_object( server_name, json_object, validity_time, - request_name, ) - requests = (request,) - return make_deferred_yieldable(self._verify_objects(requests)[0]) + return await self.process_request(request) def verify_json_objects_for_server( - self, server_and_json: Iterable[Tuple[str, dict, int, str]] + self, server_and_json: Iterable[Tuple[str, dict, int]] ) -> List[defer.Deferred]: """Bulk verifies signatures of json objects, bulk fetching keys as necessary. Args: server_and_json: - Iterable of (server_name, json_object, validity_time, request_name) + Iterable of (server_name, json_object, validity_time) tuples. validity_time is a timestamp at which the signing key must be valid. - request_name is an identifier for this json object (eg, an event id) - for logging. - Returns: List: for each input triplet, a deferred indicating success or failure to verify each json object's signature for the given server_name. The deferreds run their callbacks in the sentinel logcontext. 
""" - return self._verify_objects( - VerifyJsonRequest.from_json_object( - server_name, json_object, validity_time, request_name + return [ + run_in_background( + self.process_request, + VerifyJsonRequest.from_json_object( + server_name, + json_object, + validity_time, + ), ) - for server_name, json_object, validity_time, request_name in server_and_json - ) + for server_name, json_object, validity_time in server_and_json + ] - def verify_events_for_server( - self, server_and_events: Iterable[Tuple[str, EventBase, int]] - ) -> List[defer.Deferred]: - """Bulk verification of signatures on events. - - Args: - server_and_events: - Iterable of `(server_name, event, validity_time)` tuples. - - `server_name` is which server we are verifying the signature for - on the event. - - `event` is the event that we'll verify the signatures of for - the given `server_name`. - - `validity_time` is a timestamp at which the signing key must be - valid. - - Returns: - List: for each input triplet, a deferred indicating success - or failure to verify each event's signature for the given - server_name. The deferreds run their callbacks in the sentinel - logcontext. - """ - return self._verify_objects( - VerifyJsonRequest.from_event(server_name, event, validity_time) - for server_name, event, validity_time in server_and_events + async def verify_event_for_server( + self, + server_name: str, + event: EventBase, + validity_time: int, + ) -> None: + await self.process_request( + VerifyJsonRequest.from_event( + server_name, + event, + validity_time, + ) ) - def _verify_objects( - self, verify_requests: Iterable[VerifyJsonRequest] - ) -> List[defer.Deferred]: - """Does the work of verify_json_[objects_]for_server - - - Args: - verify_requests: Iterable of verification requests. - - Returns: - List: for each input item, a deferred indicating success - or failure to verify each json object's signature for the given - server_name. The deferreds run their callbacks in the sentinel - logcontext. + async def process_request(self, verify_request: VerifyJsonRequest) -> None: + """Processes the `VerifyJsonRequest`. Raises if the object is not signed + by the server, the signatures don't match or we failed to fetch the + necessary keys. """ - # a list of VerifyJsonRequests which are awaiting a key lookup - key_lookups = [] - handle = preserve_fn(_handle_key_deferred) - - def process(verify_request: VerifyJsonRequest) -> defer.Deferred: - """Process an entry in the request list - - Adds a key request to key_lookups, and returns a deferred which - will complete or fail (in the sentinel context) when verification completes. - """ - if not verify_request.key_ids: - return defer.fail( - SynapseError( - 400, - "Not signed by %s" % (verify_request.server_name,), - Codes.UNAUTHORIZED, - ) - ) - logger.debug( - "Verifying %s for %s with key_ids %s, min_validity %i", - verify_request.request_name, - verify_request.server_name, - verify_request.key_ids, - verify_request.minimum_valid_until_ts, + if not verify_request.key_ids: + raise SynapseError( + 400, + f"Not signed by {verify_request.server_name}", + Codes.UNAUTHORIZED, ) - # add the key request to the queue, but don't start it off yet. - key_lookups.append(verify_request) - - # now run _handle_key_deferred, which will wait for the key request - # to complete and then do the verification. 
- # - # We want _handle_key_request to log to the right context, so we - # wrap it with preserve_fn (aka run_in_background) - return handle(verify_request) - - results = [process(r) for r in verify_requests] - - if key_lookups: - run_in_background(self._start_key_lookups, key_lookups) - - return results - - async def _start_key_lookups( - self, verify_requests: List[VerifyJsonRequest] - ) -> None: - """Sets off the key fetches for each verify request - - Once each fetch completes, verify_request.key_ready will be resolved. - - Args: - verify_requests: - """ - - try: - # map from server name to a set of outstanding request ids - server_to_request_ids = {} # type: Dict[str, Set[int]] - - for verify_request in verify_requests: - server_name = verify_request.server_name - request_id = id(verify_request) - server_to_request_ids.setdefault(server_name, set()).add(request_id) - - # Wait for any previous lookups to complete before proceeding. - await self.wait_for_previous_lookups(server_to_request_ids.keys()) - - # take out a lock on each of the servers by sticking a Deferred in - # key_downloads - for server_name in server_to_request_ids.keys(): - self.key_downloads[server_name] = defer.Deferred() - logger.debug("Got key lookup lock on %s", server_name) - - # When we've finished fetching all the keys for a given server_name, - # drop the lock by resolving the deferred in key_downloads. - def drop_server_lock(server_name): - d = self.key_downloads.pop(server_name) - d.callback(None) - - def lookup_done(res, verify_request): - server_name = verify_request.server_name - server_requests = server_to_request_ids[server_name] - server_requests.remove(id(verify_request)) - - # if there are no more requests for this server, we can drop the lock. - if not server_requests: - logger.debug("Releasing key lookup lock on %s", server_name) - drop_server_lock(server_name) - - return res + # Add the keys we need to verify to the queue for retrieval. We queue + # up requests for the same server so we don't end up with many in flight + # requests for the same keys. + key_request = verify_request.to_fetch_key_request() + found_keys_by_server = await self._server_queue.add_to_queue( + key_request, key=verify_request.server_name + ) - for verify_request in verify_requests: - verify_request.key_ready.addBoth(lookup_done, verify_request) + # Since we batch up requests the returned set of keys may contain keys + # from other servers, so we pull out only the ones we care about.s + found_keys = found_keys_by_server.get(verify_request.server_name, {}) - # Actually start fetching keys. - self._get_server_verify_keys(verify_requests) - except Exception: - logger.exception("Error starting key lookups") + # Verify each signature we got valid keys for, raising if we can't + # verify any of them. + verified = False + for key_id in verify_request.key_ids: + key_result = found_keys.get(key_id) + if not key_result: + continue - async def wait_for_previous_lookups(self, server_names: Iterable[str]) -> None: - """Waits for any previous key lookups for the given servers to finish. 
+ if key_result.valid_until_ts < verify_request.minimum_valid_until_ts: + continue - Args: - server_names: list of servers which we want to look up + verify_key = key_result.verify_key + json_object = verify_request.get_json_object() + try: + verify_signed_json( + json_object, + verify_request.server_name, + verify_key, + ) + verified = True + except SignatureVerifyException as e: + logger.debug( + "Error verifying signature for %s:%s:%s with key %s: %s", + verify_request.server_name, + verify_key.alg, + verify_key.version, + encode_verify_key_base64(verify_key), + str(e), + ) + raise SynapseError( + 401, + "Invalid signature for server %s with key %s:%s: %s" + % ( + verify_request.server_name, + verify_key.alg, + verify_key.version, + str(e), + ), + Codes.UNAUTHORIZED, + ) - Returns: - Resolves once all key lookups for the given servers have - completed. Follows the synapse rules of logcontext preservation. - """ - loop_count = 1 - while True: - wait_on = [ - (server_name, self.key_downloads[server_name]) - for server_name in server_names - if server_name in self.key_downloads - ] - if not wait_on: - break - logger.info( - "Waiting for existing lookups for %s to complete [loop %i]", - [w[0] for w in wait_on], - loop_count, + if not verified: + raise SynapseError( + 401, + f"Failed to find any key to satisfy: {key_request}", + Codes.UNAUTHORIZED, ) - with PreserveLoggingContext(): - await defer.DeferredList((w[1] for w in wait_on)) - loop_count += 1 + async def _inner_fetch_key_requests( + self, requests: List[_FetchKeyRequest] + ) -> Dict[str, Dict[str, FetchKeyResult]]: + """Processing function for the queue of `_FetchKeyRequest`.""" + + logger.debug("Starting fetch for %s", requests) + + # First we need to deduplicate requests for the same key. We do this by + # taking the *maximum* requested `minimum_valid_until_ts` for each pair + # of server name/key ID. + server_to_key_to_ts = {} # type: Dict[str, Dict[str, int]] + for request in requests: + by_server = server_to_key_to_ts.setdefault(request.server_name, {}) + for key_id in request.key_ids: + existing_ts = by_server.get(key_id, 0) + by_server[key_id] = max(request.minimum_valid_until_ts, existing_ts) + + deduped_requests = [ + _FetchKeyRequest(server_name, minimum_valid_ts, [key_id]) + for server_name, by_server in server_to_key_to_ts.items() + for key_id, minimum_valid_ts in by_server.items() + ] + + logger.debug("Deduplicated key requests to %s", deduped_requests) + + # For each key we call `_inner_verify_request` which will handle + # fetching each key. Note these shouldn't throw if we fail to contact + # other servers etc. + results_per_request = await yieldable_gather_results( + self._inner_fetch_key_request, + deduped_requests, + ) - def _get_server_verify_keys(self, verify_requests: List[VerifyJsonRequest]) -> None: - """Tries to find at least one key for each verify request + # We now convert the returned list of results into a map from server + # name to key ID to FetchKeyResult, to return. 
+ to_return = {} # type: Dict[str, Dict[str, FetchKeyResult]] + for (request, results) in zip(deduped_requests, results_per_request): + to_return_by_server = to_return.setdefault(request.server_name, {}) + for key_id, key_result in results.items(): + existing = to_return_by_server.get(key_id) + if not existing or existing.valid_until_ts < key_result.valid_until_ts: + to_return_by_server[key_id] = key_result - For each verify_request, verify_request.key_ready is called back with - params (server_name, key_id, VerifyKey) if a key is found, or errbacked - with a SynapseError if none of the keys are found. + return to_return - Args: - verify_requests: list of verify requests + async def _inner_fetch_key_request( + self, verify_request: _FetchKeyRequest + ) -> Dict[str, FetchKeyResult]: + """Attempt to fetch the given key by calling each key fetcher one by + one. """ + logger.debug("Starting fetch for %s", verify_request) - remaining_requests = {rq for rq in verify_requests if not rq.key_ready.called} + found_keys: Dict[str, FetchKeyResult] = {} + missing_key_ids = set(verify_request.key_ids) - async def do_iterations(): - try: - with Measure(self.clock, "get_server_verify_keys"): - for f in self._key_fetchers: - if not remaining_requests: - return - await self._attempt_key_fetches_with_fetcher( - f, remaining_requests - ) - - # look for any requests which weren't satisfied - while remaining_requests: - verify_request = remaining_requests.pop() - rq_str = ( - "VerifyJsonRequest(server=%s, key_ids=%s, min_valid=%i)" - % ( - verify_request.server_name, - verify_request.key_ids, - verify_request.minimum_valid_until_ts, - ) - ) - - # If we run the errback immediately, it may cancel our - # loggingcontext while we are still in it, so instead we - # schedule it for the next time round the reactor. - # - # (this also ensures that we don't get a stack overflow if we - # has a massive queue of lookups waiting for this server). - self.clock.call_later( - 0, - verify_request.key_ready.errback, - SynapseError( - 401, - "Failed to find any key to satisfy %s" % (rq_str,), - Codes.UNAUTHORIZED, - ), - ) - except Exception as err: - # we don't really expect to get here, because any errors should already - # have been caught and logged. But if we do, let's log the error and make - # sure that all of the deferreds are resolved. - logger.error("Unexpected error in _get_server_verify_keys: %s", err) - with PreserveLoggingContext(): - for verify_request in remaining_requests: - if not verify_request.key_ready.called: - verify_request.key_ready.errback(err) - - run_in_background(do_iterations) - - async def _attempt_key_fetches_with_fetcher( - self, fetcher: "KeyFetcher", remaining_requests: Set[VerifyJsonRequest] - ): - """Use a key fetcher to attempt to satisfy some key requests + for fetcher in self._key_fetchers: + if not missing_key_ids: + break - Args: - fetcher: fetcher to use to fetch the keys - remaining_requests: outstanding key requests. - Any successfully-completed requests will be removed from the list. - """ - # The keys to fetch. 
- # server_name -> key_id -> min_valid_ts - missing_keys = defaultdict(dict) # type: Dict[str, Dict[str, int]] - - for verify_request in remaining_requests: - # any completed requests should already have been removed - assert not verify_request.key_ready.called - keys_for_server = missing_keys[verify_request.server_name] - - for key_id in verify_request.key_ids: - # If we have several requests for the same key, then we only need to - # request that key once, but we should do so with the greatest - # min_valid_until_ts of the requests, so that we can satisfy all of - # the requests. - keys_for_server[key_id] = max( - keys_for_server.get(key_id, -1), - verify_request.minimum_valid_until_ts, - ) + logger.debug("Getting keys from %s for %s", fetcher, verify_request) + keys = await fetcher.get_keys( + verify_request.server_name, + list(missing_key_ids), + verify_request.minimum_valid_until_ts, + ) - results = await fetcher.get_keys(missing_keys) + for key_id, key in keys.items(): + if not key: + continue - completed = [] - for verify_request in remaining_requests: - server_name = verify_request.server_name + # If we already have a result for the given key ID we keep the + # one with the highest `valid_until_ts`. + existing_key = found_keys.get(key_id) + if existing_key: + if key.valid_until_ts <= existing_key.valid_until_ts: + continue - # see if any of the keys we got this time are sufficient to - # complete this VerifyJsonRequest. - result_keys = results.get(server_name, {}) - for key_id in verify_request.key_ids: - fetch_key_result = result_keys.get(key_id) - if not fetch_key_result: - # we didn't get a result for this key - continue + # We always store the returned key even if it doesn't the + # `minimum_valid_until_ts` requirement, as some verification + # requests may still be able to be satisfied by it. + # + # We still keep looking for the key from other fetchers in that + # case though. + found_keys[key_id] = key - if ( - fetch_key_result.valid_until_ts - < verify_request.minimum_valid_until_ts - ): - # key was not valid at this point + if key.valid_until_ts < verify_request.minimum_valid_until_ts: continue - # we have a valid key for this request. If we run the callback - # immediately, it may cancel our loggingcontext while we are still in - # it, so instead we schedule it for the next time round the reactor. - # - # (this also ensures that we don't get a stack overflow if we had - # a massive queue of lookups waiting for this server). - logger.debug( - "Found key %s:%s for %s", - server_name, - key_id, - verify_request.request_name, - ) - self.clock.call_later( - 0, - verify_request.key_ready.callback, - (server_name, key_id, fetch_key_result.verify_key), - ) - completed.append(verify_request) - break + missing_key_ids.discard(key_id) - remaining_requests.difference_update(completed) + return found_keys class KeyFetcher(metaclass=abc.ABCMeta): - @abc.abstractmethod + def __init__(self, hs: "HomeServer"): + self._queue = BatchingQueue( + self.__class__.__name__, hs.get_clock(), self._fetch_keys + ) + async def get_keys( - self, keys_to_fetch: Dict[str, Dict[str, int]] - ) -> Dict[str, Dict[str, FetchKeyResult]]: - """ - Args: - keys_to_fetch: - the keys to be fetched. 
server_name -> key_id -> min_valid_ts + self, server_name: str, key_ids: List[str], minimum_valid_until_ts: int + ) -> Dict[str, FetchKeyResult]: + results = await self._queue.add_to_queue( + _FetchKeyRequest( + server_name=server_name, + key_ids=key_ids, + minimum_valid_until_ts=minimum_valid_until_ts, + ) + ) + return results.get(server_name, {}) - Returns: - Map from server_name -> key_id -> FetchKeyResult - """ - raise NotImplementedError + @abc.abstractmethod + async def _fetch_keys( + self, keys_to_fetch: List[_FetchKeyRequest] + ) -> Dict[str, Dict[str, FetchKeyResult]]: + pass class StoreKeyFetcher(KeyFetcher): """KeyFetcher impl which fetches keys from our data store""" def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + super().__init__(hs) - async def get_keys( - self, keys_to_fetch: Dict[str, Dict[str, int]] - ) -> Dict[str, Dict[str, FetchKeyResult]]: - """see KeyFetcher.get_keys""" + self.store = hs.get_datastore() + async def _fetch_keys(self, keys_to_fetch: List[_FetchKeyRequest]): key_ids_to_fetch = ( - (server_name, key_id) - for server_name, keys_for_server in keys_to_fetch.items() - for key_id in keys_for_server.keys() + (queue_value.server_name, key_id) + for queue_value in keys_to_fetch + for key_id in queue_value.key_ids ) res = await self.store.get_server_verify_keys(key_ids_to_fetch) @@ -578,6 +463,8 @@ async def get_keys( class BaseV2KeyFetcher(KeyFetcher): def __init__(self, hs: "HomeServer"): + super().__init__(hs) + self.store = hs.get_datastore() self.config = hs.config @@ -685,10 +572,10 @@ def __init__(self, hs: "HomeServer"): self.client = hs.get_federation_http_client() self.key_servers = self.config.key_servers - async def get_keys( - self, keys_to_fetch: Dict[str, Dict[str, int]] + async def _fetch_keys( + self, keys_to_fetch: List[_FetchKeyRequest] ) -> Dict[str, Dict[str, FetchKeyResult]]: - """see KeyFetcher.get_keys""" + """see KeyFetcher._fetch_keys""" async def get_key(key_server: TrustedKeyServer) -> Dict: try: @@ -724,12 +611,12 @@ async def get_key(key_server: TrustedKeyServer) -> Dict: return union_of_keys async def get_server_verify_key_v2_indirect( - self, keys_to_fetch: Dict[str, Dict[str, int]], key_server: TrustedKeyServer + self, keys_to_fetch: List[_FetchKeyRequest], key_server: TrustedKeyServer ) -> Dict[str, Dict[str, FetchKeyResult]]: """ Args: keys_to_fetch: - the keys to be fetched. server_name -> key_id -> min_valid_ts + the keys to be fetched. 
key_server: notary server to query for the keys @@ -743,7 +630,7 @@ async def get_server_verify_key_v2_indirect( perspective_name = key_server.server_name logger.info( "Requesting keys %s from notary server %s", - keys_to_fetch.items(), + keys_to_fetch, perspective_name, ) @@ -753,11 +640,13 @@ async def get_server_verify_key_v2_indirect( path="/_matrix/key/v2/query", data={ "server_keys": { - server_name: { - key_id: {"minimum_valid_until_ts": min_valid_ts} - for key_id, min_valid_ts in server_keys.items() + queue_value.server_name: { + key_id: { + "minimum_valid_until_ts": queue_value.minimum_valid_until_ts, + } + for key_id in queue_value.key_ids } - for server_name, server_keys in keys_to_fetch.items() + for queue_value in keys_to_fetch } }, ) @@ -858,7 +747,20 @@ def __init__(self, hs: "HomeServer"): self.client = hs.get_federation_http_client() async def get_keys( - self, keys_to_fetch: Dict[str, Dict[str, int]] + self, server_name: str, key_ids: List[str], minimum_valid_until_ts: int + ) -> Dict[str, FetchKeyResult]: + results = await self._queue.add_to_queue( + _FetchKeyRequest( + server_name=server_name, + key_ids=key_ids, + minimum_valid_until_ts=minimum_valid_until_ts, + ), + key=server_name, + ) + return results.get(server_name, {}) + + async def _fetch_keys( + self, keys_to_fetch: List[_FetchKeyRequest] ) -> Dict[str, Dict[str, FetchKeyResult]]: """ Args: @@ -871,8 +773,10 @@ async def get_keys( results = {} - async def get_key(key_to_fetch_item: Tuple[str, Dict[str, int]]) -> None: - server_name, key_ids = key_to_fetch_item + async def get_key(key_to_fetch_item: _FetchKeyRequest) -> None: + server_name = key_to_fetch_item.server_name + key_ids = key_to_fetch_item.key_ids + try: keys = await self.get_server_verify_key_v2_direct(server_name, key_ids) results[server_name] = keys @@ -883,7 +787,7 @@ async def get_key(key_to_fetch_item: Tuple[str, Dict[str, int]]) -> None: except Exception: logger.exception("Error getting keys %s from %s", key_ids, server_name) - await yieldable_gather_results(get_key, keys_to_fetch.items()) + await yieldable_gather_results(get_key, keys_to_fetch) return results async def get_server_verify_key_v2_direct( @@ -955,37 +859,3 @@ async def get_server_verify_key_v2_direct( keys.update(response_keys) return keys - - -async def _handle_key_deferred(verify_request: VerifyJsonRequest) -> None: - """Waits for the key to become available, and then performs a verification - - Args: - verify_request: - - Raises: - SynapseError if there was a problem performing the verification - """ - server_name = verify_request.server_name - with PreserveLoggingContext(): - _, key_id, verify_key = await verify_request.key_ready - - json_object = verify_request.get_json_object() - - try: - verify_signed_json(json_object, server_name, verify_key) - except SignatureVerifyException as e: - logger.debug( - "Error verifying signature for %s:%s:%s with key %s: %s", - server_name, - verify_key.alg, - verify_key.version, - encode_verify_key_base64(verify_key), - str(e), - ) - raise SynapseError( - 401, - "Invalid signature for server %s with key %s:%s: %s" - % (server_name, verify_key.alg, verify_key.version, str(e)), - Codes.UNAUTHORIZED, - ) diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 3fe496dcd3..c066617b92 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -14,11 +14,6 @@ # limitations under the License. 
import logging from collections import namedtuple -from typing import Iterable, List - -from twisted.internet import defer -from twisted.internet.defer import Deferred, DeferredList -from twisted.python.failure import Failure from synapse.api.constants import MAX_DEPTH, EventTypes, Membership from synapse.api.errors import Codes, SynapseError @@ -28,11 +23,6 @@ from synapse.events import EventBase, make_event_from_dict from synapse.events.utils import prune_event, validate_canonicaljson from synapse.http.servlet import assert_params_in_dict -from synapse.logging.context import ( - PreserveLoggingContext, - current_context, - make_deferred_yieldable, -) from synapse.types import JsonDict, get_domain_from_id logger = logging.getLogger(__name__) @@ -48,112 +38,82 @@ def __init__(self, hs): self.store = hs.get_datastore() self._clock = hs.get_clock() - def _check_sigs_and_hash( + async def _check_sigs_and_hash( self, room_version: RoomVersion, pdu: EventBase - ) -> Deferred: - return make_deferred_yieldable( - self._check_sigs_and_hashes(room_version, [pdu])[0] - ) - - def _check_sigs_and_hashes( - self, room_version: RoomVersion, pdus: List[EventBase] - ) -> List[Deferred]: - """Checks that each of the received events is correctly signed by the - sending server. + ) -> EventBase: + """Checks that event is correctly signed by the sending server. Args: - room_version: The room version of the PDUs - pdus: the events to be checked + room_version: The room version of the PDU + pdu: the event to be checked Returns: - For each input event, a deferred which: - * returns the original event if the checks pass - * returns a redacted version of the event (if the signature + * the original event if the checks pass + * a redacted version of the event (if the signature matched but the hash did not) - * throws a SynapseError if the signature check failed. - The deferreds run their callbacks in the sentinel - """ - deferreds = _check_sigs_on_pdus(self.keyring, room_version, pdus) - - ctx = current_context() - - @defer.inlineCallbacks - def callback(_, pdu: EventBase): - with PreserveLoggingContext(ctx): - if not check_event_content_hash(pdu): - # let's try to distinguish between failures because the event was - # redacted (which are somewhat expected) vs actual ball-tampering - # incidents. - # - # This is just a heuristic, so we just assume that if the keys are - # about the same between the redacted and received events, then the - # received event was probably a redacted copy (but we then use our - # *actual* redacted copy to be on the safe side.) - redacted_event = prune_event(pdu) - if set(redacted_event.keys()) == set(pdu.keys()) and set( - redacted_event.content.keys() - ) == set(pdu.content.keys()): - logger.info( - "Event %s seems to have been redacted; using our redacted " - "copy", - pdu.event_id, - ) - else: - logger.warning( - "Event %s content has been tampered, redacting", - pdu.event_id, - ) - return redacted_event - - result = yield defer.ensureDeferred( - self.spam_checker.check_event_for_spam(pdu) + * throws a SynapseError if the signature check failed.""" + try: + await _check_sigs_on_pdu(self.keyring, room_version, pdu) + except SynapseError as e: + logger.warning( + "Signature check failed for %s: %s", + pdu.event_id, + e, + ) + raise + + if not check_event_content_hash(pdu): + # let's try to distinguish between failures because the event was + # redacted (which are somewhat expected) vs actual ball-tampering + # incidents. 
+ # + # This is just a heuristic, so we just assume that if the keys are + # about the same between the redacted and received events, then the + # received event was probably a redacted copy (but we then use our + # *actual* redacted copy to be on the safe side.) + redacted_event = prune_event(pdu) + if set(redacted_event.keys()) == set(pdu.keys()) and set( + redacted_event.content.keys() + ) == set(pdu.content.keys()): + logger.info( + "Event %s seems to have been redacted; using our redacted copy", + pdu.event_id, ) - - if result: - logger.warning( - "Event contains spam, redacting %s: %s", - pdu.event_id, - pdu.get_pdu_json(), - ) - return prune_event(pdu) - - return pdu - - def errback(failure: Failure, pdu: EventBase): - failure.trap(SynapseError) - with PreserveLoggingContext(ctx): + else: logger.warning( - "Signature check failed for %s: %s", + "Event %s content has been tampered, redacting", pdu.event_id, - failure.getErrorMessage(), ) - return failure + return redacted_event - for deferred, pdu in zip(deferreds, pdus): - deferred.addCallbacks( - callback, errback, callbackArgs=[pdu], errbackArgs=[pdu] + result = await self.spam_checker.check_event_for_spam(pdu) + + if result: + logger.warning( + "Event contains spam, redacting %s: %s", + pdu.event_id, + pdu.get_pdu_json(), ) + return prune_event(pdu) - return deferreds + return pdu class PduToCheckSig(namedtuple("PduToCheckSig", ["pdu", "sender_domain", "deferreds"])): pass -def _check_sigs_on_pdus( - keyring: Keyring, room_version: RoomVersion, pdus: Iterable[EventBase] -) -> List[Deferred]: +async def _check_sigs_on_pdu( + keyring: Keyring, room_version: RoomVersion, pdu: EventBase +) -> None: """Check that the given events are correctly signed + Raise a SynapseError if the event wasn't correctly signed. + Args: keyring: keyring object to do the checks room_version: the room version of the PDUs pdus: the events to be checked - - Returns: - A Deferred for each event in pdus, which will either succeed if - the signatures are valid, or fail (with a SynapseError) if not. """ # we want to check that the event is signed by: @@ -177,90 +137,47 @@ def _check_sigs_on_pdus( # let's start by getting the domain for each pdu, and flattening the event back # to JSON. 
- pdus_to_check = [ - PduToCheckSig( - pdu=p, - sender_domain=get_domain_from_id(p.sender), - deferreds=[], - ) - for p in pdus - ] - # First we check that the sender event is signed by the sender's domain # (except if its a 3pid invite, in which case it may be sent by any server) - pdus_to_check_sender = [p for p in pdus_to_check if not _is_invite_via_3pid(p.pdu)] - - more_deferreds = keyring.verify_events_for_server( - [ - ( - p.sender_domain, - p.pdu, - p.pdu.origin_server_ts if room_version.enforce_key_validity else 0, + if not _is_invite_via_3pid(pdu): + try: + await keyring.verify_event_for_server( + get_domain_from_id(pdu.sender), + pdu, + pdu.origin_server_ts if room_version.enforce_key_validity else 0, ) - for p in pdus_to_check_sender - ] - ) - - def sender_err(e, pdu_to_check): - errmsg = "event id %s: unable to verify signature for sender %s: %s" % ( - pdu_to_check.pdu.event_id, - pdu_to_check.sender_domain, - e.getErrorMessage(), - ) - raise SynapseError(403, errmsg, Codes.FORBIDDEN) - - for p, d in zip(pdus_to_check_sender, more_deferreds): - d.addErrback(sender_err, p) - p.deferreds.append(d) + except Exception as e: + errmsg = "event id %s: unable to verify signature for sender %s: %s" % ( + pdu.event_id, + get_domain_from_id(pdu.sender), + e, + ) + raise SynapseError(403, errmsg, Codes.FORBIDDEN) # now let's look for events where the sender's domain is different to the # event id's domain (normally only the case for joins/leaves), and add additional # checks. Only do this if the room version has a concept of event ID domain # (ie, the room version uses old-style non-hash event IDs). - if room_version.event_format == EventFormatVersions.V1: - pdus_to_check_event_id = [ - p - for p in pdus_to_check - if p.sender_domain != get_domain_from_id(p.pdu.event_id) - ] - - more_deferreds = keyring.verify_events_for_server( - [ - ( - get_domain_from_id(p.pdu.event_id), - p.pdu, - p.pdu.origin_server_ts if room_version.enforce_key_validity else 0, - ) - for p in pdus_to_check_event_id - ] - ) - - def event_err(e, pdu_to_check): + if room_version.event_format == EventFormatVersions.V1 and get_domain_from_id( + pdu.event_id + ) != get_domain_from_id(pdu.sender): + try: + await keyring.verify_event_for_server( + get_domain_from_id(pdu.event_id), + pdu, + pdu.origin_server_ts if room_version.enforce_key_validity else 0, + ) + except Exception as e: errmsg = ( - "event id %s: unable to verify signature for event id domain: %s" - % (pdu_to_check.pdu.event_id, e.getErrorMessage()) + "event id %s: unable to verify signature for event id domain %s: %s" + % ( + pdu.event_id, + get_domain_from_id(pdu.event_id), + e, + ) ) raise SynapseError(403, errmsg, Codes.FORBIDDEN) - for p, d in zip(pdus_to_check_event_id, more_deferreds): - d.addErrback(event_err, p) - p.deferreds.append(d) - - # replace lists of deferreds with single Deferreds - return [_flatten_deferred_list(p.deferreds) for p in pdus_to_check] - - -def _flatten_deferred_list(deferreds: List[Deferred]) -> Deferred: - """Given a list of deferreds, either return the single deferred, - combine into a DeferredList, or return an already resolved deferred. 
- """ - if len(deferreds) > 1: - return DeferredList(deferreds, fireOnOneErrback=True, consumeErrors=True) - elif len(deferreds) == 1: - return deferreds[0] - else: - return defer.succeed(None) - def _is_invite_via_3pid(event: EventBase) -> bool: return ( diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 3feb60da2a..35b28b3ed2 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -23,6 +23,7 @@ Any, Awaitable, Callable, + Collection, Dict, Iterable, List, @@ -37,9 +38,6 @@ import attr from prometheus_client import Counter -from twisted.internet import defer -from twisted.internet.defer import Deferred - from synapse.api.constants import EventTypes, Membership from synapse.api.errors import ( CodeMessageException, @@ -58,10 +56,9 @@ from synapse.events import EventBase, builder from synapse.federation.federation_base import FederationBase, event_from_pdu_json from synapse.federation.transport.client import SendJoinResponse -from synapse.logging.context import make_deferred_yieldable, preserve_fn from synapse.logging.utils import log_function from synapse.types import JsonDict, get_domain_from_id -from synapse.util import unwrapFirstError +from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.retryutils import NotRetryingDestination @@ -362,10 +359,9 @@ async def get_room_state_ids( async def _check_sigs_and_hash_and_fetch( self, origin: str, - pdus: List[EventBase], + pdus: Collection[EventBase], room_version: RoomVersion, outlier: bool = False, - include_none: bool = False, ) -> List[EventBase]: """Takes a list of PDUs and checks the signatures and hashes of each one. If a PDU fails its signature check then we check if we have it in @@ -382,57 +378,87 @@ async def _check_sigs_and_hash_and_fetch( pdu room_version outlier: Whether the events are outliers or not - include_none: Whether to include None in the returned list - for events that have failed their checks Returns: A list of PDUs that have valid signatures and hashes. """ - deferreds = self._check_sigs_and_hashes(room_version, pdus) - async def handle_check_result(pdu: EventBase, deferred: Deferred): - try: - res = await make_deferred_yieldable(deferred) - except SynapseError: - res = None + # We limit how many PDUs we check at once, as if we try to do hundreds + # of thousands of PDUs at once we see large memory spikes. - if not res: - # Check local db. 
- res = await self.store.get_event( - pdu.event_id, allow_rejected=True, allow_none=True - ) + valid_pdus = [] - pdu_origin = get_domain_from_id(pdu.sender) - if not res and pdu_origin != origin: - try: - res = await self.get_pdu( - destinations=[pdu_origin], - event_id=pdu.event_id, - room_version=room_version, - outlier=outlier, - timeout=10000, - ) - except SynapseError: - pass + async def _execute(pdu: EventBase) -> None: + valid_pdu = await self._check_sigs_and_hash_and_fetch_one( + pdu=pdu, + origin=origin, + outlier=outlier, + room_version=room_version, + ) - if not res: - logger.warning( - "Failed to find copy of %s with valid signature", pdu.event_id - ) + if valid_pdu: + valid_pdus.append(valid_pdu) - return res + await concurrently_execute(_execute, pdus, 10000) - handle = preserve_fn(handle_check_result) - deferreds2 = [handle(pdu, deferred) for pdu, deferred in zip(pdus, deferreds)] + return valid_pdus - valid_pdus = await make_deferred_yieldable( - defer.gatherResults(deferreds2, consumeErrors=True) - ).addErrback(unwrapFirstError) + async def _check_sigs_and_hash_and_fetch_one( + self, + pdu: EventBase, + origin: str, + room_version: RoomVersion, + outlier: bool = False, + ) -> Optional[EventBase]: + """Takes a PDU and checks its signatures and hashes. If the PDU fails + its signature check then we check if we have it in the database and if + not then request if from the originating server of that PDU. - if include_none: - return valid_pdus - else: - return [p for p in valid_pdus if p] + If then PDU fails its content hash check then it is redacted. + + Args: + origin + pdu + room_version + outlier: Whether the events are outliers or not + include_none: Whether to include None in the returned list + for events that have failed their checks + + Returns: + The PDU (possibly redacted) if it has valid signatures and hashes. + """ + + res = None + try: + res = await self._check_sigs_and_hash(room_version, pdu) + except SynapseError: + pass + + if not res: + # Check local db. + res = await self.store.get_event( + pdu.event_id, allow_rejected=True, allow_none=True + ) + + pdu_origin = get_domain_from_id(pdu.sender) + if not res and pdu_origin != origin: + try: + res = await self.get_pdu( + destinations=[pdu_origin], + event_id=pdu.event_id, + room_version=room_version, + outlier=outlier, + timeout=10000, + ) + except SynapseError: + pass + + if not res: + logger.warning( + "Failed to find copy of %s with valid signature", pdu.event_id + ) + + return res async def get_event_auth( self, destination: str, room_id: str, event_id: str @@ -673,8 +699,6 @@ async def send_request(destination) -> Dict[str, Any]: state = response.state auth_chain = response.auth_events - pdus = {p.event_id: p for p in itertools.chain(state, auth_chain)} - create_event = None for e in state: if (e.type, e.state_key) == (EventTypes.Create, ""): @@ -698,14 +722,29 @@ async def send_request(destination) -> Dict[str, Any]: % (create_room_version,) ) - valid_pdus = await self._check_sigs_and_hash_and_fetch( - destination, - list(pdus.values()), - outlier=True, - room_version=room_version, + logger.info( + "Processing from send_join %d events", len(state) + len(auth_chain) ) - valid_pdus_map = {p.event_id: p for p in valid_pdus} + # We now go and check the signatures and hashes for the event. Note + # that we limit how many events we process at a time to keep the + # memory overhead from exploding. 
+ valid_pdus_map: Dict[str, EventBase] = {} + + async def _execute(pdu: EventBase) -> None: + valid_pdu = await self._check_sigs_and_hash_and_fetch_one( + pdu=pdu, + origin=destination, + outlier=True, + room_version=room_version, + ) + + if valid_pdu: + valid_pdus_map[valid_pdu.event_id] = valid_pdu + + await concurrently_execute( + _execute, itertools.chain(state, auth_chain), 10000 + ) # NB: We *need* to copy to ensure that we don't have multiple # references being passed on, as that causes... issues. diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 086d999d98..f1e659571a 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -39,6 +39,7 @@ ) from synapse.logging.context import run_in_background from synapse.logging.opentracing import ( + SynapseTags, start_active_span, start_active_span_from_request, tags, @@ -153,7 +154,9 @@ async def authenticate_request(self, request, content): ) await self.keyring.verify_json_for_server( - origin, json_request, now, "Incoming request" + origin, + json_request, + now, ) logger.debug("Request from %s", origin) @@ -316,7 +319,7 @@ async def new_func(request, *args, **kwargs): raise request_tags = { - "request_id": request.get_request_id(), + SynapseTags.REQUEST_ID: request.get_request_id(), tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER, tags.HTTP_METHOD: request.get_method(), tags.HTTP_URL: request.get_redacted_uri(), @@ -1646,13 +1649,12 @@ def register_servlets( server_name=hs.hostname, ).register(resource) - if hs.config.experimental.spaces_enabled: - FederationSpaceSummaryServlet( - handler=hs.get_space_summary_handler(), - authenticator=authenticator, - ratelimiter=ratelimiter, - server_name=hs.hostname, - ).register(resource) + FederationSpaceSummaryServlet( + handler=hs.get_space_summary_handler(), + authenticator=authenticator, + ratelimiter=ratelimiter, + server_name=hs.hostname, + ).register(resource) if "openid" in servlet_groups: for servletclass in OPENID_SERVLET_CLASSES: diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index d2fc8be5f5..ff8372c4e9 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -108,7 +108,9 @@ async def verify_attestation( assert server_name is not None await self.keyring.verify_json_for_server( - server_name, attestation, now, "Group attestation" + server_name, + attestation, + now, ) def create_attestation(self, group_id: str, user_id: str) -> JsonDict: diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 177310f0be..862638cc4f 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -87,7 +87,8 @@ async def _notify_interested_services(self, max_token: RoomStreamToken): self.is_processing = True try: limit = 100 - while True: + upper_bound = -1 + while upper_bound < self.current_max: ( upper_bound, events, @@ -95,9 +96,6 @@ async def _notify_interested_services(self, max_token: RoomStreamToken): self.current_max, limit ) - if not events: - break - events_by_room = {} # type: Dict[str, List[EventBase]] for event in events: events_by_room.setdefault(event.room_id, []).append(event) @@ -153,9 +151,6 @@ async def handle_room_events(events): await self.store.set_appservice_last_pos(upper_bound) - now = self.clock.time_msec() - ts = await self.store.get_received_ts(events[-1].event_id) - synapse.metrics.event_processing_positions.labels( "appservice_sender" ).set(upper_bound) @@ -168,12 +163,16 @@ async def 
handle_room_events(events): event_processing_loop_counter.labels("appservice_sender").inc() - synapse.metrics.event_processing_lag.labels( - "appservice_sender" - ).set(now - ts) - synapse.metrics.event_processing_last_ts.labels( - "appservice_sender" - ).set(ts) + if events: + now = self.clock.time_msec() + ts = await self.store.get_received_ts(events[-1].event_id) + + synapse.metrics.event_processing_lag.labels( + "appservice_sender" + ).set(now - ts) + synapse.metrics.event_processing_last_ts.labels( + "appservice_sender" + ).set(ts) finally: self.is_processing = False diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 36652289a4..15a1596027 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -23,6 +23,7 @@ from http import HTTPStatus from typing import ( TYPE_CHECKING, + Collection, Dict, Iterable, List, @@ -179,6 +180,8 @@ def __init__(self, hs: "HomeServer"): self.room_queues = {} # type: Dict[str, List[Tuple[EventBase, str]]] self._room_pdu_linearizer = Linearizer("fed_room_pdu") + self._room_backfill = Linearizer("room_backfill") + self.third_party_event_rules = hs.get_third_party_event_rules() self._ephemeral_messages_enabled = hs.config.enable_ephemeral_messages @@ -579,7 +582,9 @@ async def _get_state_for_room( # Fetch the state events from the DB, and check we have the auth events. event_map = await self.store.get_events(state_event_ids, allow_rejected=True) - auth_events_in_store = await self.store.have_seen_events(auth_event_ids) + auth_events_in_store = await self.store.have_seen_events( + room_id, auth_event_ids + ) # Check for missing events. We handle state and auth event seperately, # as we want to pull the state from the DB, but we don't for the auth @@ -612,7 +617,7 @@ async def _get_state_for_room( if missing_auth_events: auth_events_in_store = await self.store.have_seen_events( - missing_auth_events + room_id, missing_auth_events ) missing_auth_events.difference_update(auth_events_in_store) @@ -712,7 +717,7 @@ async def _get_state_after_missing_prev_event( missing_auth_events = set(auth_event_ids) - fetched_events.keys() missing_auth_events.difference_update( - await self.store.have_seen_events(missing_auth_events) + await self.store.have_seen_events(room_id, missing_auth_events) ) logger.debug("We are also missing %i auth events", len(missing_auth_events)) @@ -1041,6 +1046,12 @@ async def maybe_backfill( return. This is used as part of the heuristic to decide if we should back paginate. """ + with (await self._room_backfill.queue(room_id)): + return await self._maybe_backfill_inner(room_id, current_depth, limit) + + async def _maybe_backfill_inner( + self, room_id: str, current_depth: int, limit: int + ) -> bool: extremities = await self.store.get_oldest_events_with_depth_in_room(room_id) if not extremities: @@ -1356,11 +1367,12 @@ async def get_event(event_id: str): event_infos.append(_NewEventInfo(event, None, auth)) - await self._auth_and_persist_events( - destination, - room_id, - event_infos, - ) + if event_infos: + await self._auth_and_persist_events( + destination, + room_id, + event_infos, + ) def _sanity_check_event(self, ev: EventBase) -> None: """ @@ -2261,7 +2273,7 @@ async def _auth_and_persist_events( self, origin: str, room_id: str, - event_infos: Iterable[_NewEventInfo], + event_infos: Collection[_NewEventInfo], backfilled: bool = False, ) -> None: """Creates the appropriate contexts and persists events. 
The events @@ -2272,6 +2284,9 @@ async def _auth_and_persist_events( Notifies about the events where appropriate. """ + if not event_infos: + return + async def prep(ev_info: _NewEventInfo): event = ev_info.event with nested_logging_context(suffix=event.event_id): @@ -2400,13 +2415,14 @@ async def _persist_auth_tree( raise events_to_context[e.event_id].rejected = RejectedReason.AUTH_ERROR - await self.persist_events_and_notify( - room_id, - [ - (e, events_to_context[e.event_id]) - for e in itertools.chain(auth_events, state) - ], - ) + if auth_events or state: + await self.persist_events_and_notify( + room_id, + [ + (e, events_to_context[e.event_id]) + for e in itertools.chain(auth_events, state) + ], + ) new_event_context = await self.state_handler.compute_event_context( event, old_state=state @@ -2669,7 +2685,7 @@ async def _update_auth_events_and_context_for_auth( # # we start by checking if they are in the store, and then try calling /event_auth/. if missing_auth: - have_events = await self.store.have_seen_events(missing_auth) + have_events = await self.store.have_seen_events(event.room_id, missing_auth) logger.debug("Events %s are in the store", have_events) missing_auth.difference_update(have_events) @@ -2688,7 +2704,7 @@ async def _update_auth_events_and_context_for_auth( return context seen_remotes = await self.store.have_seen_events( - [e.event_id for e in remote_auth_chain] + event.room_id, [e.event_id for e in remote_auth_chain] ) for e in remote_auth_chain: @@ -3245,11 +3261,18 @@ async def persist_events_and_notify( the same room. backfilled: Whether these events are a result of backfilling or not + + Returns: + The stream ID after which all events have been persisted. """ + if not event_and_contexts: + return self.store.get_current_events_token() + instance = self.config.worker.events_shard_config.get_instance(room_id) if instance != self._instance_name: - # Limit the number of events sent over federation. - for batch in batch_iter(event_and_contexts, 1000): + # Limit the number of events sent over replication. We choose 200 + # here as that is what we default to in `max_request_body_size(..)` + for batch in batch_iter(event_and_contexts, 200): result = await self._send_events( instance_name=instance, store=self.store, diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index f5a049d754..44ed7a0712 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -299,14 +299,14 @@ async def maybe_send_presence_to_interested_destinations( if not states: return - hosts_and_states = await get_interested_remotes( + hosts_to_states = await get_interested_remotes( self.store, self.presence_router, states, ) - for destinations, states in hosts_and_states: - self._federation.send_presence_to_destinations(states, destinations) + for destination, host_states in hosts_to_states.items(): + self._federation.send_presence_to_destinations(host_states, [destination]) async def send_full_presence_to_users(self, user_ids: Collection[str]): """ @@ -495,9 +495,6 @@ async def notify_from_replication( users=users_to_states.keys(), ) - # If this is a federation sender, notify about presence updates. 
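`persist_events_and_notify` above now slices the events into batches of 200 with `batch_iter` before sending them over replication, matching the default request body limit so one backfill cannot produce an oversized replication request. A sketch of that style of batching helper (the real one lives in `synapse.util.iterutils`; the exact signature here is assumed for illustration):

```python
from itertools import islice
from typing import Iterable, Iterator, Tuple, TypeVar

T = TypeVar("T")


def batch_iter(iterable: Iterable[T], size: int) -> Iterator[Tuple[T, ...]]:
    """Yield successive tuples of at most `size` items from `iterable`."""
    it = iter(iterable)
    while True:
        batch = tuple(islice(it, size))
        if not batch:
            return
        yield batch


# A thousand events split into replication-sized requests of 200 each.
batches = list(batch_iter(range(1000), 200))
assert len(batches) == 5 and all(len(b) == 200 for b in batches)
```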
- await self.maybe_send_presence_to_interested_destinations(states) - async def process_replication_rows( self, stream_name: str, instance_name: str, token: int, rows: list ): @@ -519,11 +516,27 @@ async def process_replication_rows( for row in rows ] - for state in states: - self.user_to_current_state[state.user_id] = state + # The list of states to notify sync streams and remote servers about. + # This is calculated by comparing the old and new states for each user + # using `should_notify(..)`. + # + # Note that this is necessary as the presence writer will periodically + # flush presence state changes that should not be notified about to the + # DB, and so will be sent over the replication stream. + state_to_notify = [] + + for new_state in states: + old_state = self.user_to_current_state.get(new_state.user_id) + self.user_to_current_state[new_state.user_id] = new_state + + if not old_state or should_notify(old_state, new_state): + state_to_notify.append(new_state) stream_id = token - await self.notify_from_replication(states, stream_id) + await self.notify_from_replication(state_to_notify, stream_id) + + # If this is a federation sender, notify about presence updates. + await self.maybe_send_presence_to_interested_destinations(state_to_notify) def get_currently_syncing_users_for_replication(self) -> Iterable[str]: return [ @@ -829,15 +842,15 @@ async def _update_states( if to_federation_ping: federation_presence_out_counter.inc(len(to_federation_ping)) - hosts_and_states = await get_interested_remotes( + hosts_to_states = await get_interested_remotes( self.store, self.presence_router, list(to_federation_ping.values()), ) - for destinations, states in hosts_and_states: + for destination, states in hosts_to_states.items(): self._federation_queue.send_presence_to_destinations( - states, destinations + states, [destination] ) async def _handle_timeouts(self) -> None: @@ -1962,7 +1975,7 @@ async def get_interested_remotes( store: DataStore, presence_router: PresenceRouter, states: List[UserPresenceState], -) -> List[Tuple[Collection[str], List[UserPresenceState]]]: +) -> Dict[str, Set[UserPresenceState]]: """Given a list of presence states figure out which remote servers should be sent which. @@ -1974,11 +1987,9 @@ async def get_interested_remotes( states: A list of incoming user presence updates. Returns: - A list of 2-tuples of destinations and states, where for - each tuple the list of UserPresenceState should be sent to each - destination + A map from destinations to presence states to send to that destination. 
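Keying the result by destination (rather than returning `(hosts, states)` tuples) means each remote server appears exactly once and the set deduplicates presence states that are relevant through several shared rooms. A small self-contained sketch of the grouping, using plain user-id strings as a stand-in for `UserPresenceState`:

```python
from typing import Dict, Iterable, Set


def get_domain_from_id(user_id: str) -> str:
    # "@alice:example.com" -> "example.com" (simplified; ignores ports etc.)
    return user_id.split(":", 1)[1]


def group_states_by_destination(
    room_members: Dict[str, Iterable[str]],   # room_id -> user ids in the room
    states_by_room: Dict[str, Set[str]],      # room_id -> presence updates (user ids here)
) -> Dict[str, Set[str]]:
    hosts_to_states: Dict[str, Set[str]] = {}
    for room_id, states in states_by_room.items():
        hosts = {get_domain_from_id(uid) for uid in room_members[room_id]}
        for host in hosts:
            # setdefault + set.update keeps each destination unique and drops
            # duplicate states shared between rooms.
            hosts_to_states.setdefault(host, set()).update(states)
    return hosts_to_states


result = group_states_by_destination(
    {"!a": ["@u1:one.example", "@u2:two.example"], "!b": ["@u1:one.example"]},
    {"!a": {"@u1:one.example"}, "!b": {"@u1:one.example"}},
)
assert result == {
    "one.example": {"@u1:one.example"},
    "two.example": {"@u1:one.example"},
}
```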
""" - hosts_and_states = [] # type: List[Tuple[Collection[str], List[UserPresenceState]]] + hosts_and_states: Dict[str, Set[UserPresenceState]] = {} # First we look up the rooms each user is in (as well as any explicit # subscriptions), then for each distinct room we look up the remote @@ -1990,11 +2001,12 @@ async def get_interested_remotes( for room_id, states in room_ids_to_states.items(): user_ids = await store.get_users_in_room(room_id) hosts = {get_domain_from_id(user_id) for user_id in user_ids} - hosts_and_states.append((hosts, states)) + for host in hosts: + hosts_and_states.setdefault(host, set()).update(states) for user_id, states in users_to_states.items(): host = get_domain_from_id(user_id) - hosts_and_states.append(([host], states)) + hosts_and_states.setdefault(host, set()).update(states) return hosts_and_states diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 3bc02fb406..7f2138d804 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -329,6 +329,17 @@ async def _wait_for_sync_for_user( if context: context.tag = sync_type + # if we have a since token, delete any to-device messages before that token + # (since we now know that the device has received them) + if since_token is not None: + since_stream_id = since_token.to_device_key + deleted = await self.store.delete_messages_for_device( + sync_config.user.to_string(), sync_config.device_id, since_stream_id + ) + logger.debug( + "Deleted %d to-device messages up to %d", deleted, since_stream_id + ) + if timeout == 0 or since_token is None or full_state: # we are going to return immediately, so don't bother calling # notifier.wait_for_events. @@ -477,7 +488,7 @@ async def _load_filtered_recents( # ensure that we always include current state in the timeline current_state_ids = frozenset() # type: FrozenSet[str] if any(e.is_state() for e in recents): - current_state_ids_map = await self.state.get_current_state_ids( + current_state_ids_map = await self.store.get_current_state_ids( room_id ) current_state_ids = frozenset(current_state_ids_map.values()) @@ -537,7 +548,7 @@ async def _load_filtered_recents( # ensure that we always include current state in the timeline current_state_ids = frozenset() if any(e.is_state() for e in loaded_recents): - current_state_ids_map = await self.state.get_current_state_ids( + current_state_ids_map = await self.store.get_current_state_ids( room_id ) current_state_ids = frozenset(current_state_ids_map.values()) @@ -1250,16 +1261,6 @@ async def _generate_sync_entry_for_to_device( since_stream_id = int(sync_result_builder.since_token.to_device_key) if since_stream_id != int(now_token.to_device_key): - # We only delete messages when a new message comes in, but that's - # fine so long as we delete them at some point. - - deleted = await self.store.delete_messages_for_device( - user_id, device_id, since_stream_id - ) - logger.debug( - "Deleted %d to-device messages up to %d", deleted, since_stream_id - ) - messages, stream_id = await self.store.get_new_messages_for_device( user_id, device_id, since_stream_id, now_token.to_device_key ) diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index fb5794e80b..89991e7127 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -14,7 +14,11 @@ """ This module contains base REST classes for constructing REST servlets. 
""" import logging -from typing import Dict, List, Optional, Union +from typing import Dict, Iterable, List, Optional, Union, overload + +from typing_extensions import Literal + +from twisted.web.server import Request from synapse.api.errors import Codes, SynapseError from synapse.util import json_decoder @@ -105,14 +109,66 @@ def parse_boolean_from_args(args, name, default=None, required=False): return default +@overload +def parse_bytes_from_args( + args: Dict[bytes, List[bytes]], + name: str, + default: Literal[None] = None, + required: Literal[True] = True, +) -> bytes: + ... + + +@overload +def parse_bytes_from_args( + args: Dict[bytes, List[bytes]], + name: str, + default: Optional[bytes] = None, + required: bool = False, +) -> Optional[bytes]: + ... + + +def parse_bytes_from_args( + args: Dict[bytes, List[bytes]], + name: str, + default: Optional[bytes] = None, + required: bool = False, +) -> Optional[bytes]: + """ + Parse a string parameter as bytes from the request query string. + + Args: + args: A mapping of request args as bytes to a list of bytes (e.g. request.args). + name: the name of the query parameter. + default: value to use if the parameter is absent, + defaults to None. Must be bytes if encoding is None. + required: whether to raise a 400 SynapseError if the + parameter is absent, defaults to False. + Returns: + Bytes or the default value. + + Raises: + SynapseError if the parameter is absent and required. + """ + name_bytes = name.encode("ascii") + + if name_bytes in args: + return args[name_bytes][0] + elif required: + message = "Missing string query parameter %s" % (name,) + raise SynapseError(400, message, errcode=Codes.MISSING_PARAM) + + return default + + def parse_string( - request, - name, - default=None, - required=False, - allowed_values=None, - param_type="string", - encoding="ascii", + request: Request, + name: str, + default: Optional[str] = None, + required: bool = False, + allowed_values: Optional[Iterable[str]] = None, + encoding: str = "ascii", ): """ Parse a string parameter from the request query string. @@ -122,30 +178,28 @@ def parse_string( Args: request: the twisted HTTP request. - name (bytes|unicode): the name of the query parameter. - default (bytes|unicode|None): value to use if the parameter is absent, - defaults to None. Must be bytes if encoding is None. - required (bool): whether to raise a 400 SynapseError if the + name: the name of the query parameter. + default: value to use if the parameter is absent, defaults to None. + required: whether to raise a 400 SynapseError if the parameter is absent, defaults to False. - allowed_values (list[bytes|unicode]): List of allowed values for the + allowed_values: List of allowed values for the string, or None if any value is allowed, defaults to None. Must be the same type as name, if given. - encoding (str|None): The encoding to decode the string content with. + encoding: The encoding to decode the string content with. Returns: - bytes/unicode|None: A string value or the default. Unicode if encoding - was given, bytes otherwise. + A string value or the default. Raises: SynapseError if the parameter is absent and required, or if the parameter is present, must be one of a list of allowed values and is not one of those allowed values. 
""" + args = request.args # type: Dict[bytes, List[bytes]] # type: ignore return parse_string_from_args( - request.args, name, default, required, allowed_values, param_type, encoding + args, name, default, required, allowed_values, encoding ) - def parse_list_from_args( args: Dict[bytes, List[bytes]], name: Union[bytes, str], @@ -177,68 +231,146 @@ def parse_list_from_args( return args_list -def parse_string_from_args( +def _parse_string_value( + value: bytes, + allowed_values: Optional[Iterable[str]], + name: str, + encoding: str, +) -> str: + try: + value_str = value.decode(encoding) + except ValueError: + raise SynapseError(400, "Query parameter %r must be %s" % (name, encoding)) + + if allowed_values is not None and value_str not in allowed_values: + message = "Query parameter %r must be one of [%s]" % ( + name, + ", ".join(repr(v) for v in allowed_values), + ) + raise SynapseError(400, message) + else: + return value_str + + +@overload +def parse_strings_from_args( args: Dict[bytes, List[bytes]], - name: Union[bytes, str], - default: Optional[str] = None, - required: Optional[bool] = False, - allowed_values: Optional[List[bytes]] = None, - param_type: Optional[str] = "string", - encoding: Optional[str] = "ascii", -): - """Parse and optionally decode a single value from request query parameters. + name: str, + default: Optional[List[str]] = None, + required: Literal[True] = True, + allowed_values: Optional[Iterable[str]] = None, + encoding: str = "ascii", +) -> List[str]: + ... + + +@overload +def parse_strings_from_args( + args: Dict[bytes, List[bytes]], + name: str, + default: Optional[List[str]] = None, + required: bool = False, + allowed_values: Optional[Iterable[str]] = None, + encoding: str = "ascii", +) -> Optional[List[str]]: + ... + + +def parse_strings_from_args( + args: Dict[bytes, List[bytes]], + name: str, + default: Optional[List[str]] = None, + required: bool = False, + allowed_values: Optional[Iterable[str]] = None, + encoding: str = "ascii", +) -> Optional[List[str]]: + """ + Parse a string parameter from the request query string list. + + The content of the query param will be decoded to Unicode using the encoding. Args: - args: A dictionary of query parameters from a request. - name: The name of the query parameter to extract values from. If given as bytes, - will be decoded as "ascii". - default: A default value to return if the given argument `name` was not found. - required: If this is True, no `default` is provided and the given argument `name` - was not found then a SynapseError is raised. - allowed_values: A list of allowed values. If specified and the found str is - not in this list, a SynapseError is raised. - param_type: The expected type of the query parameter's value. - encoding: An optional encoding that is used to decode each parameter value with. + args: A mapping of request args as bytes to a list of bytes (e.g. request.args). + name: the name of the query parameter. + default: value to use if the parameter is absent, defaults to None. + required: whether to raise a 400 SynapseError if the + parameter is absent, defaults to False. + allowed_values: List of allowed values for the + string, or None if any value is allowed, defaults to None. + encoding: The encoding to decode the string content with. Returns: - The found argument value. + A string value or the default. Raises: - SynapseError: If the given name was not found in the request arguments, - the argument's values were encoded incorrectly or a required value was missing. 
+ SynapseError if the parameter is absent and required, or if the + parameter is present, must be one of a list of allowed values and + is not one of those allowed values. """ - if not isinstance(name, bytes): - name = name.encode("ascii") + name_bytes = name.encode("ascii") - if name in args: - value = args[name][0] - - if encoding: - try: - value = value.decode(encoding) - except ValueError: - raise SynapseError( - 400, "Query parameter %r must be %s" % (name, encoding) - ) - - if allowed_values is not None and value not in allowed_values: - message = "Query parameter %r must be one of [%s]" % ( - name, - ", ".join(repr(v) for v in allowed_values), - ) - raise SynapseError(400, message) - else: - return value + if name_bytes in args: + values = args[name_bytes] + + return [ + _parse_string_value(value, allowed_values, name=name, encoding=encoding) + for value in values + ] else: if required: - message = "Missing %s query parameter %r" % (param_type, name) + message = "Missing string query parameter %r" % (name,) raise SynapseError(400, message, errcode=Codes.MISSING_PARAM) - else: - if encoding and isinstance(default, bytes): - return default.decode(encoding) + return default - return default + +def parse_string_from_args( + args: Dict[bytes, List[bytes]], + name: str, + default: Optional[str] = None, + required: bool = False, + allowed_values: Optional[Iterable[str]] = None, + encoding: str = "ascii", +) -> Optional[str]: + """ + Parse the string parameter from the request query string list + and return the first result. + + The content of the query param will be decoded to Unicode using the encoding. + + Args: + args: A mapping of request args as bytes to a list of bytes (e.g. request.args). + name: the name of the query parameter. + default: value to use if the parameter is absent, defaults to None. + required: whether to raise a 400 SynapseError if the + parameter is absent, defaults to False. + allowed_values: List of allowed values for the + string, or None if any value is allowed, defaults to None. Must be + the same type as name, if given. + encoding: The encoding to decode the string content with. + + Returns: + A string value or the default. + + Raises: + SynapseError if the parameter is absent and required, or if the + parameter is present, must be one of a list of allowed values and + is not one of those allowed values. + """ + + strings = parse_strings_from_args( + args, + name, + default=[default] if default is not None else None, + required=required, + allowed_values=allowed_values, + encoding=encoding, + ) + + if strings is None: + return None + + return strings[0] def parse_json_value_from_request(request, allow_empty_body=False): @@ -266,7 +398,7 @@ def parse_json_value_from_request(request, allow_empty_body=False): try: content = json_decoder.decode(content_bytes.decode("utf-8")) except Exception as e: - logger.warning("Unable to parse JSON: %s", e) + logger.warning("Unable to parse JSON: %s (%s)", e, content_bytes) raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON) return content @@ -329,9 +461,8 @@ class attribute containing a pre-compiled regular expression. The automatic def register(self, http_server): """ Register this servlet with the given HTTP server. 
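The `@overload` declarations above exist purely for the type checker: when `required=True` is passed as a literal, mypy treats the result as non-`Optional`, so callers don't need redundant `assert x is not None` checks. A stripped-down sketch of the pattern (the names here are illustrative, not Synapse's):

```python
from typing import Dict, List, Optional, overload

from typing_extensions import Literal


@overload
def get_arg(args: Dict[str, List[str]], name: str, required: Literal[True]) -> str:
    ...


@overload
def get_arg(args: Dict[str, List[str]], name: str, required: bool = False) -> Optional[str]:
    ...


def get_arg(args: Dict[str, List[str]], name: str, required: bool = False) -> Optional[str]:
    """Return the first value for `name`, raising if it is required but absent."""
    if name in args:
        return args[name][0]
    if required:
        raise KeyError(f"Missing query parameter {name!r}")
    return None


args = {"filter": ["{}"]}
value: str = get_arg(args, "filter", required=True)   # mypy sees str, not Optional[str]
maybe: Optional[str] = get_arg(args, "missing")        # mypy sees Optional[str]
assert value == "{}" and maybe is None
```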
""" - if hasattr(self, "PATTERNS"): - patterns = self.PATTERNS - + patterns = getattr(self, "PATTERNS", None) + if patterns: for method in ("GET", "PUT", "POST", "DELETE"): if hasattr(self, "on_%s" % (method,)): servlet_classname = self.__class__.__name__ diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index fba2fa3904..dd9377340e 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -265,6 +265,18 @@ class SynapseTags: # Whether the sync response has new data to be returned to the client. SYNC_RESULT = "sync.new_data" + # incoming HTTP request ID (as written in the logs) + REQUEST_ID = "request_id" + + # HTTP request tag (used to distinguish full vs incremental syncs, etc) + REQUEST_TAG = "request_tag" + + # Text description of a database transaction + DB_TXN_DESC = "db.txn_desc" + + # Uniqueish ID of a database transaction + DB_TXN_ID = "db.txn_id" + # Block everything by default # A regex which matches the server_names to expose traces for. @@ -325,6 +337,7 @@ def ensure_active_span_inner_2(*args, **kwargs): @contextlib.contextmanager def noop_context_manager(*args, **kwargs): """Does exactly what it says on the tin""" + # TODO: replace with contextlib.nullcontext once we drop support for Python 3.6 yield @@ -350,10 +363,13 @@ def init_tracer(hs: "HomeServer"): set_homeserver_whitelist(hs.config.opentracer_whitelist) + from jaeger_client.metrics.prometheus import PrometheusMetricsFactory + config = JaegerConfig( config=hs.config.jaeger_config, service_name="{} {}".format(hs.config.server_name, hs.get_instance_name()), scope_manager=LogContextScopeManager(hs.config), + metrics_factory=PrometheusMetricsFactory(), ) # If we have the rust jaeger reporter available let's use that. @@ -588,7 +604,7 @@ def inject_active_span_twisted_headers(headers, destination, check_destination=T span = opentracing.tracer.active_span carrier = {} # type: Dict[str, str] - opentracing.tracer.inject(span, opentracing.Format.HTTP_HEADERS, carrier) + opentracing.tracer.inject(span.context, opentracing.Format.HTTP_HEADERS, carrier) for key, value in carrier.items(): headers.addRawHeaders(key, value) @@ -625,7 +641,7 @@ def inject_active_span_byte_dict(headers, destination, check_destination=True): span = opentracing.tracer.active_span carrier = {} # type: Dict[str, str] - opentracing.tracer.inject(span, opentracing.Format.HTTP_HEADERS, carrier) + opentracing.tracer.inject(span.context, opentracing.Format.HTTP_HEADERS, carrier) for key, value in carrier.items(): headers[key.encode()] = [value.encode()] @@ -659,7 +675,7 @@ def inject_active_span_text_map(carrier, destination, check_destination=True): return opentracing.tracer.inject( - opentracing.tracer.active_span, opentracing.Format.TEXT_MAP, carrier + opentracing.tracer.active_span.context, opentracing.Format.TEXT_MAP, carrier ) @@ -681,7 +697,7 @@ def get_active_span_text_map(destination=None): carrier = {} # type: Dict[str, str] opentracing.tracer.inject( - opentracing.tracer.active_span, opentracing.Format.TEXT_MAP, carrier + opentracing.tracer.active_span.context, opentracing.Format.TEXT_MAP, carrier ) return carrier @@ -696,7 +712,7 @@ def active_span_context_as_string(): carrier = {} # type: Dict[str, str] if opentracing: opentracing.tracer.inject( - opentracing.tracer.active_span, opentracing.Format.TEXT_MAP, carrier + opentracing.tracer.active_span.context, opentracing.Format.TEXT_MAP, carrier ) return json_encoder.encode(carrier) @@ -824,7 +840,7 @@ def trace_servlet(request: "SynapseRequest", 
extract_context: bool = False): return request_tags = { - "request_id": request.get_request_id(), + SynapseTags.REQUEST_ID: request.get_request_id(), tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER, tags.HTTP_METHOD: request.get_method(), tags.HTTP_URL: request.get_redacted_uri(), @@ -833,9 +849,9 @@ def trace_servlet(request: "SynapseRequest", extract_context: bool = False): request_name = request.request_metrics.name if extract_context: - scope = start_active_span_from_request(request, request_name, tags=request_tags) + scope = start_active_span_from_request(request, request_name) else: - scope = start_active_span(request_name, tags=request_tags) + scope = start_active_span(request_name) with scope: try: @@ -845,4 +861,11 @@ def trace_servlet(request: "SynapseRequest", extract_context: bool = False): # with JsonResource). scope.span.set_operation_name(request.request_metrics.name) - scope.span.set_tag("request_tag", request.request_metrics.start_context.tag) + # set the tags *after* the servlet completes, in case it decided to + # prioritise the span (tags will get dropped on unprioritised spans) + request_tags[ + SynapseTags.REQUEST_TAG + ] = request.request_metrics.start_context.tag + + for k, v in request_tags.items(): + scope.span.set_tag(k, v) diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index 714caf84c3..de96ca0821 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -22,7 +22,11 @@ from twisted.internet import defer from synapse.logging.context import LoggingContext, PreserveLoggingContext -from synapse.logging.opentracing import noop_context_manager, start_active_span +from synapse.logging.opentracing import ( + SynapseTags, + noop_context_manager, + start_active_span, +) from synapse.util.async_helpers import maybe_awaitable if TYPE_CHECKING: @@ -200,9 +204,12 @@ async def run(): with BackgroundProcessLoggingContext(desc, count) as context: try: - ctx = noop_context_manager() if bg_start_span: - ctx = start_active_span(desc, tags={"request_id": str(context)}) + ctx = start_active_span( + f"bgproc.{desc}", tags={SynapseTags.REQUEST_ID: str(context)} + ) + else: + ctx = noop_context_manager() with ctx: return await maybe_awaitable(func(*args, **kwargs)) except Exception: diff --git a/synapse/notifier.py b/synapse/notifier.py index 24b4e6649f..3c3cc47631 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -485,21 +485,21 @@ async def wait_for_events( end_time = self.clock.time_msec() + timeout while not result: - try: - now = self.clock.time_msec() - if end_time <= now: - break - - # Now we wait for the _NotifierUserStream to be told there - # is a new token. - listener = user_stream.new_listener(prev_token) - listener.deferred = timeout_deferred( - listener.deferred, - (end_time - now) / 1000.0, - self.hs.get_reactor(), - ) + with start_active_span("wait_for_events"): + try: + now = self.clock.time_msec() + if end_time <= now: + break + + # Now we wait for the _NotifierUserStream to be told there + # is a new token. 
+ listener = user_stream.new_listener(prev_token) + listener.deferred = timeout_deferred( + listener.deferred, + (end_time - now) / 1000.0, + self.hs.get_reactor(), + ) - with start_active_span("wait_for_events.deferred"): log_kv( { "wait_for_events": "sleep", @@ -517,27 +517,27 @@ async def wait_for_events( } ) - current_token = user_stream.current_token + current_token = user_stream.current_token - result = await callback(prev_token, current_token) - log_kv( - { - "wait_for_events": "result", - "result": bool(result), - } - ) - if result: + result = await callback(prev_token, current_token) + log_kv( + { + "wait_for_events": "result", + "result": bool(result), + } + ) + if result: + break + + # Update the prev_token to the current_token since nothing + # has happened between the old prev_token and the current_token + prev_token = current_token + except defer.TimeoutError: + log_kv({"wait_for_events": "timeout"}) + break + except defer.CancelledError: + log_kv({"wait_for_events": "cancelled"}) break - - # Update the prev_token to the current_token since nothing - # has happened between the old prev_token and the current_token - prev_token = current_token - except defer.TimeoutError: - log_kv({"wait_for_events": "timeout"}) - break - except defer.CancelledError: - log_kv({"wait_for_events": "cancelled"}) - break if result is None: # This happened if there was no timeout or if the timeout had diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py index 70207420a6..26bdead565 100644 --- a/synapse/replication/slave/storage/devices.py +++ b/synapse/replication/slave/storage/devices.py @@ -68,7 +68,7 @@ def _invalidate_caches_for_devices(self, token, rows): if row.entity.startswith("@"): self._device_list_stream_cache.entity_has_changed(row.entity, token) self.get_cached_devices_for_user.invalidate((row.entity,)) - self._get_cached_user_device.invalidate_many((row.entity,)) + self._get_cached_user_device.invalidate((row.entity,)) self.get_device_list_last_stream_id_for_remote.invalidate((row.entity,)) else: diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 9cb9a9f6aa..abf749b001 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -17,11 +17,13 @@ import logging import platform +from typing import TYPE_CHECKING, Optional, Tuple import synapse from synapse.api.errors import Codes, NotFoundError, SynapseError -from synapse.http.server import JsonResource +from synapse.http.server import HttpServer, JsonResource from synapse.http.servlet import RestServlet, parse_json_object_from_request +from synapse.http.site import SynapseRequest from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin from synapse.rest.admin.devices import ( DeleteDevicesRestServlet, @@ -66,22 +68,25 @@ UserTokenRestServlet, WhoisRestServlet, ) -from synapse.types import RoomStreamToken +from synapse.types import JsonDict, RoomStreamToken from synapse.util.versionstring import get_version_string +if TYPE_CHECKING: + from synapse.server import HomeServer + logger = logging.getLogger(__name__) class VersionServlet(RestServlet): PATTERNS = admin_patterns("/server_version$") - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): self.res = { "server_version": get_version_string(synapse), "python_version": platform.python_version(), } - def on_GET(self, request): + def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: return 200, self.res @@ -90,17 +95,14 @@ class 
PurgeHistoryRestServlet(RestServlet): "/purge_history/(?P[^/]*)(/(?P[^/]+))?" ) - def __init__(self, hs): - """ - - Args: - hs (synapse.server.HomeServer) - """ + def __init__(self, hs: "HomeServer"): self.pagination_handler = hs.get_pagination_handler() self.store = hs.get_datastore() self.auth = hs.get_auth() - async def on_POST(self, request, room_id, event_id): + async def on_POST( + self, request: SynapseRequest, room_id: str, event_id: Optional[str] + ) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) body = parse_json_object_from_request(request, allow_empty_body=True) @@ -119,6 +121,8 @@ async def on_POST(self, request, room_id, event_id): if event.room_id != room_id: raise SynapseError(400, "Event is for wrong room.") + # RoomStreamToken expects [int] not Optional[int] + assert event.internal_metadata.stream_ordering is not None room_token = RoomStreamToken( event.depth, event.internal_metadata.stream_ordering ) @@ -173,16 +177,13 @@ async def on_POST(self, request, room_id, event_id): class PurgeHistoryStatusRestServlet(RestServlet): PATTERNS = admin_patterns("/purge_history_status/(?P[^/]+)") - def __init__(self, hs): - """ - - Args: - hs (synapse.server.HomeServer) - """ + def __init__(self, hs: "HomeServer"): self.pagination_handler = hs.get_pagination_handler() self.auth = hs.get_auth() - async def on_GET(self, request, purge_id): + async def on_GET( + self, request: SynapseRequest, purge_id: str + ) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) purge_status = self.pagination_handler.get_purge_status(purge_id) @@ -203,12 +204,12 @@ async def on_GET(self, request, purge_id): class AdminRestResource(JsonResource): """The REST resource which gets mounted at /_synapse/admin""" - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): JsonResource.__init__(self, hs, canonical_json=False) register_servlets(hs, self) -def register_servlets(hs, http_server): +def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: """ Register all the admin servlets. """ @@ -242,7 +243,9 @@ def register_servlets(hs, http_server): RateLimitRestServlet(hs).register(http_server) -def register_servlets_for_client_rest_resource(hs, http_server): +def register_servlets_for_client_rest_resource( + hs: "HomeServer", http_server: HttpServer +) -> None: """Register only the servlets which need to be exposed on /_matrix/client/xxx""" WhoisRestServlet(hs).register(http_server) PurgeHistoryStatusRestServlet(hs).register(http_server) diff --git a/synapse/rest/admin/_base.py b/synapse/rest/admin/_base.py index f203f6fdc6..d9a2f6ca15 100644 --- a/synapse/rest/admin/_base.py +++ b/synapse/rest/admin/_base.py @@ -13,6 +13,7 @@ # limitations under the License. import re +from typing import Iterable, Pattern from synapse.api.auth import Auth from synapse.api.errors import AuthError @@ -20,7 +21,7 @@ from synapse.types import UserID -def admin_patterns(path_regex: str, version: str = "v1"): +def admin_patterns(path_regex: str, version: str = "v1") -> Iterable[Pattern]: """Returns the list of patterns for an admin endpoint Args: diff --git a/synapse/rest/admin/groups.py b/synapse/rest/admin/groups.py index 3b3ffde0b6..68a3ba3cb7 100644 --- a/synapse/rest/admin/groups.py +++ b/synapse/rest/admin/groups.py @@ -12,10 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
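`admin_patterns` now advertises that it returns an iterable of compiled patterns, which is what lets the servlets further down build their `PATTERNS` lists with `[*admin_patterns(...), *admin_patterns(...)]`. A minimal sketch of such a helper, assuming the `/_synapse/admin/<version>` prefix mentioned in `AdminRestResource`:

```python
import re
from typing import Iterable, Pattern


def admin_patterns(path_regex: str, version: str = "v1") -> Iterable[Pattern]:
    """Return the compiled URL patterns for an admin endpoint."""
    prefix = "^/_synapse/admin/" + version
    return [re.compile(prefix + path_regex)]


# Unpacking composes cleanly whatever concrete iterable the helper returns,
# unlike the old tuple concatenation of two calls.
PATTERNS = [
    *admin_patterns("/media/protect/(?P<media_id>[^/]+)"),
    *admin_patterns("/media/unprotect/(?P<media_id>[^/]+)"),
]
assert PATTERNS[0].match("/_synapse/admin/v1/media/protect/abc")
assert PATTERNS[1].match("/_synapse/admin/v1/media/unprotect/abc")
```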
import logging +from typing import TYPE_CHECKING, Tuple from synapse.api.errors import SynapseError from synapse.http.servlet import RestServlet +from synapse.http.site import SynapseRequest from synapse.rest.admin._base import admin_patterns, assert_user_is_admin +from synapse.types import JsonDict + +if TYPE_CHECKING: + from synapse.server import HomeServer logger = logging.getLogger(__name__) @@ -25,12 +31,14 @@ class DeleteGroupAdminRestServlet(RestServlet): PATTERNS = admin_patterns("/delete_group/(?P[^/]*)") - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): self.group_server = hs.get_groups_server_handler() self.is_mine_id = hs.is_mine_id self.auth = hs.get_auth() - async def on_POST(self, request, group_id): + async def on_POST( + self, request: SynapseRequest, group_id: str + ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester.user) diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py index 24dd46113a..0a19a333d7 100644 --- a/synapse/rest/admin/media.py +++ b/synapse/rest/admin/media.py @@ -17,6 +17,7 @@ from typing import TYPE_CHECKING, Tuple from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError +from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_boolean, parse_integer from synapse.http.site import SynapseRequest from synapse.rest.admin._base import ( @@ -37,12 +38,11 @@ class QuarantineMediaInRoom(RestServlet): this server. """ - PATTERNS = ( - admin_patterns("/room/(?P[^/]+)/media/quarantine") - + + PATTERNS = [ + *admin_patterns("/room/(?P[^/]+)/media/quarantine"), # This path kept around for legacy reasons - admin_patterns("/quarantine_media/(?P[^/]+)") - ) + *admin_patterns("/quarantine_media/(?P[^/]+)"), + ] def __init__(self, hs: "HomeServer"): self.store = hs.get_datastore() @@ -120,6 +120,35 @@ async def on_POST( return 200, {} +class UnquarantineMediaByID(RestServlet): + """Quarantines local or remote media by a given ID so that no one can download + it via this server. 
+ """ + + PATTERNS = admin_patterns( + "/media/unquarantine/(?P[^/]+)/(?P[^/]+)" + ) + + def __init__(self, hs: "HomeServer"): + self.store = hs.get_datastore() + self.auth = hs.get_auth() + + async def on_POST( + self, request: SynapseRequest, server_name: str, media_id: str + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request) + await assert_user_is_admin(self.auth, requester.user) + + logging.info( + "Remove from quarantine local media by ID: %s/%s", server_name, media_id + ) + + # Remove from quarantine this media id + await self.store.quarantine_media_by_id(server_name, media_id, None) + + return 200, {} + + class ProtectMediaByID(RestServlet): """Protect local media from being quarantined.""" @@ -137,8 +166,31 @@ async def on_POST( logging.info("Protecting local media by ID: %s", media_id) - # Quarantine this media id - await self.store.mark_local_media_as_safe(media_id) + # Protect this media id + await self.store.mark_local_media_as_safe(media_id, safe=True) + + return 200, {} + + +class UnprotectMediaByID(RestServlet): + """Unprotect local media from being quarantined.""" + + PATTERNS = admin_patterns("/media/unprotect/(?P[^/]+)") + + def __init__(self, hs: "HomeServer"): + self.store = hs.get_datastore() + self.auth = hs.get_auth() + + async def on_POST( + self, request: SynapseRequest, media_id: str + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request) + await assert_user_is_admin(self.auth, requester.user) + + logging.info("Unprotecting local media by ID: %s", media_id) + + # Unprotect this media id + await self.store.mark_local_media_as_safe(media_id, safe=False) return 200, {} @@ -260,15 +312,17 @@ async def on_POST( return 200, {"deleted_media": deleted_media, "total": total} -def register_servlets_for_media_repo(hs: "HomeServer", http_server): +def register_servlets_for_media_repo(hs: "HomeServer", http_server: HttpServer) -> None: """ Media repo specific APIs. 
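The two new servlets above give admins an inverse for the existing quarantine and protect operations. Assuming the usual `/_synapse/admin/v1` prefix and an admin user's access token, calling them looks roughly like this (a `requests` sketch; the paths come from the `PATTERNS` above, while the hostname, token and media IDs are placeholders):

```python
import requests

BASE = "https://homeserver.example.com"   # assumption: your Synapse base URL
TOKEN = "syt_admin_access_token"          # assumption: an admin user's access token
HEADERS = {"Authorization": f"Bearer {TOKEN}"}

# Remove a specific piece of (local or remote) media from quarantine again.
resp = requests.post(
    f"{BASE}/_synapse/admin/v1/media/unquarantine/example.com/abcdef123456",
    headers=HEADERS,
)
resp.raise_for_status()   # both endpoints return 200 with an empty JSON object

# Allow a previously protected local media ID to be quarantined again.
resp = requests.post(
    f"{BASE}/_synapse/admin/v1/media/unprotect/abcdef123456",
    headers=HEADERS,
)
resp.raise_for_status()
```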
""" PurgeMediaCacheRestServlet(hs).register(http_server) QuarantineMediaInRoom(hs).register(http_server) QuarantineMediaByID(hs).register(http_server) + UnquarantineMediaByID(hs).register(http_server) QuarantineMediaByUser(hs).register(http_server) ProtectMediaByID(hs).register(http_server) + UnprotectMediaByID(hs).register(http_server) ListMediaInRoom(hs).register(http_server) DeleteMediaByID(hs).register(http_server) DeleteMediaByDateSize(hs).register(http_server) diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index f289ffe3d0..f0cddd2d2c 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -649,7 +649,7 @@ async def on_GET( limit = parse_integer(request, "limit", default=10) # picking the API shape for symmetry with /messages - filter_str = parse_string(request, b"filter", encoding="utf-8") + filter_str = parse_string(request, "filter", encoding="utf-8") if filter_str: filter_json = urlparse.unquote(filter_str) event_filter = Filter( diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 8c9d21d3ea..7d75564758 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -478,13 +478,12 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: class WhoisRestServlet(RestServlet): path_regex = "/whois/(?P[^/]*)$" - PATTERNS = ( - admin_patterns(path_regex) - + + PATTERNS = [ + *admin_patterns(path_regex), # URL for spec reason # https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid - client_patterns("/admin" + path_regex, v1=True) - ) + *client_patterns("/admin" + path_regex, v1=True), + ] def __init__(self, hs: "HomeServer"): self.hs = hs @@ -553,11 +552,7 @@ async def on_POST( class AccountValidityRenewServlet(RestServlet): PATTERNS = admin_patterns("/account_validity/validity$") - def __init__(self, hs): - """ - Args: - hs (synapse.server.HomeServer): server - """ + def __init__(self, hs: "HomeServer"): self.hs = hs self.account_activity_handler = hs.get_account_validity_handler() self.auth = hs.get_auth() diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 42e709ec14..f6be5f1020 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -14,7 +14,7 @@ import logging import re -from typing import TYPE_CHECKING, Awaitable, Callable, Dict, Optional +from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional from synapse.api.errors import Codes, LoginError, SynapseError from synapse.api.ratelimiting import Ratelimiter @@ -25,6 +25,7 @@ from synapse.http.server import HttpServer, finish_request from synapse.http.servlet import ( RestServlet, + parse_bytes_from_args, parse_json_object_from_request, parse_string, ) @@ -437,9 +438,8 @@ async def on_GET( finish_request(request) return - client_redirect_url = parse_string( - request, "redirectUrl", required=True, encoding=None - ) + args = request.args # type: Dict[bytes, List[bytes]] # type: ignore + client_redirect_url = parse_bytes_from_args(args, "redirectUrl", required=True) sso_url = await self._sso_handler.handle_redirect_request( request, client_redirect_url, diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index d7e91f72dc..3e17038301 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -535,7 +535,7 @@ async def on_GET(self, request, room_id): self.store, request, default_limit=10 ) as_client_event = b"raw" not in request.args - filter_str = 
parse_string(request, b"filter", encoding="utf-8") + filter_str = parse_string(request, "filter", encoding="utf-8") if filter_str: filter_json = urlparse.unquote(filter_str) event_filter = Filter( @@ -650,7 +650,7 @@ async def on_GET(self, request, room_id, event_id): limit = parse_integer(request, "limit", default=10) # picking the API shape for symmetry with /messages - filter_str = parse_string(request, b"filter", encoding="utf-8") + filter_str = parse_string(request, "filter", encoding="utf-8") if filter_str: filter_json = urlparse.unquote(filter_str) event_filter = Filter( @@ -909,7 +909,7 @@ class RoomAliasListServlet(RestServlet): r"^/_matrix/client/unstable/org\.matrix\.msc2432" r"/rooms/(?P[^/]*)/aliases" ), - ] + ] + list(client_patterns("/rooms/(?P[^/]*)/aliases$", unstable=False)) def __init__(self, hs: "HomeServer"): super().__init__() @@ -1059,18 +1059,16 @@ def register_servlets(hs: "HomeServer", http_server, is_worker=False): RoomRedactEventRestServlet(hs).register(http_server) RoomTypingRestServlet(hs).register(http_server) RoomEventContextServlet(hs).register(http_server) - - if hs.config.experimental.spaces_enabled: - RoomSpaceSummaryRestServlet(hs).register(http_server) + RoomSpaceSummaryRestServlet(hs).register(http_server) + RoomEventServlet(hs).register(http_server) + JoinedRoomsRestServlet(hs).register(http_server) + RoomAliasListServlet(hs).register(http_server) + SearchRestServlet(hs).register(http_server) # Some servlets only get registered for the main process. if not is_worker: RoomCreateRestServlet(hs).register(http_server) RoomForgetRestServlet(hs).register(http_server) - SearchRestServlet(hs).register(http_server) - JoinedRoomsRestServlet(hs).register(http_server) - RoomEventServlet(hs).register(http_server) - RoomAliasListServlet(hs).register(http_server) def register_deprecated_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/report_event.py b/synapse/rest/client/v2_alpha/report_event.py index 2c169abbf3..07ea39a8a3 100644 --- a/synapse/rest/client/v2_alpha/report_event.py +++ b/synapse/rest/client/v2_alpha/report_event.py @@ -16,11 +16,7 @@ from http import HTTPStatus from synapse.api.errors import Codes, SynapseError -from synapse.http.servlet import ( - RestServlet, - assert_params_in_dict, - parse_json_object_from_request, -) +from synapse.http.servlet import RestServlet, parse_json_object_from_request from ._base import client_patterns @@ -42,15 +38,14 @@ async def on_POST(self, request, room_id, event_id): user_id = requester.user.to_string() body = parse_json_object_from_request(request) - assert_params_in_dict(body, ("reason", "score")) - if not isinstance(body["reason"], str): + if not isinstance(body.get("reason", ""), str): raise SynapseError( HTTPStatus.BAD_REQUEST, "Param 'reason' must be a string", Codes.BAD_JSON, ) - if not isinstance(body["score"], int): + if not isinstance(body.get("score", 0), int): raise SynapseError( HTTPStatus.BAD_REQUEST, "Param 'score' must be an integer", @@ -61,7 +56,7 @@ async def on_POST(self, request, room_id, event_id): room_id=room_id, event_id=event_id, user_id=user_id, - reason=body["reason"], + reason=body.get("reason"), content=body, received_ts=self.clock.time_msec(), ) diff --git a/synapse/rest/consent/consent_resource.py b/synapse/rest/consent/consent_resource.py index b19cd8afc5..e52570cd8e 100644 --- a/synapse/rest/consent/consent_resource.py +++ b/synapse/rest/consent/consent_resource.py @@ -17,6 +17,7 @@ from hashlib import sha256 from http import HTTPStatus from os import path 
+from typing import Dict, List import jinja2 from jinja2 import TemplateNotFound @@ -24,7 +25,7 @@ from synapse.api.errors import NotFoundError, StoreError, SynapseError from synapse.config import ConfigError from synapse.http.server import DirectServeHtmlResource, respond_with_html -from synapse.http.servlet import parse_string +from synapse.http.servlet import parse_bytes_from_args, parse_string from synapse.types import UserID # language to use for the templates. TODO: figure this out from Accept-Language @@ -116,7 +117,8 @@ async def _async_render_GET(self, request): has_consented = False public_version = username == "" if not public_version: - userhmac_bytes = parse_string(request, "h", required=True, encoding=None) + args = request.args # type: Dict[bytes, List[bytes]] + userhmac_bytes = parse_bytes_from_args(args, "h", required=True) self._check_hash(username, userhmac_bytes) @@ -152,7 +154,8 @@ async def _async_render_POST(self, request): """ version = parse_string(request, "v", required=True) username = parse_string(request, "u", required=True) - userhmac = parse_string(request, "h", required=True, encoding=None) + args = request.args # type: Dict[bytes, List[bytes]] + userhmac = parse_bytes_from_args(args, "h", required=True) self._check_hash(username, userhmac) diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index aba1734a55..d56a1ae482 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -22,6 +22,7 @@ from synapse.http.server import DirectServeJsonResource, respond_with_json from synapse.http.servlet import parse_integer, parse_json_object_from_request from synapse.util import json_decoder +from synapse.util.async_helpers import yieldable_gather_results logger = logging.getLogger(__name__) @@ -210,7 +211,13 @@ async def query_keys(self, request, query, query_remote_on_cache_miss=False): # If there is a cache miss, request the missing keys, then recurse (and # ensure the result is sent). if cache_misses and query_remote_on_cache_miss: - await self.fetcher.get_keys(cache_misses) + await yieldable_gather_results( + lambda t: self.fetcher.get_keys(*t), + ( + (server_name, list(keys), 0) + for server_name, keys in cache_misses.items() + ), + ) await self.query_keys(request, query, query_remote_on_cache_miss=False) else: signed_keys = [] diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/v1/upload_resource.py index 024a105bf2..62dc4aae2d 100644 --- a/synapse/rest/media/v1/upload_resource.py +++ b/synapse/rest/media/v1/upload_resource.py @@ -14,13 +14,13 @@ # limitations under the License. 
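The key-cache-miss path above now issues one `get_keys` call per origin server and runs them together with `yieldable_gather_results`, instead of handing the whole miss map to a single fetcher call. A rough asyncio equivalent of that fan-out (the three-element `(server_name, key_ids, 0)` argument shape is taken from the diff; the fetcher itself is a stand-in):

```python
import asyncio
from typing import Dict, List


async def fetch_keys_for_server(
    server_name: str, key_ids: List[str], minimum_valid_until_ts: int
) -> Dict[str, str]:
    await asyncio.sleep(0)  # stand-in for the real network fetch
    return {key_id: f"key-for-{server_name}" for key_id in key_ids}


async def fetch_all(cache_misses: Dict[str, List[str]]) -> List[Dict[str, str]]:
    # One task per origin server, gathered so a slow server does not
    # serialise the fetches for everyone else.
    tasks = [
        fetch_keys_for_server(server_name, key_ids, 0)
        for server_name, key_ids in cache_misses.items()
    ]
    return await asyncio.gather(*tasks)


results = asyncio.run(
    fetch_all({"one.example": ["ed25519:a"], "two.example": ["ed25519:b"]})
)
assert results[0] == {"ed25519:a": "key-for-one.example"}
```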
import logging -from typing import IO, TYPE_CHECKING +from typing import IO, TYPE_CHECKING, Dict, List, Optional from twisted.web.server import Request from synapse.api.errors import Codes, SynapseError from synapse.http.server import DirectServeJsonResource, respond_with_json -from synapse.http.servlet import parse_string +from synapse.http.servlet import parse_bytes_from_args from synapse.http.site import SynapseRequest from synapse.rest.media.v1.media_storage import SpamMediaException @@ -61,10 +61,11 @@ async def _async_render_POST(self, request: SynapseRequest) -> None: errcode=Codes.TOO_LARGE, ) - upload_name = parse_string(request, b"filename", encoding=None) - if upload_name: + args = request.args # type: Dict[bytes, List[bytes]] # type: ignore + upload_name_bytes = parse_bytes_from_args(args, "filename") + if upload_name_bytes: try: - upload_name = upload_name.decode("utf8") + upload_name = upload_name_bytes.decode("utf8") # type: Optional[str] except UnicodeDecodeError: raise SynapseError( msg="Invalid UTF-8 filename parameter: %r" % (upload_name), code=400 diff --git a/synapse/storage/database.py b/synapse/storage/database.py index a761ad603b..d470cdacde 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -40,6 +40,7 @@ from synapse.api.errors import StoreError from synapse.config.database import DatabaseConnectionConfig +from synapse.logging import opentracing from synapse.logging.context import ( LoggingContext, current_context, @@ -90,12 +91,18 @@ def make_pool( db_args = dict(db_config.config.get("args", {})) db_args.setdefault("cp_reconnect", True) + def _on_new_connection(conn): + # Ensure we have a logging context so we can correctly track queries, + # etc. + with LoggingContext("db.on_new_connection"): + engine.on_new_connection( + LoggingDatabaseConnection(conn, engine, "on_new_connection") + ) + return adbapi.ConnectionPool( db_config.config["name"], cp_reactor=reactor, - cp_openfun=lambda conn: engine.on_new_connection( - LoggingDatabaseConnection(conn, engine, "on_new_connection") - ), + cp_openfun=_on_new_connection, **db_args, ) @@ -313,7 +320,14 @@ def _do_execute(self, func: Callable[..., R], sql: str, *args: Any) -> R: start = time.time() try: - return func(sql, *args) + with opentracing.start_active_span( + "db.query", + tags={ + opentracing.tags.DATABASE_TYPE: "sql", + opentracing.tags.DATABASE_STATEMENT: sql, + }, + ): + return func(sql, *args) except Exception as e: sql_logger.debug("[SQL FAIL] {%s} %s", self.name, e) raise @@ -525,9 +539,17 @@ def new_transaction( exception_callbacks=exception_callbacks, ) try: - r = func(cursor, *args, **kwargs) - conn.commit() - return r + with opentracing.start_active_span( + "db.txn", + tags={ + opentracing.SynapseTags.DB_TXN_DESC: desc, + opentracing.SynapseTags.DB_TXN_ID: name, + }, + ): + r = func(cursor, *args, **kwargs) + opentracing.log_kv({"message": "commit"}) + conn.commit() + return r except self.engine.module.OperationalError as e: # This can happen if the database disappears mid # transaction. 
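The database layer above wraps each query, transaction and connection acquisition in its own OpenTracing span, tagged with the SQL statement, so slow storage shows up directly in request traces. A condensed sketch of the per-query span, using the raw `opentracing` no-op tracer and `sqlite3` purely for illustration (Synapse goes through its `synapse.logging.opentracing` wrapper instead):

```python
import sqlite3

import opentracing
from opentracing.ext import tags


def traced_execute(cursor: sqlite3.Cursor, sql: str, *args) -> sqlite3.Cursor:
    """Run a single SQL statement inside a span tagged with the statement text."""
    with opentracing.tracer.start_active_span(
        "db.query",
        tags={
            tags.DATABASE_TYPE: "sql",
            tags.DATABASE_STATEMENT: sql,
        },
    ):
        return cursor.execute(sql, *args)


conn = sqlite3.connect(":memory:")
cur = conn.cursor()
traced_execute(cur, "CREATE TABLE events (event_id TEXT)")
traced_execute(cur, "INSERT INTO events VALUES (?)", ("$abc",))
traced_execute(cur, "SELECT count(*) FROM events")
assert cur.fetchone() == (1,)
```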
@@ -541,7 +563,8 @@ def new_transaction( if i < N: i += 1 try: - conn.rollback() + with opentracing.start_active_span("db.rollback"): + conn.rollback() except self.engine.module.Error as e1: transaction_logger.warning("[TXN EROLL] {%s} %s", name, e1) continue @@ -554,7 +577,8 @@ def new_transaction( if i < N: i += 1 try: - conn.rollback() + with opentracing.start_active_span("db.rollback"): + conn.rollback() except self.engine.module.Error as e1: transaction_logger.warning( "[TXN EROLL] {%s} %s", @@ -653,16 +677,17 @@ async def runInteraction( logger.warning("Starting db txn '%s' from sentinel context", desc) try: - result = await self.runWithConnection( - self.new_transaction, - desc, - after_callbacks, - exception_callbacks, - func, - *args, - db_autocommit=db_autocommit, - **kwargs, - ) + with opentracing.start_active_span(f"db.{desc}"): + result = await self.runWithConnection( + self.new_transaction, + desc, + after_callbacks, + exception_callbacks, + func, + *args, + db_autocommit=db_autocommit, + **kwargs, + ) for after_callback, after_args, after_kwargs in after_callbacks: after_callback(*after_args, **after_kwargs) @@ -718,25 +743,29 @@ def inner_func(conn, *args, **kwargs): with LoggingContext( str(curr_context), parent_context=parent_context ) as context: - sched_duration_sec = monotonic_time() - start_time - sql_scheduling_timer.observe(sched_duration_sec) - context.add_database_scheduled(sched_duration_sec) - - if self.engine.is_connection_closed(conn): - logger.debug("Reconnecting closed database connection") - conn.reconnect() - - try: - if db_autocommit: - self.engine.attempt_to_set_autocommit(conn, True) - - db_conn = LoggingDatabaseConnection( - conn, self.engine, "runWithConnection" - ) - return func(db_conn, *args, **kwargs) - finally: - if db_autocommit: - self.engine.attempt_to_set_autocommit(conn, False) + with opentracing.start_active_span( + operation_name="db.connection", + ): + sched_duration_sec = monotonic_time() - start_time + sql_scheduling_timer.observe(sched_duration_sec) + context.add_database_scheduled(sched_duration_sec) + + if self.engine.is_connection_closed(conn): + logger.debug("Reconnecting closed database connection") + conn.reconnect() + opentracing.log_kv({"message": "reconnected"}) + + try: + if db_autocommit: + self.engine.attempt_to_set_autocommit(conn, True) + + db_conn = LoggingDatabaseConnection( + conn, self.engine, "runWithConnection" + ) + return func(db_conn, *args, **kwargs) + finally: + if db_autocommit: + self.engine.attempt_to_set_autocommit(conn, False) return await make_deferred_yieldable( self._db_pool.runWithConnection(inner_func, *args, **kwargs) diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index ecc1f935e2..c57ae5ef15 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -168,10 +168,11 @@ def _invalidate_caches_for_event( backfilled, ): self._invalidate_get_event_cache(event_id) + self.have_seen_event.invalidate((room_id, event_id)) self.get_latest_event_ids_in_room.invalidate((room_id,)) - self.get_unread_event_push_actions_by_room_for_user.invalidate_many((room_id,)) + self.get_unread_event_push_actions_by_room_for_user.invalidate((room_id,)) if not backfilled: self._events_stream_cache.entity_has_changed(room_id, stream_ordering) @@ -184,8 +185,8 @@ def _invalidate_caches_for_event( self.get_invited_rooms_for_local_user.invalidate((state_key,)) if relates_to: - self.get_relations_for_event.invalidate_many((relates_to,)) - 
self.get_aggregation_groups_for_event.invalidate_many((relates_to,)) + self.get_relations_for_event.invalidate((relates_to,)) + self.get_aggregation_groups_for_event.invalidate((relates_to,)) self.get_applicable_edit.invalidate((relates_to,)) async def invalidate_cache_and_stream(self, cache_name: str, keys: Tuple[Any, ...]): diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index fd87ba71ab..18f07d96dc 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -1282,7 +1282,7 @@ def _update_remote_device_list_cache_txn( ) txn.call_after(self.get_cached_devices_for_user.invalidate, (user_id,)) - txn.call_after(self._get_cached_user_device.invalidate_many, (user_id,)) + txn.call_after(self._get_cached_user_device.invalidate, (user_id,)) txn.call_after( self.get_device_list_last_stream_id_for_remote.invalidate, (user_id,) ) diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 5845322118..d1237c65cc 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -860,7 +860,7 @@ def _remove_old_push_actions_before_txn( not be deleted. """ txn.call_after( - self.get_unread_event_push_actions_by_room_for_user.invalidate_many, + self.get_unread_event_push_actions_by_room_for_user.invalidate, (room_id, user_id), ) diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index fd25c8112d..897fa06639 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1748,9 +1748,9 @@ def _handle_event_relations(self, txn, event): }, ) - txn.call_after(self.store.get_relations_for_event.invalidate_many, (parent_id,)) + txn.call_after(self.store.get_relations_for_event.invalidate, (parent_id,)) txn.call_after( - self.store.get_aggregation_groups_for_event.invalidate_many, (parent_id,) + self.store.get_aggregation_groups_for_event.invalidate, (parent_id,) ) if rel_type == RelationTypes.REPLACE: @@ -1903,7 +1903,7 @@ def _set_push_actions_for_event_and_users_txn( for user_id in user_ids: txn.call_after( - self.store.get_unread_event_push_actions_by_room_for_user.invalidate_many, + self.store.get_unread_event_push_actions_by_room_for_user.invalidate, (room_id, user_id), ) @@ -1917,7 +1917,7 @@ def _set_push_actions_for_event_and_users_txn( def _remove_push_actions_for_event_id_txn(self, txn, room_id, event_id): # Sad that we have to blow away the cache for the whole room here txn.call_after( - self.store.get_unread_event_push_actions_by_room_for_user.invalidate_many, + self.store.get_unread_event_push_actions_by_room_for_user.invalidate, (room_id,), ) txn.execute( diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 6963bbf7f4..403a5ddaba 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -22,6 +22,7 @@ Iterable, List, Optional, + Set, Tuple, overload, ) @@ -55,7 +56,7 @@ from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator from synapse.storage.util.sequence import build_sequence_generator from synapse.types import JsonDict, get_domain_from_id -from synapse.util.caches.descriptors import cached +from synapse.util.caches.descriptors import cached, cachedList from synapse.util.caches.lrucache import LruCache from synapse.util.iterutils 
import batch_iter from synapse.util.metrics import Measure @@ -1045,32 +1046,74 @@ async def have_events_in_timeline(self, event_ids): return {r["event_id"] for r in rows} - async def have_seen_events(self, event_ids): + async def have_seen_events( + self, room_id: str, event_ids: Iterable[str] + ) -> Set[str]: """Given a list of event ids, check if we have already processed them. + The room_id is only used to structure the cache (so that it can later be + invalidated by room_id) - there is no guarantee that the events are actually + in the room in question. + Args: - event_ids (iterable[str]): + room_id: Room we are polling + event_ids: events we are looking for Returns: set[str]: The events we have already seen. """ + res = await self._have_seen_events_dict( + (room_id, event_id) for event_id in event_ids + ) + return {eid for ((_rid, eid), have_event) in res.items() if have_event} + + @cachedList("have_seen_event", "keys") + async def _have_seen_events_dict( + self, keys: Iterable[Tuple[str, str]] + ) -> Dict[Tuple[str, str], bool]: + """Helper for have_seen_events + + Returns: + a dict {(room_id, event_id)-> bool} + """ # if the event cache contains the event, obviously we've seen it. - results = {x for x in event_ids if self._get_event_cache.contains(x)} - def have_seen_events_txn(txn, chunk): - sql = "SELECT event_id FROM events as e WHERE " + cache_results = { + (rid, eid) for (rid, eid) in keys if self._get_event_cache.contains((eid,)) + } + results = {x: True for x in cache_results} + + def have_seen_events_txn(txn, chunk: Tuple[Tuple[str, str], ...]): + # we deliberately do *not* query the database for room_id, to make the + # query an index-only lookup on `events_event_id_key`. + # + # We therefore pull the events from the database into a set... + + sql = "SELECT event_id FROM events AS e WHERE " clause, args = make_in_list_sql_clause( - txn.database_engine, "e.event_id", chunk + txn.database_engine, "e.event_id", [eid for (_rid, eid) in chunk] ) txn.execute(sql + clause, args) - results.update(row[0] for row in txn) + found_events = {eid for eid, in txn} - for chunk in batch_iter((x for x in event_ids if x not in results), 100): + # ... and then we can update the results for each row in the batch + results.update({(rid, eid): (eid in found_events) for (rid, eid) in chunk}) + + # each batch requires its own index scan, so we make the batches as big as + # possible. + for chunk in batch_iter((k for k in keys if k not in cache_results), 500): await self.db_pool.runInteraction( "have_seen_events", have_seen_events_txn, chunk ) + return results + @cached(max_entries=100000, tree=True) + async def have_seen_event(self, room_id: str, event_id: str): + # this only exists for the benefit of the @cachedList descriptor on + # _have_seen_events_dict + raise NotImplementedError() + def _get_current_state_event_counts_txn(self, txn, room_id): """ See get_current_state_event_counts. 
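The reworked `have_seen_events(room_id, event_ids)` above answers a set-membership question by consulting an in-memory cache first and then resolving the misses in large batches, one query per batch. A stand-alone sketch of that cache-then-batch shape follows; the `cache` set and `fetch_event_ids` callable are stand-ins for the real `have_seen_event` cache and SQL lookup, not Synapse's own objects.

```python
from typing import Callable, Dict, Iterable, List, Set, Tuple


def batch_iter(iterable: Iterable, size: int):
    """Yield lists of up to `size` items from `iterable`."""
    batch: List = []
    for item in iterable:
        batch.append(item)
        if len(batch) == size:
            yield batch
            batch = []
    if batch:
        yield batch


def have_seen(
    keys: Iterable[Tuple[str, str]],  # (room_id, event_id) pairs
    cache: Set[Tuple[str, str]],  # stand-in for the have_seen_event cache
    fetch_event_ids: Callable[[List[str]], Set[str]],  # stand-in for the SQL lookup
) -> Dict[Tuple[str, str], bool]:
    keys = list(keys)
    # Cache hits are answered immediately...
    results = {k: True for k in keys if k in cache}
    # ...and the misses are resolved in big batches, so each batch costs
    # only one query.
    misses = [k for k in keys if k not in results]
    for chunk in batch_iter(misses, 500):
        found = fetch_event_ids([eid for (_rid, eid) in chunk])
        results.update({(rid, eid): eid in found for (rid, eid) in chunk})
    return results


# Example: one cached hit, one miss that the "database" does not know about.
cache = {("room1", "$a")}
print(have_seen([("room1", "$a"), ("room1", "$b")], cache, lambda eids: set()))
# {('room1', '$a'): True, ('room1', '$b'): False}
```

As the comment in the diff notes, querying only `event_id` (and re-attaching the room ID afterwards) is what keeps each batch an index-only lookup on `events_event_id_key`.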
diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index c584868188..2fa945d171 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -143,6 +143,7 @@ async def get_local_media(self, media_id: str) -> Optional[Dict[str, Any]]: "created_ts", "quarantined_by", "url_cache", + "safe_from_quarantine", ), allow_none=True, desc="get_local_media", @@ -296,12 +297,12 @@ async def store_local_media( desc="store_local_media", ) - async def mark_local_media_as_safe(self, media_id: str) -> None: - """Mark a local media as safe from quarantining.""" + async def mark_local_media_as_safe(self, media_id: str, safe: bool = True) -> None: + """Mark a local media as safe or unsafe from quarantining.""" await self.db_pool.simple_update_one( table="local_media_repository", keyvalues={"media_id": media_id}, - updatevalues={"safe_from_quarantine": True}, + updatevalues={"safe_from_quarantine": safe}, desc="mark_local_media_as_safe", ) diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py index 6a2baa7841..1388771c40 100644 --- a/synapse/storage/databases/main/presence.py +++ b/synapse/storage/databases/main/presence.py @@ -50,7 +50,7 @@ def __init__( instance_name=self._instance_name, tables=[("presence_stream", "instance_name", "stream_id")], sequence_name="presence_stream_sequence", - writers=hs.config.worker.writers.to_device, + writers=hs.config.worker.writers.presence, ) else: self._presence_id_gen = StreamIdGenerator( diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py index 8f83748b5e..7fb7780d0f 100644 --- a/synapse/storage/databases/main/purge_events.py +++ b/synapse/storage/databases/main/purge_events.py @@ -16,14 +16,14 @@ from typing import Any, List, Set, Tuple from synapse.api.errors import SynapseError -from synapse.storage._base import SQLBaseStore +from synapse.storage.databases.main import CacheInvalidationWorkerStore from synapse.storage.databases.main.state import StateGroupWorkerStore from synapse.types import RoomStreamToken logger = logging.getLogger(__name__) -class PurgeEventsStore(StateGroupWorkerStore, SQLBaseStore): +class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): async def purge_history( self, room_id: str, token: str, delete_local_events: bool ) -> Set[int]: @@ -203,8 +203,6 @@ def _purge_history_txn( "DELETE FROM event_to_state_groups " "WHERE event_id IN (SELECT event_id from events_to_purge)" ) - for event_id, _ in event_rows: - txn.call_after(self._get_state_group_for_event.invalidate, (event_id,)) # Delete all remote non-state events for table in ( @@ -283,6 +281,20 @@ def _purge_history_txn( # so make sure to keep this actually last. txn.execute("DROP TABLE events_to_purge") + for event_id, should_delete in event_rows: + self._invalidate_cache_and_stream( + txn, self._get_state_group_for_event, (event_id,) + ) + + # XXX: This is racy, since have_seen_events could be called between the + # transaction completing and the invalidation running. On the other hand, + # that's no different to calling `have_seen_events` just before the + # event is deleted from the database. 
+ if should_delete: + self._invalidate_cache_and_stream( + txn, self.have_seen_event, (room_id, event_id) + ) + logger.info("[purge] done") return referenced_state_groups @@ -422,7 +434,11 @@ def _purge_room_txn(self, txn, room_id: str) -> List[int]: # index on them. In any case we should be clearing out 'stream' tables # periodically anyway (#5888) - # TODO: we could probably usefully do a bunch of cache invalidation here + # TODO: we could probably usefully do a bunch more cache invalidation here + + # XXX: as with purge_history, this is racy, but no worse than other races + # that already exist. + self._invalidate_cache_and_stream(txn, self.have_seen_event, (room_id,)) logger.info("[purge] done") diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 3647276acb..edeaacd7a6 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -460,7 +460,7 @@ def _invalidate_get_users_with_receipts_in_room( def invalidate_caches_for_receipt(self, room_id, receipt_type, user_id): self.get_receipts_for_user.invalidate((user_id, receipt_type)) - self._get_linearized_receipts_for_room.invalidate_many((room_id,)) + self._get_linearized_receipts_for_room.invalidate((room_id,)) self.get_last_receipt_event_id_for_user.invalidate( (user_id, room_id, receipt_type) ) @@ -659,9 +659,7 @@ def insert_graph_receipt_txn( ) txn.call_after(self.get_receipts_for_user.invalidate, (user_id, receipt_type)) # FIXME: This shouldn't invalidate the whole cache - txn.call_after( - self._get_linearized_receipts_for_room.invalidate_many, (room_id,) - ) + txn.call_after(self._get_linearized_receipts_for_room.invalidate, (room_id,)) self.db_pool.simple_delete_txn( txn, diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index b4e3f052cc..bcf25f298e 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -789,14 +789,15 @@ async def quarantine_media_by_id( self, server_name: str, media_id: str, - quarantined_by: str, + quarantined_by: Optional[str], ) -> int: - """quarantines a single local or remote media id + """quarantines or unquarantines a single local or remote media id Args: server_name: The name of the server that holds this media media_id: The ID of the media to be quarantined quarantined_by: The user ID that initiated the quarantine request + If it is `None` media will be removed from quarantine """ logger.info("Quarantining media: %s/%s", server_name, media_id) is_local = server_name == self.config.server_name @@ -863,9 +864,9 @@ def _quarantine_media_txn( txn, local_mxcs: List[str], remote_mxcs: List[Tuple[str, str]], - quarantined_by: str, + quarantined_by: Optional[str], ) -> int: - """Quarantine local and remote media items + """Quarantine and unquarantine local and remote media items Args: txn (cursor) @@ -873,18 +874,27 @@ def _quarantine_media_txn( remote_mxcs: A list of (remote server, media id) tuples representing remote mxc URLs quarantined_by: The ID of the user who initiated the quarantine request + If it is `None` media will be removed from quarantine Returns: The total number of media items quarantined """ + # Update all the tables to set the quarantined_by flag - txn.executemany( - """ + sql = """ UPDATE local_media_repository SET quarantined_by = ? - WHERE media_id = ? AND safe_from_quarantine = ? - """, - ((quarantined_by, media_id, False) for media_id in local_mxcs), - ) + WHERE media_id = ? 
+ """ + + # set quarantine + if quarantined_by is not None: + sql += "AND safe_from_quarantine = ?" + rows = [(quarantined_by, media_id, False) for media_id in local_mxcs] + # remove from quarantine + else: + rows = [(quarantined_by, media_id) for media_id in local_mxcs] + + txn.executemany(sql, rows) # Note that a rowcount of -1 can be used to indicate no rows were affected. total_media_quarantined = txn.rowcount if txn.rowcount > 0 else 0 @@ -1523,7 +1533,7 @@ async def add_event_report( room_id: str, event_id: str, user_id: str, - reason: str, + reason: Optional[str], content: JsonDict, received_ts: int, ) -> None: diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index b1bd3a52d9..f1e62f9e85 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -397,6 +397,11 @@ def get_next(self): # ... persist event ... """ + # If we have a list of instances that are allowed to write to this + # stream, make sure we're in it. + if self._writers and self._instance_name not in self._writers: + raise Exception("Tried to allocate stream ID on non-writer") + return _MultiWriterCtxManager(self) def get_next_mult(self, n: int): @@ -406,6 +411,11 @@ def get_next_mult(self, n: int): # ... persist events ... """ + # If we have a list of instances that are allowed to write to this + # stream, make sure we're in it. + if self._writers and self._instance_name not in self._writers: + raise Exception("Tried to allocate stream ID on non-writer") + return _MultiWriterCtxManager(self, n) def get_next_txn(self, txn: LoggingTransaction): @@ -416,6 +426,11 @@ def get_next_txn(self, txn: LoggingTransaction): # ... persist event ... """ + # If we have a list of instances that are allowed to write to this + # stream, make sure we're in it. + if self._writers and self._instance_name not in self._writers: + raise Exception("Tried to allocate stream ID on non-writer") + next_id = self._load_next_id_txn(txn) with self._lock: diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 5c55bb0125..061102c3c8 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -15,6 +15,7 @@ import collections import inspect +import itertools import logging from contextlib import contextmanager from typing import ( @@ -160,8 +161,11 @@ def __repr__(self) -> str: ) +T = TypeVar("T") + + def concurrently_execute( - func: Callable, args: Iterable[Any], limit: int + func: Callable[[T], Any], args: Iterable[T], limit: int ) -> defer.Deferred: """Executes the function with each argument concurrently while limiting the number of concurrent executions. @@ -173,20 +177,27 @@ def concurrently_execute( limit: Maximum number of conccurent executions. Returns: - Deferred[list]: Resolved when all function invocations have finished. + Deferred: Resolved when all function invocations have finished. """ it = iter(args) - async def _concurrently_execute_inner(): + async def _concurrently_execute_inner(value: T) -> None: try: while True: - await maybe_awaitable(func(next(it))) + await maybe_awaitable(func(value)) + value = next(it) except StopIteration: pass + # We use `itertools.islice` to handle the case where the number of args is + # less than the limit, avoiding needlessly spawning unnecessary background + # tasks. 
return make_deferred_yieldable( defer.gatherResults( - [run_in_background(_concurrently_execute_inner) for _ in range(limit)], + [ + run_in_background(_concurrently_execute_inner, value) + for value in itertools.islice(it, limit) + ], consumeErrors=True, ) ).addErrback(unwrapFirstError) diff --git a/synapse/util/batching_queue.py b/synapse/util/batching_queue.py index 44bbb7b1a8..8fd5bfb69b 100644 --- a/synapse/util/batching_queue.py +++ b/synapse/util/batching_queue.py @@ -25,10 +25,11 @@ TypeVar, ) +from prometheus_client import Gauge + from twisted.internet import defer from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable -from synapse.metrics import LaterGauge from synapse.metrics.background_process_metrics import run_as_background_process from synapse.util import Clock @@ -38,6 +39,24 @@ V = TypeVar("V") R = TypeVar("R") +number_queued = Gauge( + "synapse_util_batching_queue_number_queued", + "The number of items waiting in the queue across all keys", + labelnames=("name",), +) + +number_in_flight = Gauge( + "synapse_util_batching_queue_number_pending", + "The number of items across all keys either being processed or waiting in a queue", + labelnames=("name",), +) + +number_of_keys = Gauge( + "synapse_util_batching_queue_number_of_keys", + "The number of distinct keys that have items queued", + labelnames=("name",), +) + class BatchingQueue(Generic[V, R]): """A queue that batches up work, calling the provided processing function @@ -48,10 +67,20 @@ class BatchingQueue(Generic[V, R]): called, and will keep being called until the queue has been drained (for the given key). + If the processing function raises an exception then the exception is proxied + through to the callers waiting on that batch of work. + Note that the return value of `add_to_queue` will be the return value of the processing function that processed the given item. This means that the returned value will likely include data for other items that were in the batch. + + Args: + name: A name for the queue, used for logging contexts and metrics. + This must be unique, otherwise the metrics will be wrong. + clock: The clock to use to schedule work. + process_batch_callback: The callback to to be run to process a batch of + work. """ def __init__( @@ -73,19 +102,15 @@ def __init__( # The function to call with batches of values. 
self._process_batch_callback = process_batch_callback - LaterGauge( - "synapse_util_batching_queue_number_queued", - "The number of items waiting in the queue across all keys", - labels=("name",), - caller=lambda: sum(len(v) for v in self._next_values.values()), + number_queued.labels(self._name).set_function( + lambda: sum(len(q) for q in self._next_values.values()) ) - LaterGauge( - "synapse_util_batching_queue_number_of_keys", - "The number of distinct keys that have items queued", - labels=("name",), - caller=lambda: len(self._next_values), - ) + number_of_keys.labels(self._name).set_function(lambda: len(self._next_values)) + + self._number_in_flight_metric = number_in_flight.labels( + self._name + ) # type: Gauge async def add_to_queue(self, value: V, key: Hashable = ()) -> R: """Adds the value to the queue with the given key, returning the result @@ -107,17 +132,18 @@ async def add_to_queue(self, value: V, key: Hashable = ()) -> R: if key not in self._processing_keys: run_as_background_process(self._name, self._process_queue, key) - return await make_deferred_yieldable(d) + with self._number_in_flight_metric.track_inprogress(): + return await make_deferred_yieldable(d) async def _process_queue(self, key: Hashable) -> None: """A background task to repeatedly pull things off the queue for the given key and call the `self._process_batch_callback` with the values. """ - try: - if key in self._processing_keys: - return + if key in self._processing_keys: + return + try: self._processing_keys.add(key) while True: @@ -137,16 +163,16 @@ async def _process_queue(self, key: Hashable) -> None: values = [value for value, _ in next_values] results = await self._process_batch_callback(values) - for _, deferred in next_values: - with PreserveLoggingContext(): + with PreserveLoggingContext(): + for _, deferred in next_values: deferred.callback(results) except Exception as e: - for _, deferred in next_values: - if deferred.called: - continue + with PreserveLoggingContext(): + for _, deferred in next_values: + if deferred.called: + continue - with PreserveLoggingContext(): deferred.errback(e) finally: diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py index 371e7e4dd0..1044139119 100644 --- a/synapse/util/caches/deferred_cache.py +++ b/synapse/util/caches/deferred_cache.py @@ -16,16 +16,7 @@ import enum import threading -from typing import ( - Callable, - Generic, - Iterable, - MutableMapping, - Optional, - TypeVar, - Union, - cast, -) +from typing import Callable, Generic, Iterable, MutableMapping, Optional, TypeVar, Union from prometheus_client import Gauge @@ -91,7 +82,7 @@ def __init__( # _pending_deferred_cache maps from the key value to a `CacheEntry` object. self._pending_deferred_cache = ( cache_type() - ) # type: MutableMapping[KT, CacheEntry] + ) # type: Union[TreeCache, MutableMapping[KT, CacheEntry]] def metrics_cb(): cache_pending_metric.labels(name).set(len(self._pending_deferred_cache)) @@ -287,8 +278,17 @@ def prefill( self.cache.set(key, value, callbacks=callbacks) def invalidate(self, key): + """Delete a key, or tree of entries + + If the cache is backed by a regular dict, then "key" must be of + the right type for this cache + + If the cache is backed by a TreeCache, then "key" must be a tuple, but + may be of lower cardinality than the TreeCache - in which case the whole + subtree is deleted. 
+ """ self.check_thread() - self.cache.pop(key, None) + self.cache.del_multi(key) # if we have a pending lookup for this key, remove it from the # _pending_deferred_cache, which will (a) stop it being returned @@ -299,20 +299,10 @@ def invalidate(self, key): # run the invalidation callbacks now, rather than waiting for the # deferred to resolve. if entry: - entry.invalidate() - - def invalidate_many(self, key: KT): - self.check_thread() - if not isinstance(key, tuple): - raise TypeError("The cache key must be a tuple not %r" % (type(key),)) - key = cast(KT, key) - self.cache.del_multi(key) - - # if we have a pending lookup for this key, remove it from the - # _pending_deferred_cache, as above - entry_dict = self._pending_deferred_cache.pop(key, None) - if entry_dict is not None: - for entry in iterate_tree_cache_entry(entry_dict): + # _pending_deferred_cache.pop should either return a CacheEntry, or, in the + # case of a TreeCache, a dict of keys to cache entries. Either way calling + # iterate_tree_cache_entry on it will do the right thing. + for entry in iterate_tree_cache_entry(entry): entry.invalidate() def invalidate_all(self): diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 2ac24a2f25..d77e8edeea 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -48,7 +48,6 @@ class _CachedFunction(Generic[F]): invalidate = None # type: Any invalidate_all = None # type: Any - invalidate_many = None # type: Any prefill = None # type: Any cache = None # type: Any num_args = None # type: Any @@ -262,6 +261,11 @@ def __init__( ): super().__init__(orig, num_args=num_args, cache_context=cache_context) + if tree and self.num_args < 2: + raise RuntimeError( + "tree=True is nonsensical for cached functions with a single parameter" + ) + self.max_entries = max_entries self.tree = tree self.iterable = iterable @@ -302,11 +306,11 @@ def _wrapped(*args, **kwargs): wrapped = cast(_CachedFunction, _wrapped) if self.num_args == 1: + assert not self.tree wrapped.invalidate = lambda key: cache.invalidate(key[0]) wrapped.prefill = lambda key, val: cache.prefill(key[0], val) else: wrapped.invalidate = cache.invalidate - wrapped.invalidate_many = cache.invalidate_many wrapped.prefill = cache.prefill wrapped.invalidate_all = cache.invalidate_all diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 54df407ff7..d89e9d9b1d 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -152,7 +152,6 @@ class LruCache(Generic[KT, VT]): """ Least-recently-used cache, supporting prometheus metrics and invalidation callbacks. - Supports del_multi only if cache_type=TreeCache If cache_type=TreeCache, all keys must be tuples. """ @@ -393,10 +392,16 @@ def cache_pop(key: KT, default: Optional[T] = None): @synchronized def cache_del_multi(key: KT) -> None: + """Delete an entry, or tree of entries + + If the LruCache is backed by a regular dict, then "key" must be of + the right type for this cache + + If the LruCache is backed by a TreeCache, then "key" must be a tuple, but + may be of lower cardinality than the TreeCache - in which case the whole + subtree is deleted. 
""" - This will only work if constructed with cache_type=TreeCache - """ - popped = cache.pop(key) + popped = cache.pop(key, None) if popped is None: return # for each deleted node, we now need to remove it from the linked list @@ -430,11 +435,10 @@ def cache_contains(key: KT) -> bool: self.set = cache_set self.setdefault = cache_set_default self.pop = cache_pop + self.del_multi = cache_del_multi # `invalidate` is exposed for consistency with DeferredCache, so that it can be # invalidated by the cache invalidation replication stream. - self.invalidate = cache_pop - if cache_type is TreeCache: - self.del_multi = cache_del_multi + self.invalidate = cache_del_multi self.len = synchronized(cache_len) self.contains = cache_contains self.clear = cache_clear diff --git a/synapse/util/caches/treecache.py b/synapse/util/caches/treecache.py index 73502a8b06..a6df81ebff 100644 --- a/synapse/util/caches/treecache.py +++ b/synapse/util/caches/treecache.py @@ -89,6 +89,9 @@ def pop(self, key, default=None): value. If the key is partial, the TreeCacheNode corresponding to the part of the tree that was removed. """ + if not isinstance(key, tuple): + raise TypeError("The cache key must be a tuple not %r" % (type(key),)) + # a list of the nodes we have touched on the way down the tree nodes = [] diff --git a/synctl b/synctl index 6ce19918d2..90559ded62 100755 --- a/synctl +++ b/synctl @@ -97,11 +97,15 @@ def start(pidfile: str, app: str, config_files: Iterable[str], daemonize: bool) write("started %s(%s)" % (app, ",".join(config_files)), colour=GREEN) return True except subprocess.CalledProcessError as e: - write( - "error starting %s(%s) (exit code: %d); see above for logs" - % (app, ",".join(config_files), e.returncode), - colour=RED, + err = "%s(%s) failed to start (exit code: %d). Check the Synapse logfile" % ( + app, + ",".join(config_files), + e.returncode, ) + if daemonize: + err += ", or run synctl with --no-daemonize" + err += "." + write(err, colour=RED, stream=sys.stderr) return False diff --git a/tests/config/test_tls.py b/tests/config/test_tls.py index 183034f7d4..dcf336416c 100644 --- a/tests/config/test_tls.py +++ b/tests/config/test_tls.py @@ -74,12 +74,11 @@ def test_warn_self_signed(self): config = { "tls_certificate_path": os.path.join(config_dir, "cert.pem"), - "tls_fingerprints": [], } t = TestConfig() t.read_config(config, config_dir_path="", data_dir_path="") - t.read_certificate_from_disk(require_cert_and_key=False) + t.read_tls_certificate() warnings = self.flushWarnings() self.assertEqual(len(warnings), 1) diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 2775dfd880..745c295d3b 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import time +from typing import Dict, List from unittest.mock import Mock import attr @@ -21,7 +22,6 @@ from nacl.signing import SigningKey from signedjson.key import encode_verify_key_base64, get_verify_key -from twisted.internet import defer from twisted.internet.defer import Deferred, ensureDeferred from synapse.api.errors import SynapseError @@ -92,23 +92,23 @@ def test_verify_json_objects_for_server_awaits_previous_requests(self): # deferred completes. 
first_lookup_deferred = Deferred() - async def first_lookup_fetch(keys_to_fetch): - self.assertEquals(current_context().request.id, "context_11") - self.assertEqual(keys_to_fetch, {"server10": {get_key_id(key1): 0}}) + async def first_lookup_fetch( + server_name: str, key_ids: List[str], minimum_valid_until_ts: int + ) -> Dict[str, FetchKeyResult]: + # self.assertEquals(current_context().request.id, "context_11") + self.assertEqual(server_name, "server10") + self.assertEqual(key_ids, [get_key_id(key1)]) + self.assertEqual(minimum_valid_until_ts, 0) await make_deferred_yieldable(first_lookup_deferred) - return { - "server10": { - get_key_id(key1): FetchKeyResult(get_verify_key(key1), 100) - } - } + return {get_key_id(key1): FetchKeyResult(get_verify_key(key1), 100)} mock_fetcher.get_keys.side_effect = first_lookup_fetch async def first_lookup(): with LoggingContext("context_11", request=FakeRequest("context_11")): res_deferreds = kr.verify_json_objects_for_server( - [("server10", json1, 0, "test10"), ("server11", {}, 0, "test11")] + [("server10", json1, 0), ("server11", {}, 0)] ) # the unsigned json should be rejected pretty quickly @@ -126,18 +126,18 @@ async def first_lookup(): d0 = ensureDeferred(first_lookup()) + self.pump() + mock_fetcher.get_keys.assert_called_once() # a second request for a server with outstanding requests # should block rather than start a second call - async def second_lookup_fetch(keys_to_fetch): - self.assertEquals(current_context().request.id, "context_12") - return { - "server10": { - get_key_id(key1): FetchKeyResult(get_verify_key(key1), 100) - } - } + async def second_lookup_fetch( + server_name: str, key_ids: List[str], minimum_valid_until_ts: int + ) -> Dict[str, FetchKeyResult]: + # self.assertEquals(current_context().request.id, "context_12") + return {get_key_id(key1): FetchKeyResult(get_verify_key(key1), 100)} mock_fetcher.get_keys.reset_mock() mock_fetcher.get_keys.side_effect = second_lookup_fetch @@ -146,7 +146,13 @@ async def second_lookup_fetch(keys_to_fetch): async def second_lookup(): with LoggingContext("context_12", request=FakeRequest("context_12")): res_deferreds_2 = kr.verify_json_objects_for_server( - [("server10", json1, 0, "test")] + [ + ( + "server10", + json1, + 0, + ) + ] ) res_deferreds_2[0].addBoth(self.check_context, None) second_lookup_state[0] = 1 @@ -183,11 +189,11 @@ def test_verify_json_for_server(self): signedjson.sign.sign_json(json1, "server9", key1) # should fail immediately on an unsigned object - d = _verify_json_for_server(kr, "server9", {}, 0, "test unsigned") + d = kr.verify_json_for_server("server9", {}, 0) self.get_failure(d, SynapseError) # should succeed on a signed object - d = _verify_json_for_server(kr, "server9", json1, 500, "test signed") + d = kr.verify_json_for_server("server9", json1, 500) # self.assertFalse(d.called) self.get_success(d) @@ -214,24 +220,24 @@ def test_verify_json_for_server_with_null_valid_until_ms(self): signedjson.sign.sign_json(json1, "server9", key1) # should fail immediately on an unsigned object - d = _verify_json_for_server(kr, "server9", {}, 0, "test unsigned") + d = kr.verify_json_for_server("server9", {}, 0) self.get_failure(d, SynapseError) # should fail on a signed object with a non-zero minimum_valid_until_ms, # as it tries to refetch the keys and fails. 
- d = _verify_json_for_server( - kr, "server9", json1, 500, "test signed non-zero min" - ) + d = kr.verify_json_for_server("server9", json1, 500) self.get_failure(d, SynapseError) # We expect the keyring tried to refetch the key once. mock_fetcher.get_keys.assert_called_once_with( - {"server9": {get_key_id(key1): 500}} + "server9", [get_key_id(key1)], 500 ) # should succeed on a signed object with a 0 minimum_valid_until_ms - d = _verify_json_for_server( - kr, "server9", json1, 0, "test signed with zero min" + d = kr.verify_json_for_server( + "server9", + json1, + 0, ) self.get_success(d) @@ -239,15 +245,15 @@ def test_verify_json_dedupes_key_requests(self): """Two requests for the same key should be deduped.""" key1 = signedjson.key.generate_signing_key(1) - async def get_keys(keys_to_fetch): + async def get_keys( + server_name: str, key_ids: List[str], minimum_valid_until_ts: int + ) -> Dict[str, FetchKeyResult]: # there should only be one request object (with the max validity) - self.assertEqual(keys_to_fetch, {"server1": {get_key_id(key1): 1500}}) + self.assertEqual(server_name, "server1") + self.assertEqual(key_ids, [get_key_id(key1)]) + self.assertEqual(minimum_valid_until_ts, 1500) - return { - "server1": { - get_key_id(key1): FetchKeyResult(get_verify_key(key1), 1200) - } - } + return {get_key_id(key1): FetchKeyResult(get_verify_key(key1), 1200)} mock_fetcher = Mock() mock_fetcher.get_keys = Mock(side_effect=get_keys) @@ -259,7 +265,14 @@ async def get_keys(keys_to_fetch): # the first request should succeed; the second should fail because the key # has expired results = kr.verify_json_objects_for_server( - [("server1", json1, 500, "test1"), ("server1", json1, 1500, "test2")] + [ + ( + "server1", + json1, + 500, + ), + ("server1", json1, 1500), + ] ) self.assertEqual(len(results), 2) self.get_success(results[0]) @@ -274,19 +287,21 @@ def test_verify_json_falls_back_to_other_fetchers(self): """If the first fetcher cannot provide a recent enough key, we fall back""" key1 = signedjson.key.generate_signing_key(1) - async def get_keys1(keys_to_fetch): - self.assertEqual(keys_to_fetch, {"server1": {get_key_id(key1): 1500}}) - return { - "server1": {get_key_id(key1): FetchKeyResult(get_verify_key(key1), 800)} - } - - async def get_keys2(keys_to_fetch): - self.assertEqual(keys_to_fetch, {"server1": {get_key_id(key1): 1500}}) - return { - "server1": { - get_key_id(key1): FetchKeyResult(get_verify_key(key1), 1200) - } - } + async def get_keys1( + server_name: str, key_ids: List[str], minimum_valid_until_ts: int + ) -> Dict[str, FetchKeyResult]: + self.assertEqual(server_name, "server1") + self.assertEqual(key_ids, [get_key_id(key1)]) + self.assertEqual(minimum_valid_until_ts, 1500) + return {get_key_id(key1): FetchKeyResult(get_verify_key(key1), 800)} + + async def get_keys2( + server_name: str, key_ids: List[str], minimum_valid_until_ts: int + ) -> Dict[str, FetchKeyResult]: + self.assertEqual(server_name, "server1") + self.assertEqual(key_ids, [get_key_id(key1)]) + self.assertEqual(minimum_valid_until_ts, 1500) + return {get_key_id(key1): FetchKeyResult(get_verify_key(key1), 1200)} mock_fetcher1 = Mock() mock_fetcher1.get_keys = Mock(side_effect=get_keys1) @@ -298,7 +313,18 @@ async def get_keys2(keys_to_fetch): signedjson.sign.sign_json(json1, "server1", key1) results = kr.verify_json_objects_for_server( - [("server1", json1, 1200, "test1"), ("server1", json1, 1500, "test2")] + [ + ( + "server1", + json1, + 1200, + ), + ( + "server1", + json1, + 1500, + ), + ] ) 
self.assertEqual(len(results), 2) self.get_success(results[0]) @@ -349,9 +375,8 @@ async def get_json(destination, path, **kwargs): self.http_client.get_json.side_effect = get_json - keys_to_fetch = {SERVER_NAME: {"key1": 0}} - keys = self.get_success(fetcher.get_keys(keys_to_fetch)) - k = keys[SERVER_NAME][testverifykey_id] + keys = self.get_success(fetcher.get_keys(SERVER_NAME, ["key1"], 0)) + k = keys[testverifykey_id] self.assertEqual(k.valid_until_ts, VALID_UNTIL_TS) self.assertEqual(k.verify_key, testverifykey) self.assertEqual(k.verify_key.alg, "ed25519") @@ -378,7 +403,7 @@ async def get_json(destination, path, **kwargs): # change the server name: the result should be ignored response["server_name"] = "OTHER_SERVER" - keys = self.get_success(fetcher.get_keys(keys_to_fetch)) + keys = self.get_success(fetcher.get_keys(SERVER_NAME, ["key1"], 0)) self.assertEqual(keys, {}) @@ -465,10 +490,9 @@ def test_get_keys_from_perspectives(self): self.expect_outgoing_key_query(SERVER_NAME, "key1", response) - keys_to_fetch = {SERVER_NAME: {"key1": 0}} - keys = self.get_success(fetcher.get_keys(keys_to_fetch)) - self.assertIn(SERVER_NAME, keys) - k = keys[SERVER_NAME][testverifykey_id] + keys = self.get_success(fetcher.get_keys(SERVER_NAME, ["key1"], 0)) + self.assertIn(testverifykey_id, keys) + k = keys[testverifykey_id] self.assertEqual(k.valid_until_ts, VALID_UNTIL_TS) self.assertEqual(k.verify_key, testverifykey) self.assertEqual(k.verify_key.alg, "ed25519") @@ -515,10 +539,9 @@ def test_get_perspectives_own_key(self): self.expect_outgoing_key_query(SERVER_NAME, "key1", response) - keys_to_fetch = {SERVER_NAME: {"key1": 0}} - keys = self.get_success(fetcher.get_keys(keys_to_fetch)) - self.assertIn(SERVER_NAME, keys) - k = keys[SERVER_NAME][testverifykey_id] + keys = self.get_success(fetcher.get_keys(SERVER_NAME, ["key1"], 0)) + self.assertIn(testverifykey_id, keys) + k = keys[testverifykey_id] self.assertEqual(k.valid_until_ts, VALID_UNTIL_TS) self.assertEqual(k.verify_key, testverifykey) self.assertEqual(k.verify_key.alg, "ed25519") @@ -559,14 +582,13 @@ def build_response(): def get_key_from_perspectives(response): fetcher = PerspectivesKeyFetcher(self.hs) - keys_to_fetch = {SERVER_NAME: {"key1": 0}} self.expect_outgoing_key_query(SERVER_NAME, "key1", response) - return self.get_success(fetcher.get_keys(keys_to_fetch)) + return self.get_success(fetcher.get_keys(SERVER_NAME, ["key1"], 0)) # start with a valid response so we can check we are testing the right thing response = build_response() keys = get_key_from_perspectives(response) - k = keys[SERVER_NAME][testverifykey_id] + k = keys[testverifykey_id] self.assertEqual(k.verify_key, testverifykey) # remove the perspectives server's signature @@ -585,23 +607,3 @@ def get_key_from_perspectives(response): def get_key_id(key): """Get the matrix ID tag for a given SigningKey or VerifyKey""" return "%s:%s" % (key.alg, key.version) - - -@defer.inlineCallbacks -def run_in_context(f, *args, **kwargs): - with LoggingContext("testctx"): - rv = yield f(*args, **kwargs) - return rv - - -def _verify_json_for_server(kr, *args): - """thin wrapper around verify_json_for_server which makes sure it is wrapped - with the patched defer.inlineCallbacks. 
- """ - - @defer.inlineCallbacks - def v(): - rv1 = yield kr.verify_json_for_server(*args) - return rv1 - - return run_in_context(v) diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index b037b12a0f..5d6cc2885f 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -57,10 +57,10 @@ def test_notify_interested_services(self): sender="@someone:anywhere", type="m.room.message", room_id="!foo:bar" ) self.mock_store.get_new_events_for_appservice.side_effect = [ - make_awaitable((0, [event])), make_awaitable((0, [])), + make_awaitable((1, [event])), ] - self.handler.notify_interested_services(RoomStreamToken(None, 0)) + self.handler.notify_interested_services(RoomStreamToken(None, 1)) self.mock_scheduler.submit_event_for_as.assert_called_once_with( interested_service, event @@ -77,7 +77,6 @@ def test_query_user_exists_unknown_user(self): self.mock_as_api.query_user.return_value = make_awaitable(True) self.mock_store.get_new_events_for_appservice.side_effect = [ make_awaitable((0, [event])), - make_awaitable((0, [])), ] self.handler.notify_interested_services(RoomStreamToken(None, 0)) @@ -95,7 +94,6 @@ def test_query_user_exists_known_user(self): self.mock_as_api.query_user.return_value = make_awaitable(True) self.mock_store.get_new_events_for_appservice.side_effect = [ make_awaitable((0, [event])), - make_awaitable((0, [])), ] self.handler.notify_interested_services(RoomStreamToken(None, 0)) diff --git a/tests/rest/admin/test_event_reports.py b/tests/rest/admin/test_event_reports.py index 29341bc6e9..f15d1cf6f7 100644 --- a/tests/rest/admin/test_event_reports.py +++ b/tests/rest/admin/test_event_reports.py @@ -64,7 +64,7 @@ def prepare(self, reactor, clock, hs): user_tok=self.admin_user_tok, ) for _ in range(5): - self._create_event_and_report( + self._create_event_and_report_without_parameters( room_id=self.room_id2, user_tok=self.admin_user_tok, ) @@ -378,6 +378,19 @@ def _create_event_and_report(self, room_id, user_tok): ) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + def _create_event_and_report_without_parameters(self, room_id, user_tok): + """Create and report an event, but omit reason and score""" + resp = self.helper.send(room_id, tok=user_tok) + event_id = resp["event_id"] + + channel = self.make_request( + "POST", + "rooms/%s/report/%s" % (room_id, event_id), + json.dumps({}), + access_token=user_tok, + ) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + def _check_fields(self, content): """Checks that all attributes are present in an event report""" for c in content: diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py index ac7b219700..6fee0f95b6 100644 --- a/tests/rest/admin/test_media.py +++ b/tests/rest/admin/test_media.py @@ -16,6 +16,8 @@ import os from binascii import unhexlify +from parameterized import parameterized + import synapse.rest.admin from synapse.api.errors import Codes from synapse.rest.client.v1 import login, profile, room @@ -562,3 +564,228 @@ def _access_media(self, server_and_media_id, expect_success=True): ) # Test that the file is deleted self.assertFalse(os.path.exists(local_path)) + + +class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase): + + servlets = [ + synapse.rest.admin.register_servlets, + synapse.rest.admin.register_servlets_for_media_repo, + login.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + media_repo = hs.get_media_repository_resource() + self.store = 
hs.get_datastore() + self.server_name = hs.hostname + + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + # Create media + upload_resource = media_repo.children[b"upload"] + # file size is 67 Byte + image_data = unhexlify( + b"89504e470d0a1a0a0000000d4948445200000001000000010806" + b"0000001f15c4890000000a49444154789c63000100000500010d" + b"0a2db40000000049454e44ae426082" + ) + + # Upload some media into the room + response = self.helper.upload_media( + upload_resource, image_data, tok=self.admin_user_tok, expect_code=200 + ) + # Extract media ID from the response + server_and_media_id = response["content_uri"][6:] # Cut off 'mxc://' + self.media_id = server_and_media_id.split("/")[1] + + self.url = "/_synapse/admin/v1/media/%s/%s/%s" + + @parameterized.expand(["quarantine", "unquarantine"]) + def test_no_auth(self, action: str): + """ + Try to protect media without authentication. + """ + + channel = self.make_request( + "POST", + self.url % (action, self.server_name, self.media_id), + b"{}", + ) + + self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) + + @parameterized.expand(["quarantine", "unquarantine"]) + def test_requester_is_no_admin(self, action: str): + """ + If the user is not a server admin, an error is returned. + """ + self.other_user = self.register_user("user", "pass") + self.other_user_token = self.login("user", "pass") + + channel = self.make_request( + "POST", + self.url % (action, self.server_name, self.media_id), + access_token=self.other_user_token, + ) + + self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + def test_quarantine_media(self): + """ + Tests that quarantining and remove from quarantine a media is successfully + """ + + media_info = self.get_success(self.store.get_local_media(self.media_id)) + self.assertFalse(media_info["quarantined_by"]) + + # quarantining + channel = self.make_request( + "POST", + self.url % ("quarantine", self.server_name, self.media_id), + access_token=self.admin_user_tok, + ) + + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertFalse(channel.json_body) + + media_info = self.get_success(self.store.get_local_media(self.media_id)) + self.assertTrue(media_info["quarantined_by"]) + + # remove from quarantine + channel = self.make_request( + "POST", + self.url % ("unquarantine", self.server_name, self.media_id), + access_token=self.admin_user_tok, + ) + + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertFalse(channel.json_body) + + media_info = self.get_success(self.store.get_local_media(self.media_id)) + self.assertFalse(media_info["quarantined_by"]) + + def test_quarantine_protected_media(self): + """ + Tests that quarantining from protected media fails + """ + + # protect + self.get_success(self.store.mark_local_media_as_safe(self.media_id, safe=True)) + + # verify protection + media_info = self.get_success(self.store.get_local_media(self.media_id)) + self.assertTrue(media_info["safe_from_quarantine"]) + + # quarantining + channel = self.make_request( + "POST", + self.url % ("quarantine", self.server_name, self.media_id), + access_token=self.admin_user_tok, + ) + + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertFalse(channel.json_body) + + # verify that is not in quarantine + media_info = 
self.get_success(self.store.get_local_media(self.media_id)) + self.assertFalse(media_info["quarantined_by"]) + + +class ProtectMediaByIDTestCase(unittest.HomeserverTestCase): + + servlets = [ + synapse.rest.admin.register_servlets, + synapse.rest.admin.register_servlets_for_media_repo, + login.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + media_repo = hs.get_media_repository_resource() + self.store = hs.get_datastore() + + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + # Create media + upload_resource = media_repo.children[b"upload"] + # file size is 67 Byte + image_data = unhexlify( + b"89504e470d0a1a0a0000000d4948445200000001000000010806" + b"0000001f15c4890000000a49444154789c63000100000500010d" + b"0a2db40000000049454e44ae426082" + ) + + # Upload some media into the room + response = self.helper.upload_media( + upload_resource, image_data, tok=self.admin_user_tok, expect_code=200 + ) + # Extract media ID from the response + server_and_media_id = response["content_uri"][6:] # Cut off 'mxc://' + self.media_id = server_and_media_id.split("/")[1] + + self.url = "/_synapse/admin/v1/media/%s/%s" + + @parameterized.expand(["protect", "unprotect"]) + def test_no_auth(self, action: str): + """ + Try to protect media without authentication. + """ + + channel = self.make_request("POST", self.url % (action, self.media_id), b"{}") + + self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) + + @parameterized.expand(["protect", "unprotect"]) + def test_requester_is_no_admin(self, action: str): + """ + If the user is not a server admin, an error is returned. + """ + self.other_user = self.register_user("user", "pass") + self.other_user_token = self.login("user", "pass") + + channel = self.make_request( + "POST", + self.url % (action, self.media_id), + access_token=self.other_user_token, + ) + + self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + def test_protect_media(self): + """ + Tests that protect and unprotect a media is successfully + """ + + media_info = self.get_success(self.store.get_local_media(self.media_id)) + self.assertFalse(media_info["safe_from_quarantine"]) + + # protect + channel = self.make_request( + "POST", + self.url % ("protect", self.media_id), + access_token=self.admin_user_tok, + ) + + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertFalse(channel.json_body) + + media_info = self.get_success(self.store.get_local_media(self.media_id)) + self.assertTrue(media_info["safe_from_quarantine"]) + + # unprotect + channel = self.make_request( + "POST", + self.url % ("unprotect", self.media_id), + access_token=self.admin_user_tok, + ) + + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertFalse(channel.json_body) + + media_info = self.get_success(self.store.get_local_media(self.media_id)) + self.assertFalse(media_info["safe_from_quarantine"]) diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 7c4bdcdfdd..5b1096d091 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -1880,8 +1880,7 @@ def _get_aliases(self, access_token: str, expected_code: int = 200) -> JsonDict: """Calls the endpoint under test. 
returns the json response object.""" channel = self.make_request( "GET", - "/_matrix/client/unstable/org.matrix.msc2432/rooms/%s/aliases" - % (self.room_id,), + "/_matrix/client/r0/rooms/%s/aliases" % (self.room_id,), access_token=access_token, ) self.assertEqual(channel.code, expected_code, channel.result) diff --git a/tests/rest/client/v2_alpha/test_report_event.py b/tests/rest/client/v2_alpha/test_report_event.py new file mode 100644 index 0000000000..1ec6b05e5b --- /dev/null +++ b/tests/rest/client/v2_alpha/test_report_event.py @@ -0,0 +1,83 @@ +# Copyright 2021 Callum Brown +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json + +import synapse.rest.admin +from synapse.rest.client.v1 import login, room +from synapse.rest.client.v2_alpha import report_event + +from tests import unittest + + +class ReportEventTestCase(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + room.register_servlets, + report_event.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + self.other_user = self.register_user("user", "pass") + self.other_user_tok = self.login("user", "pass") + + self.room_id = self.helper.create_room_as( + self.other_user, tok=self.other_user_tok, is_public=True + ) + self.helper.join(self.room_id, user=self.admin_user, tok=self.admin_user_tok) + resp = self.helper.send(self.room_id, tok=self.admin_user_tok) + self.event_id = resp["event_id"] + self.report_path = "rooms/{}/report/{}".format(self.room_id, self.event_id) + + def test_reason_str_and_score_int(self): + data = {"reason": "this makes me sad", "score": -100} + self._assert_status(200, data) + + def test_no_reason(self): + data = {"score": 0} + self._assert_status(200, data) + + def test_no_score(self): + data = {"reason": "this makes me sad"} + self._assert_status(200, data) + + def test_no_reason_and_no_score(self): + data = {} + self._assert_status(200, data) + + def test_reason_int_and_score_str(self): + data = {"reason": 10, "score": "string"} + self._assert_status(400, data) + + def test_reason_zero_and_score_blank(self): + data = {"reason": 0, "score": ""} + self._assert_status(400, data) + + def test_reason_and_score_null(self): + data = {"reason": None, "score": None} + self._assert_status(400, data) + + def _assert_status(self, response_status, data): + channel = self.make_request( + "POST", + self.report_path, + json.dumps(data), + access_token=self.other_user_tok, + ) + self.assertEqual( + response_status, int(channel.result["code"]), msg=channel.result["body"] + ) diff --git a/tests/rest/key/v2/test_remote_key_resource.py b/tests/rest/key/v2/test_remote_key_resource.py index 3b275bc23b..a75c0ea3f0 100644 --- a/tests/rest/key/v2/test_remote_key_resource.py +++ b/tests/rest/key/v2/test_remote_key_resource.py @@ -208,10 +208,10 @@ def test_get_key(self): keyid = "ed25519:%s" % (testkey.version,) fetcher = 
PerspectivesKeyFetcher(self.hs2) - d = fetcher.get_keys({"targetserver": {keyid: 1000}}) + d = fetcher.get_keys("targetserver", [keyid], 1000) res = self.get_success(d) - self.assertIn("targetserver", res) - keyres = res["targetserver"][keyid] + self.assertIn(keyid, res) + keyres = res[keyid] assert isinstance(keyres, FetchKeyResult) self.assertEqual( signedjson.key.encode_verify_key_base64(keyres.verify_key), @@ -230,10 +230,10 @@ def test_get_notary_key(self): keyid = "ed25519:%s" % (testkey.version,) fetcher = PerspectivesKeyFetcher(self.hs2) - d = fetcher.get_keys({self.hs.hostname: {keyid: 1000}}) + d = fetcher.get_keys(self.hs.hostname, [keyid], 1000) res = self.get_success(d) - self.assertIn(self.hs.hostname, res) - keyres = res[self.hs.hostname][keyid] + self.assertIn(keyid, res) + keyres = res[keyid] assert isinstance(keyres, FetchKeyResult) self.assertEqual( signedjson.key.encode_verify_key_base64(keyres.verify_key), @@ -247,10 +247,10 @@ def test_get_notary_keyserver_key(self): keyid = "ed25519:%s" % (self.hs_signing_key.version,) fetcher = PerspectivesKeyFetcher(self.hs2) - d = fetcher.get_keys({self.hs.hostname: {keyid: 1000}}) + d = fetcher.get_keys(self.hs.hostname, [keyid], 1000) res = self.get_success(d) - self.assertIn(self.hs.hostname, res) - keyres = res[self.hs.hostname][keyid] + self.assertIn(keyid, res) + keyres = res[keyid] assert isinstance(keyres, FetchKeyResult) self.assertEqual( signedjson.key.encode_verify_key_base64(keyres.verify_key), diff --git a/tests/storage/databases/__init__.py b/tests/storage/databases/__init__.py new file mode 100644 index 0000000000..c24c7ecd92 --- /dev/null +++ b/tests/storage/databases/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/storage/databases/main/__init__.py b/tests/storage/databases/main/__init__.py new file mode 100644 index 0000000000..c24c7ecd92 --- /dev/null +++ b/tests/storage/databases/main/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/storage/databases/main/test_events_worker.py b/tests/storage/databases/main/test_events_worker.py new file mode 100644 index 0000000000..932970fd9a --- /dev/null +++ b/tests/storage/databases/main/test_events_worker.py @@ -0,0 +1,96 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import json + +from synapse.logging.context import LoggingContext +from synapse.storage.databases.main.events_worker import EventsWorkerStore + +from tests import unittest + + +class HaveSeenEventsTestCase(unittest.HomeserverTestCase): + def prepare(self, reactor, clock, hs): + self.store: EventsWorkerStore = hs.get_datastore() + + # insert some test data + for rid in ("room1", "room2"): + self.get_success( + self.store.db_pool.simple_insert( + "rooms", + {"room_id": rid, "room_version": 4}, + ) + ) + + for idx, (rid, eid) in enumerate( + ( + ("room1", "event10"), + ("room1", "event11"), + ("room1", "event12"), + ("room2", "event20"), + ) + ): + self.get_success( + self.store.db_pool.simple_insert( + "events", + { + "event_id": eid, + "room_id": rid, + "topological_ordering": idx, + "stream_ordering": idx, + "type": "test", + "processed": True, + "outlier": False, + }, + ) + ) + self.get_success( + self.store.db_pool.simple_insert( + "event_json", + { + "event_id": eid, + "room_id": rid, + "json": json.dumps({"type": "test", "room_id": rid}), + "internal_metadata": "{}", + "format_version": 3, + }, + ) + ) + + def test_simple(self): + with LoggingContext(name="test") as ctx: + res = self.get_success( + self.store.have_seen_events("room1", ["event10", "event19"]) + ) + self.assertEquals(res, {"event10"}) + + # that should result in a single db query + self.assertEquals(ctx.get_resource_usage().db_txn_count, 1) + + # a second lookup of the same events should cause no queries + with LoggingContext(name="test") as ctx: + res = self.get_success( + self.store.have_seen_events("room1", ["event10", "event19"]) + ) + self.assertEquals(res, {"event10"}) + self.assertEquals(ctx.get_resource_usage().db_txn_count, 0) + + def test_query_via_event_cache(self): + # fetch an event into the event cache + self.get_success(self.store.get_event("event10")) + + # looking it up should now cause no db hits + with LoggingContext(name="test") as ctx: + res = self.get_success(self.store.have_seen_events("room1", ["event10"])) + self.assertEquals(res, {"event10"}) + self.assertEquals(ctx.get_resource_usage().db_txn_count, 0) diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index bbbc276697..0277998cbe 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -622,17 +622,17 @@ def func2(self, key, cache_context): self.assertEquals(callcount2[0], 1) a.func2.invalidate(("foo",)) - self.assertEquals(a.func2.cache.cache.pop.call_count, 1) + self.assertEquals(a.func2.cache.cache.del_multi.call_count, 1) yield a.func2("foo") a.func2.invalidate(("foo",)) - self.assertEquals(a.func2.cache.cache.pop.call_count, 2) + self.assertEquals(a.func2.cache.cache.del_multi.call_count, 2) self.assertEquals(callcount[0], 1) self.assertEquals(callcount2[0], 2) a.func.invalidate(("foo",)) - self.assertEquals(a.func2.cache.cache.pop.call_count, 3) + self.assertEquals(a.func2.cache.cache.del_multi.call_count, 3) 
         yield a.func("foo")
         self.assertEquals(callcount[0], 2)
diff --git a/tests/util/test_batching_queue.py b/tests/util/test_batching_queue.py
index 5def1e56c9..07be57d72c 100644
--- a/tests/util/test_batching_queue.py
+++ b/tests/util/test_batching_queue.py
@@ -14,7 +14,12 @@
 from twisted.internet import defer
 
 from synapse.logging.context import make_deferred_yieldable
-from synapse.util.batching_queue import BatchingQueue
+from synapse.util.batching_queue import (
+    BatchingQueue,
+    number_in_flight,
+    number_of_keys,
+    number_queued,
+)
 
 from tests.server import get_clock
 from tests.unittest import TestCase
@@ -24,6 +29,14 @@ class BatchingQueueTestCase(TestCase):
     def setUp(self):
         self.clock, hs_clock = get_clock()
 
+        # Ensure that we remove any existing metrics for "test_queue".
+        try:
+            number_queued.remove("test_queue")
+            number_of_keys.remove("test_queue")
+            number_in_flight.remove("test_queue")
+        except KeyError:
+            pass
+
         self._pending_calls = []
         self.queue = BatchingQueue("test_queue", hs_clock, self._process_queue)
 
@@ -32,6 +45,36 @@ async def _process_queue(self, values):
         self._pending_calls.append((values, d))
         return await make_deferred_yieldable(d)
 
+    def _get_sample_with_name(self, metric, name) -> int:
+        """For a prometheus metric, get the value of the sample that has a
+        matching "name" label.
+        """
+        for sample in metric.collect()[0].samples:
+            if sample.labels.get("name") == name:
+                return sample.value
+
+        self.fail("Found no matching sample")
+
+    def _assert_metrics(self, queued, keys, in_flight):
+        """Assert that the metrics are correct"""
+
+        sample = self._get_sample_with_name(number_queued, self.queue._name)
+        self.assertEqual(
+            sample,
+            queued,
+            "number_queued",
+        )
+
+        sample = self._get_sample_with_name(number_of_keys, self.queue._name)
+        self.assertEqual(sample, keys, "number_of_keys")
+
+        sample = self._get_sample_with_name(number_in_flight, self.queue._name)
+        self.assertEqual(
+            sample,
+            in_flight,
+            "number_in_flight",
+        )
+
     def test_simple(self):
         """Tests the basic case of calling `add_to_queue` once and having
         `_process_queue` return.
@@ -41,6 +84,8 @@ def test_simple(self):
 
         queue_d = defer.ensureDeferred(self.queue.add_to_queue("foo"))
 
+        self._assert_metrics(queued=1, keys=1, in_flight=1)
+
         # The queue should wait a reactor tick before calling the processing
         # function.
         self.assertFalse(self._pending_calls)
@@ -52,12 +97,15 @@ def test_simple(self):
         self.assertEqual(len(self._pending_calls), 1)
         self.assertEqual(self._pending_calls[0][0], ["foo"])
         self.assertFalse(queue_d.called)
+        self._assert_metrics(queued=0, keys=0, in_flight=1)
 
         # Return value of the `_process_queue` should be propagated back.
         self._pending_calls.pop()[1].callback("bar")
 
         self.assertEqual(self.successResultOf(queue_d), "bar")
 
+        self._assert_metrics(queued=0, keys=0, in_flight=0)
+
     def test_batching(self):
         """Test that multiple calls at the same time get batched up into one
         call to `_process_queue`.
@@ -68,6 +116,8 @@ def test_batching(self):
         queue_d1 = defer.ensureDeferred(self.queue.add_to_queue("foo1"))
         queue_d2 = defer.ensureDeferred(self.queue.add_to_queue("foo2"))
 
+        self._assert_metrics(queued=2, keys=1, in_flight=2)
+
         self.clock.pump([0])
 
         # We should see only *one* call to `_process_queue`
@@ -75,12 +125,14 @@ def test_batching(self):
         self.assertEqual(self._pending_calls[0][0], ["foo1", "foo2"])
         self.assertFalse(queue_d1.called)
         self.assertFalse(queue_d2.called)
+        self._assert_metrics(queued=0, keys=0, in_flight=2)
 
         # Return value of the `_process_queue` should be propagated back to both.
         self._pending_calls.pop()[1].callback("bar")
 
         self.assertEqual(self.successResultOf(queue_d1), "bar")
         self.assertEqual(self.successResultOf(queue_d2), "bar")
+        self._assert_metrics(queued=0, keys=0, in_flight=0)
 
     def test_queuing(self):
         """Test that we queue up requests while a `_process_queue` is being
@@ -92,13 +144,20 @@ def test_queuing(self):
 
         queue_d1 = defer.ensureDeferred(self.queue.add_to_queue("foo1"))
         self.clock.pump([0])
+        self.assertEqual(len(self._pending_calls), 1)
+
+        # We queue up more work after the process function has been called, testing
+        # that it gets correctly queued up.
         queue_d2 = defer.ensureDeferred(self.queue.add_to_queue("foo2"))
+        queue_d3 = defer.ensureDeferred(self.queue.add_to_queue("foo3"))
 
         # We should see only *one* call to `_process_queue`
         self.assertEqual(len(self._pending_calls), 1)
         self.assertEqual(self._pending_calls[0][0], ["foo1"])
         self.assertFalse(queue_d1.called)
         self.assertFalse(queue_d2.called)
+        self.assertFalse(queue_d3.called)
+        self._assert_metrics(queued=2, keys=1, in_flight=3)
 
         # Return value of the `_process_queue` should be propagated back to the
         # first.
@@ -106,18 +165,24 @@ def test_queuing(self):
         self.assertEqual(self.successResultOf(queue_d1), "bar1")
         self.assertFalse(queue_d2.called)
+        self.assertFalse(queue_d3.called)
+        self._assert_metrics(queued=2, keys=1, in_flight=2)
 
         # We should now see a second call to `_process_queue`
         self.clock.pump([0])
 
         self.assertEqual(len(self._pending_calls), 1)
-        self.assertEqual(self._pending_calls[0][0], ["foo2"])
+        self.assertEqual(self._pending_calls[0][0], ["foo2", "foo3"])
         self.assertFalse(queue_d2.called)
+        self.assertFalse(queue_d3.called)
+        self._assert_metrics(queued=0, keys=0, in_flight=2)
 
         # Return value of the `_process_queue` should be propagated back to the
         # second.
         self._pending_calls.pop()[1].callback("bar2")
 
         self.assertEqual(self.successResultOf(queue_d2), "bar2")
+        self.assertEqual(self.successResultOf(queue_d3), "bar2")
+        self._assert_metrics(queued=0, keys=0, in_flight=0)
 
     def test_different_keys(self):
         """Test that calls to different keys get processed in parallel."""
@@ -140,6 +205,7 @@ def test_different_keys(self):
         self.assertFalse(queue_d1.called)
         self.assertFalse(queue_d2.called)
         self.assertFalse(queue_d3.called)
+        self._assert_metrics(queued=1, keys=1, in_flight=3)
 
         # Return value of the `_process_queue` should be propagated back to the
         # first.
@@ -148,6 +214,7 @@ def test_different_keys(self):
         self.assertEqual(self.successResultOf(queue_d1), "bar1")
         self.assertFalse(queue_d2.called)
         self.assertFalse(queue_d3.called)
+        self._assert_metrics(queued=1, keys=1, in_flight=2)
 
         # Return value of the `_process_queue` should be propagated back to the
         # second.
@@ -161,9 +228,11 @@ def test_different_keys(self):
         self.assertEqual(len(self._pending_calls), 1)
         self.assertEqual(self._pending_calls[0][0], ["foo3"])
         self.assertFalse(queue_d3.called)
+        self._assert_metrics(queued=0, keys=0, in_flight=1)
 
         # Return value of the `_process_queue` should be propagated back to the
         # third deferred.
         self._pending_calls.pop()[1].callback("bar4")
 
         self.assertEqual(self.successResultOf(queue_d3), "bar4")
+        self._assert_metrics(queued=0, keys=0, in_flight=0)
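
Note on the metrics assertions above: the new `_get_sample_with_name` helper works by iterating the samples exposed by a labelled Prometheus gauge and picking the one whose `name` label matches the queue under test, and `setUp` calls `.remove("test_queue")` so gauges from an earlier test run cannot leak into the assertions. The snippet below is a minimal, self-contained sketch of that same pattern using `prometheus_client` directly; the gauge name and registry here are illustrative stand-ins, not the actual `number_queued`/`number_of_keys`/`number_in_flight` metrics defined in `synapse.util.batching_queue`.

    # Sketch of the sample-lookup pattern used by _get_sample_with_name.
    # Assumes only the public prometheus_client API; names are hypothetical.
    from prometheus_client import CollectorRegistry, Gauge

    registry = CollectorRegistry()
    number_queued = Gauge(
        "example_batching_queue_number_queued",
        "Number of items waiting in the queue, per queue name",
        labelnames=["name"],
        registry=registry,
    )

    # Simulate two items being queued for the "test_queue" child.
    number_queued.labels("test_queue").set(2)

    def get_sample_with_name(metric, name):
        """Return the value of the sample whose "name" label matches `name`."""
        for sample in metric.collect()[0].samples:
            if sample.labels.get("name") == name:
                return sample.value
        raise AssertionError("Found no matching sample")

    assert get_sample_with_name(number_queued, "test_queue") == 2

    # Removing the labelled child (as the test's setUp does) drops its sample,
    # so a later lookup fails loudly instead of reading a stale value.
    number_queued.remove("test_queue")

Because the real gauges are module-level and shared across tests, removing the "test_queue" child in `setUp` (and swallowing the `KeyError` when it does not yet exist) is what keeps each test's `_assert_metrics` checks independent of previous runs.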