Mirror of https://github.com/matrix-org/dendrite.git
Synced 2026-01-16 18:43:10 -06:00

Commit ec486f9f54: Pull latest main into branch
.github/ISSUE_TEMPLATE/BUG_REPORT.md (vendored, 15 changed lines)

@@ -7,6 +7,11 @@ about: Create a report to help us improve
 <!--
 All bug reports must provide the following background information
 Text between <!-- and --> marks will be invisible in the report.
 
+IF YOUR ISSUE IS CONSIDERED A SECURITY VULNERABILITY THEN PLEASE STOP
+AND DO NOT POST IT AS A GITHUB ISSUE! Please report the issue responsibly by
+disclosing in private by email to security@matrix.org instead. For more details, please
+see: https://www.matrix.org/security-disclosure-policy/
 -->
 
 ### Background information
 
@@ -18,13 +23,12 @@ Text between <!-- and --> marks will be invisible in the report.
 - **`go version`**:
 - **Client used (if applicable)**:
 
 
 ### Description
 
 - **What** is the problem:
 - **Who** is affected:
 - **How** is this bug manifesting:
 - **When** did this first appear:
 
 <!--
 Examples of good descriptions:
 
@@ -38,7 +42,6 @@ Examples of good descriptions:
 - How: "Lots of logs about device change updates"
 - When: "After my server joined Matrix HQ"
 
 
 Examples of bad descriptions:
 - What: "Can't send messages" - This is bad because it isn't specfic enough. Which endpoint isn't working and what is the response code? Does the message send but encryption fail?
 - Who: "Me" - Who are you? Running the server or a user on a Dendrite server?
.github/PULL_REQUEST_TEMPLATE.md (vendored, 6 changed lines)

@@ -1,8 +1,8 @@
 ### Pull Request Checklist
 
-<!-- Please read docs/CONTRIBUTING.md before submitting your pull request -->
+<!-- Please read https://matrix-org.github.io/dendrite/development/contributing before submitting your pull request -->
 
-* [ ] I have added added tests for PR _or_ I have justified why this PR doesn't need tests.
-* [ ] Pull request includes a [sign off](https://github.com/matrix-org/dendrite/blob/main/docs/CONTRIBUTING.md#sign-off)
+* [ ] I have added tests for PR _or_ I have justified why this PR doesn't need tests.
+* [ ] Pull request includes a [sign off below using a legally identifiable name](https://matrix-org.github.io/dendrite/development/contributing#sign-off) _or_ I have already signed off privately
 
 Signed-off-by: `Your Name <your@email.example.org>`
.github/workflows/dendrite.yml (vendored, 5 changed lines)

@@ -7,6 +7,7 @@ on:
   pull_request:
   release:
     types: [published]
+  workflow_dispatch:
 
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
 
@@ -341,7 +342,7 @@ jobs:
       # See https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md specifically GOROOT_1_17_X64
       run: |
         sudo apt-get update && sudo apt-get install -y libolm3 libolm-dev
-        go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest
+        go get -v github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest
 
     - name: Run actions/checkout@v2 for dendrite
       uses: actions/checkout@v2
 
@@ -375,6 +376,8 @@ jobs:
     # Build initial Dendrite image
     - run: docker build -t complement-dendrite -f build/scripts/Complement${{ matrix.postgres }}.Dockerfile .
       working-directory: dendrite
+      env:
+        DOCKER_BUILDKIT: 1
 
     # Run Complement
     - run: |
.github/workflows/docker.yml (vendored, 60 changed lines)

@@ -137,3 +137,63 @@ jobs:
           ${{ env.DOCKER_NAMESPACE }}/dendrite-polylith:${{ env.RELEASE_VERSION }}
           ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-polylith:latest
           ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-polylith:${{ env.RELEASE_VERSION }}
 
+  demo-pinecone:
+    name: Pinecone demo image
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      packages: write
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+      - name: Get release tag
+        if: github.event_name == 'release' # Only for GitHub releases
+        run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v1
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v1
+      - name: Login to Docker Hub
+        uses: docker/login-action@v1
+        with:
+          username: ${{ env.DOCKER_HUB_USER }}
+          password: ${{ secrets.DOCKER_TOKEN }}
+      - name: Login to GitHub Containers
+        uses: docker/login-action@v1
+        with:
+          registry: ghcr.io
+          username: ${{ github.repository_owner }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Build main pinecone demo image
+        if: github.ref_name == 'main'
+        id: docker_build_demo_pinecone
+        uses: docker/build-push-action@v2
+        with:
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
+          context: .
+          file: ./build/docker/Dockerfile.demo-pinecone
+          platforms: ${{ env.PLATFORMS }}
+          push: true
+          tags: |
+            ${{ env.DOCKER_NAMESPACE }}/dendrite-demo-pinecone:${{ github.ref_name }}
+            ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-demo-pinecone:${{ github.ref_name }}
+
+      - name: Build release pinecone demo image
+        if: github.event_name == 'release' # Only for GitHub releases
+        id: docker_build_demo_pinecone_release
+        uses: docker/build-push-action@v2
+        with:
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
+          context: .
+          file: ./build/docker/Dockerfile.demo-pinecone
+          platforms: ${{ env.PLATFORMS }}
+          push: true
+          tags: |
+            ${{ env.DOCKER_NAMESPACE }}/dendrite-demo-pinecone:latest
+            ${{ env.DOCKER_NAMESPACE }}/dendrite-demo-pinecone:${{ env.RELEASE_VERSION }}
+            ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-demo-pinecone:latest
+            ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-demo-pinecone:${{ env.RELEASE_VERSION }}
CHANGES.md (201 changed lines)

@@ -1,5 +1,206 @@
 # Changelog
 
+## Dendrite 0.10.4 (2022-10-21)
+
+### Features
+
+* Various tables belonging to the user API will be renamed so that they are namespaced with the `userapi_` prefix
+  * Note that, after upgrading to this version, you should not revert to an older version of Dendrite as the database changes **will not** be reverted automatically
+* The backoff and retry behaviour in the federation API has been refactored and improved
+
+### Fixes
+
+* Private read receipt support is now advertised in the client `/versions` endpoint
+* Private read receipts will now clear notification counts properly
+* A bug where a false `leave` membership transition was inserted into the timeline after accepting an invite has been fixed
+* Some panics caused by concurrent map writes in the key server have been fixed
+* The sync API now calculates membership transitions from state deltas more accurately
+* Transaction IDs are now scoped to endpoints, which should fix some bugs where transaction ID reuse could cause nonsensical cached responses from some endpoints
+* The length of the `type`, `sender`, `state_key` and `room_id` fields in events are now verified by number of bytes rather than codepoints after a spec clarification, reverting a change made in Dendrite 0.9.6
+
+## Dendrite 0.10.3 (2022-10-14)
+
+### Features
+
+* Event relations are now tracked and support for the `/room/{roomID}/relations/...` client API endpoints have been added
+* Support has been added for private read receipts
+* The built-in NATS Server has been updated to version 2.9.3
+
+### Fixes
+
+* The `unread_notifications` are now always populated in joined room responses
+* The `/get_missing_events` federation API endpoint should now work correctly for rooms with `joined` and `invited` visibility settings, returning redacted events for events that other servers are not allowed to see
+* The `/event` client API endpoint now applies history visibility correctly
+* Read markers should now be updated much more reliably
+* A rare bug in the sync API which could cause some `join` memberships to be incorrectly overwritten by other memberships when working out which rooms to populate has been fixed
+* The federation API now correctly updates the joined hosts table during a state rewrite
+
+## Dendrite 0.10.2 (2022-10-07)
+
+### Features
+
+* Dendrite will now fail to start if there is an obvious problem with the configured `max_open_conns` when using PostgreSQL database backends, since this can lead to instability and performance issues
+  * More information on this is available [in the documentation](https://matrix-org.github.io/dendrite/installation/start/optimisation#postgresql-connection-limit)
+* Unnecessary/empty fields will no longer be sent in `/sync` responses
+* It is now possible to configure `old_private_keys` from previous Matrix installations on the same domain if only public key is known, to make it easier to expire old keys correctly
+  * You can configure either just the `private_key` path, or you can supply both the `public_key` and `key_id`
+
+### Fixes
+
+* The sync transaction behaviour has been modified further so that errors in one stream should not propagate to other streams unnecessarily
+* Rooms should now be classified as DM rooms correctly by passing through `is_direct` and unsigned hints
+* A bug which caused marking device lists as stale to consume lots of CPU has been fixed
+* Users accepting invites should no longer cause unnecessary federated joins if there are already other local users in the room
+* The sync API state range queries have been optimised by adding missing indexes
+* It should now be possible to configure non-English languages for full-text search in `search.language`
+* The roomserver will no longer attempt to perform federated requests to the local server when trying to fetch missing events
+* The `/keys/upload` endpoint will now always return the `one_time_keys_counts`, which may help with E2EE reliability
+* The sync API will now retrieve the latest stream position before processing each stream rather than at the beginning of the request, to hopefully reduce the number of round-trips to `/sync`
+
+## Dendrite 0.10.1 (2022-09-30)
+
+### Features
+
+* The built-in NATS Server has been updated to version 2.9.2
+
+### Fixes
+
+* A regression introduced in 0.10.0 in `/sync` as a result of transaction errors has been fixed
+* Account data updates will no longer send duplicate output events
+
+## Dendrite 0.10.0 (2022-09-30)
+
+### Features
+
+* High performance full-text searching has been added to Dendrite
+  * Search must be enabled in the [`search` section of the `sync_api` config](https://github.com/matrix-org/dendrite/blob/6348486a1365c7469a498101f5035a9b6bd16d22/dendrite-sample.monolith.yaml#L279-L290) before it can be used
+  * The search index is stored on the filesystem rather than the sync API database, so a path to a suitable storage location on disk must be configured
+* Sync requests should now complete faster and use considerably less database connections as a result of better transactional isolation
+* The notifications code has been refactored to hopefully make notifications more reliable
+* A new `/_dendrite/admin/refreshDevices/{userID}` admin endpoint has been added for forcing a refresh of a remote user's device lists without having to modify the database by hand
+* A new `/_dendrite/admin/fulltext/reindex` admin endpoint has been added for rebuilding the search index (although this may take some time)
+
+### Fixes
+
+* A number of bugs in the device list updater have been fixed, which should help considerably with federated device list synchronisation and E2EE reliability
+* A state resolution bug has been fixed which should help to prevent unexpected state resets
+* The deprecated `"origin"` field in events will now be correctly ignored in all cases
+* Room versions 8 and 9 will now correctly evaluate `"knock"` join rules and membership states
+* A database index has been added to speed up finding room memberships in the sync API (contributed by [PiotrKozimor](https://github.com/PiotrKozimor))
+* The client API will now return an `M_UNRECOGNIZED` error for unknown endpoints/methods, which should help with client error handling
+* A bug has been fixed when updating push rules which could result in `database is locked` on SQLite
+
+## Dendrite 0.9.9 (2022-09-22)
+
+### Features
+
+* Dendrite will now try to keep HTTP connections open to remote federated servers for a few minutes after a request and attempt to reuse those connections where possible
+  * This should reduce the amount of time spent on TLS handshakes and often speed up requests to remote servers
+  * This new behaviour can be disabled with the `federation_api.disable_http_keepalives` option if needed
+* A number of dependencies have been updated
+
+### Fixes
+
+* A bug where the roomserver did not correctly propagate rewritten room state to downstream components (like the federation API and sync API) has been fixed, which could cause issues when performing a federated join to a previously left room
+* Event auth now correctly parses the `join_authorised_via_users_server` field in the membership event content
+* Database migrations should no longer produce unique constraint errors at Dendrite startup
+* The `origin` of device list updates should now be populated correctly
+* Send-to-device messages will no longer be dropped if we fail to publish them to specific devices
+* The roomserver query to find state after events will now always resolve state if there are multiple prev events
+* The roomserver will now return no memberships if querying history visibility for an event which has no state snapshot
+* The device list updater will now mark a device list as stale if a requesting device ID is not known
+* Transactions sent to appservices should no longer have accidental duplicated transaction IDs (contributed by [tak-hntlabs](https://github.com/tak-hntlabs))
+
+## Dendrite 0.9.8 (2022-09-12)
+
+### Important
+
+* This is a **security release** to fix a vulnerability where missing events retrieved from other servers did not have their signatures verified in all cases, affecting all versions of Dendrite before 0.9.8. Upgrading to this version is highly recommended. For more information, [see here](https://github.com/matrix-org/dendrite/security/advisories/GHSA-pfw4-xjgm-267c).
+
+### Features
+
+* The built-in NATS Server has been updated to the final 2.9.0 release version
+
+### Fixes
+
+* Dendrite will now correctly verify the signatures of events retrieved using `/get_missing_events`
+
+## Dendrite 0.9.7 (2022-09-09)
+
+### Features
+
+* Initial supporting code to enable full-text search has been merged (although not ready for use yet)
+* Newly created rooms now have higher default power levels for enabling encryption, setting server ACLs or sending tombstone events
+* Incoming signing key updates over federation are now queued in JetStream for processing, so that they cannot be dropped accidentally
+
+### Fixes
+
+* A race condition between the roomserver output events being generated, forward extremities being updated and room info being updated has been fixed
+* Appservices will no longer receive invite events which they are not interested in, which caused heavy load in some cases or excessive request sizes in others
+* A bug in state resolution v2 where events could incorrectly be classified as control events has been fixed
+* A bug in state resolution v2 where some specific events with unexpected non-empty state keys are dropped has been fixed
+* A bug in state resolution v2 when fetching auth events vs partial state has been fixed
+* Stale device lists should now be handled correctly for all user IDs, which may help with E2EE reliability
+* A number of database writer issues have been fixed in the user API and sync API, which should help to reduce `database is locked` errors with SQLite databases
+* Database migrations should now be detected more reliably to prevent unexpected errors at startup
+* A number of minor database transaction issues have been fixed, particularly for assigning NIDs in the roomserver, cleaning up device keys and cleaning up notifications
+* The database query for finding shared users in the sync API has been optimised, using significantly less CPU time as a result
+
+## Dendrite 0.9.6 (2022-09-01)
+
+### Features
+
+* The appservice API has been refactored for improved performance and stability
+  * The appservice database has been deprecated, as the roomserver output stream is now used as the data source instead
+* The `generate-config` tool has been updated to support additional scenarios, i.e. for CI configuration generation and generating both monolith and polylith skeleton config files
+
+### Fixes
+
+* The username length check has been fixed on new account creation
+* The length of the `type`, `sender`, `state_key` and `room_id` fields in events are now verified by number of codepoints rather than bytes, fixing the "Cat Overflow" bug
+* UTF-16 surrogate handling in the canonical JSON implementation has been fixed
+* A race condition when starting the keyserver has been fixed
+* A race condition when configuring HTTP servers and routing at startup has been fixed
+* A bug where the incorrect limit was used for lazy-loading memberships has been fixed
+* The number of push notifications will now be sent to the push gateway
+* A missing index causing slow performance on the sync API send-to-device table has been added (contributed by [PiotrKozimor](https://github.com/PiotrKozimor))
+* Event auth will now correctly check for the existence of the `"creator"` field in create events
+
+## Dendrite 0.9.5 (2022-08-25)
+
+### Fixes
+
+* The roomserver will now correctly unreject previously rejected events if necessary when reprocessing
+* The handling of event soft-failure has been improved on the roomserver input by no longer applying rejection rules and still calculating state before the event if possible
+* The federation `/state` and `/state_ids` endpoints should now return the correct error code when the state isn't known instead of returning a HTTP 500
+* The federation `/event` should now return outlier events correctly instead of returning a HTTP 500
+* A bug in the federation backoff allowing zero intervals has been corrected
+* The `create-account` utility will no longer error if the homeserver URL ends in a trailing slash
+* A regression in `/sync` introduced in 0.9.4 should be fixed
+
+## Dendrite 0.9.4 (2022-08-19)
+
+### Fixes
+
+* A bug in the roomserver around handling rejected outliers has been fixed
+* Backfilled events will now use the correct history visibility where possible
+* The device list updater backoff has been fixed, which should reduce the number of outbound HTTP requests and `Failed to query device keys for some users` log entries for dead servers
+* The `/sync` endpoint will no longer incorrectly return room entries for retired invites which could cause some rooms to show up in the client "Historical" section
+* The `/createRoom` endpoint will now correctly populate `is_direct` in invite membership events, which may help clients to classify direct messages correctly
+* The `create-account` tool will now log an error if the shared secret is not set in the Dendrite config
+* A couple of minor bugs have been fixed in the membership lazy-loading
+* Queued EDUs in the federation API are now cached properly
+
+## Dendrite 0.9.3 (2022-08-15)
+
+### Important
+
+* This is a **security release** to fix a vulnerability within event auth, affecting all versions of Dendrite before 0.9.3. Upgrading to this version is highly recommended. For more information, [see here](https://github.com/matrix-org/gomatrixserverlib/security/advisories/GHSA-grvv-h2f9-7v9c).
+
+### Fixes
+
+* Dendrite will now correctly parse the `"events_default"` power level value for event auth.
+
 ## Dendrite 0.9.2 (2022-08-12)
 
 ### Features
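Aside on the 0.10.0 entry above: full-text search has to be switched on in the `search` block of the `sync_api` config, and the index lives on disk rather than in the sync API database. A minimal sketch of what such a block might look like follows; the `language` key is named in the 0.10.2 notes, while the other key names (`enabled`, `index_path`) are assumptions based on the linked sample config and should be checked against your Dendrite version.

```yaml
# Hypothetical sync_api search block; key names other than `language` are assumptions.
sync_api:
  search:
    enabled: true               # full-text search is opt-in
    index_path: "./searchindex" # on-disk index location, separate from the sync API database
    language: "en"              # see the 0.10.2 note about non-English languages
```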
README.md (14 changed lines)

@@ -11,21 +11,20 @@ It intends to provide an **efficient**, **reliable** and **scalable** alternativ
   a [brand new Go test suite](https://github.com/matrix-org/complement).
 - Scalable: can run on multiple machines and eventually scale to massive homeserver deployments.
 
-As of October 2020 (current [progress below](#progress)), Dendrite has now entered **beta** which means:
+Dendrite is **beta** software, which means:
 
 - Dendrite is ready for early adopters. We recommend running in Monolith mode with a PostgreSQL database.
-- Dendrite has periodic semver releases. We intend to release new versions as we land significant features.
+- Dendrite has periodic releases. We intend to release new versions as we fix bugs and land significant features.
 - Dendrite supports database schema upgrades between releases. This means you should never lose your messages when upgrading Dendrite.
-- Breaking changes will not occur on minor releases. This means you can safely upgrade Dendrite without modifying your database or config file.
 
 This does not mean:
 
 - Dendrite is bug-free. It has not yet been battle-tested in the real world and so will be error prone initially.
 - Dendrite is feature-complete. There may be client or federation APIs that are not implemented.
-- Dendrite is ready for massive homeserver deployments. You cannot shard each microservice, only run each one on a different machine.
+- Dendrite is ready for massive homeserver deployments. There is no sharding of microservices (although it is possible to run them on separate machines) and there is no high-availability/clustering support.
 
 Currently, we expect Dendrite to function well for small (10s/100s of users) homeserver deployments as well as P2P Matrix nodes in-browser or on mobile devices.
-In the future, we will be able to scale up to gigantic servers (equivalent to matrix.org) via polylith mode.
+In the future, we will be able to scale up to gigantic servers (equivalent to `matrix.org`) via polylith mode.
 
 If you have further questions, please take a look at [our FAQ](docs/FAQ.md) or join us in:
 
@@ -80,7 +79,7 @@ $ ./bin/dendrite-monolith-server --tls-cert server.crt --tls-key server.key --co
 
 # Create an user account (add -admin for an admin user).
 # Specify the localpart only, e.g. 'alice' for '@alice:domain.com'
-$ ./bin/create-account --config dendrite.yaml --url http://localhost:8008 --username alice
+$ ./bin/create-account --config dendrite.yaml --username alice
 ```
 
 Then point your favourite Matrix client at `http://localhost:8008` or `https://localhost:8448`.
 
@@ -91,7 +90,7 @@ We use a script called Are We Synapse Yet which checks Sytest compliance rates.
 test rig with around 900 tests. The script works out how many of these tests are passing on Dendrite and it
 updates with CI. As of August 2022 we're at around 90% CS API coverage and 95% Federation coverage, though check
 CI for the latest numbers. In practice, this means you can communicate locally and via federation with Synapse
-servers such as matrix.org reasonably well, although there are still some missing features (like Search).
+servers such as matrix.org reasonably well, although there are still some missing features (like SSO and Third-party ID APIs).
 
 We are prioritising features that will benefit single-user homeservers first (e.g Receipts, E2E) rather
 than features that massive deployments may be interested in (OpenID, Guests, Admin APIs, AS API).
 
@@ -113,6 +112,7 @@ This means Dendrite supports amongst others:
 - Guests
 - User Directory
 - Presence
+- Fulltext search
 
 ## Contributing
@@ -1,10 +0,0 @@
-# Application Service
-
-This component interfaces with external [Application
-Services](https://matrix.org/docs/spec/application_service/unstable.html).
-This includes any HTTP endpoints that application services call, as well as talking
-to any HTTP endpoints that application services provide themselves.
-
-## Consumers
-
-This component consumes and filters events from the Roomserver Kafka stream, passing on any necessary events to subscribing application services.
@@ -18,7 +18,6 @@ import (
     "context"
     "crypto/tls"
     "net/http"
-    "sync"
     "time"
 
     "github.com/gorilla/mux"
@@ -28,9 +27,6 @@ import (
     "github.com/matrix-org/dendrite/appservice/consumers"
     "github.com/matrix-org/dendrite/appservice/inthttp"
     "github.com/matrix-org/dendrite/appservice/query"
-    "github.com/matrix-org/dendrite/appservice/storage"
-    "github.com/matrix-org/dendrite/appservice/types"
-    "github.com/matrix-org/dendrite/appservice/workers"
     roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
     "github.com/matrix-org/dendrite/setup/base"
     "github.com/matrix-org/dendrite/setup/config"
@@ -59,57 +55,40 @@ func NewInternalAPI(
             Proxy: http.ProxyFromEnvironment,
         },
     }
-    js, _ := base.NATS.Prepare(base.ProcessContext, &base.Cfg.Global.JetStream)
+    // Create appserivce query API with an HTTP client that will be used for all
+    // outbound and inbound requests (inbound only for the internal API)
+    appserviceQueryAPI := &query.AppServiceQueryAPI{
+        HTTPClient: client,
+        Cfg:        &base.Cfg.AppServiceAPI,
+    }
 
-    // Create a connection to the appservice postgres DB
-    appserviceDB, err := storage.NewDatabase(base, &base.Cfg.AppServiceAPI.Database)
-    if err != nil {
-        logrus.WithError(err).Panicf("failed to connect to appservice db")
+    if len(base.Cfg.Derived.ApplicationServices) == 0 {
+        return appserviceQueryAPI
     }
 
     // Wrap application services in a type that relates the application service and
     // a sync.Cond object that can be used to notify workers when there are new
     // events to be sent out.
-    workerStates := make([]types.ApplicationServiceWorkerState, len(base.Cfg.Derived.ApplicationServices))
-    for i, appservice := range base.Cfg.Derived.ApplicationServices {
-        m := sync.Mutex{}
-        ws := types.ApplicationServiceWorkerState{
-            AppService: appservice,
-            Cond:       sync.NewCond(&m),
-        }
-        workerStates[i] = ws
+    for _, appservice := range base.Cfg.Derived.ApplicationServices {
 
         // Create bot account for this AS if it doesn't already exist
-        if err = generateAppServiceAccount(userAPI, appservice); err != nil {
+        if err := generateAppServiceAccount(userAPI, appservice); err != nil {
            logrus.WithFields(logrus.Fields{
                "appservice": appservice.ID,
            }).WithError(err).Panicf("failed to generate bot account for appservice")
        }
    }
 
-    // Create appserivce query API with an HTTP client that will be used for all
-    // outbound and inbound requests (inbound only for the internal API)
-    appserviceQueryAPI := &query.AppServiceQueryAPI{
-        HTTPClient: client,
-        Cfg:        base.Cfg,
-    }
 
     // Only consume if we actually have ASes to track, else we'll just chew cycles needlessly.
     // We can't add ASes at runtime so this is safe to do.
-    if len(workerStates) > 0 {
+    js, _ := base.NATS.Prepare(base.ProcessContext, &base.Cfg.Global.JetStream)
     consumer := consumers.NewOutputRoomEventConsumer(
-        base.ProcessContext, base.Cfg, js, appserviceDB,
-        rsAPI, workerStates,
+        base.ProcessContext, &base.Cfg.AppServiceAPI,
+        client, js, rsAPI,
     )
     if err := consumer.Start(); err != nil {
        logrus.WithError(err).Panicf("failed to start appservice roomserver consumer")
-    }
    }
 
-    // Create application service transaction workers
-    if err := workers.SetupTransactionWorkers(client, appserviceDB, workerStates); err != nil {
-        logrus.WithError(err).Panicf("failed to start app service transaction workers")
-    }
    return appserviceQueryAPI
 }
@@ -15,193 +15,229 @@
 package consumers
 
 import (
+    "bytes"
     "context"
     "encoding/json"
+    "fmt"
+    "math"
+    "net/http"
+    "net/url"
+    "strconv"
+    "time"
+
+    "github.com/matrix-org/gomatrixserverlib"
+    "github.com/nats-io/nats.go"
 
-    "github.com/matrix-org/dendrite/appservice/storage"
-    "github.com/matrix-org/dendrite/appservice/types"
     "github.com/matrix-org/dendrite/roomserver/api"
     "github.com/matrix-org/dendrite/setup/config"
     "github.com/matrix-org/dendrite/setup/jetstream"
     "github.com/matrix-org/dendrite/setup/process"
-    "github.com/matrix-org/gomatrixserverlib"
-    "github.com/nats-io/nats.go"
 
     log "github.com/sirupsen/logrus"
 )
 
 // OutputRoomEventConsumer consumes events that originated in the room server.
 type OutputRoomEventConsumer struct {
     ctx       context.Context
-    jetstream nats.JetStreamContext
-    durable   string
-    topic     string
-    asDB      storage.Database
+    cfg       *config.AppServiceAPI
+    client    *http.Client
+    jetstream nats.JetStreamContext
+    topic     string
     rsAPI     api.AppserviceRoomserverAPI
-    serverName   string
-    workerStates []types.ApplicationServiceWorkerState
+}
+
+type appserviceState struct {
+    *config.ApplicationService
+    backoff int
 }
 
 // NewOutputRoomEventConsumer creates a new OutputRoomEventConsumer. Call
 // Start() to begin consuming from room servers.
 func NewOutputRoomEventConsumer(
     process *process.ProcessContext,
-    cfg *config.Dendrite,
+    cfg *config.AppServiceAPI,
+    client *http.Client,
     js nats.JetStreamContext,
-    appserviceDB storage.Database,
     rsAPI api.AppserviceRoomserverAPI,
-    workerStates []types.ApplicationServiceWorkerState,
 ) *OutputRoomEventConsumer {
     return &OutputRoomEventConsumer{
         ctx:       process.Context(),
-        jetstream: js,
-        durable:   cfg.Global.JetStream.Durable("AppserviceRoomserverConsumer"),
-        topic:     cfg.Global.JetStream.Prefixed(jetstream.OutputRoomEvent),
-        asDB:      appserviceDB,
+        cfg:       cfg,
+        client:    client,
+        jetstream: js,
+        topic:     cfg.Matrix.JetStream.Prefixed(jetstream.OutputRoomEvent),
         rsAPI:     rsAPI,
-        serverName:   string(cfg.Global.ServerName),
-        workerStates: workerStates,
     }
 }
 
 // Start consuming from room servers
 func (s *OutputRoomEventConsumer) Start() error {
-    return jetstream.JetStreamConsumer(
-        s.ctx, s.jetstream, s.topic, s.durable, s.onMessage,
-        nats.DeliverAll(), nats.ManualAck(),
-    )
+    for _, as := range s.cfg.Derived.ApplicationServices {
+        appsvc := as
+        state := &appserviceState{
+            ApplicationService: &appsvc,
+        }
+        token := jetstream.Tokenise(as.ID)
+        if err := jetstream.JetStreamConsumer(
+            s.ctx, s.jetstream, s.topic,
+            s.cfg.Matrix.JetStream.Durable("Appservice_"+token),
+            50, // maximum number of events to send in a single transaction
+            func(ctx context.Context, msgs []*nats.Msg) bool {
+                return s.onMessage(ctx, state, msgs)
+            },
+            nats.DeliverNew(), nats.ManualAck(),
+        ); err != nil {
+            return fmt.Errorf("failed to create %q consumer: %w", token, err)
+        }
+    }
+    return nil
 }
 
 // onMessage is called when the appservice component receives a new event from
 // the room server output log.
-func (s *OutputRoomEventConsumer) onMessage(ctx context.Context, msg *nats.Msg) bool {
-    // Parse out the event JSON
-    var output api.OutputEvent
-    if err := json.Unmarshal(msg.Data, &output); err != nil {
-        // If the message was invalid, log it and move on to the next message in the stream
-        log.WithError(err).Errorf("roomserver output log: message parse failure")
-        return true
-    }
-
-    log.WithFields(log.Fields{
-        "type": output.Type,
-    }).Debug("Got a message in OutputRoomEventConsumer")
-
-    events := []*gomatrixserverlib.HeaderedEvent{}
-    if output.Type == api.OutputTypeNewRoomEvent && output.NewRoomEvent != nil {
-        newEventID := output.NewRoomEvent.Event.EventID()
-        events = append(events, output.NewRoomEvent.Event)
-        if len(output.NewRoomEvent.AddsStateEventIDs) > 0 {
-            eventsReq := &api.QueryEventsByIDRequest{
-                EventIDs: make([]string, 0, len(output.NewRoomEvent.AddsStateEventIDs)),
-            }
-            eventsRes := &api.QueryEventsByIDResponse{}
-            for _, eventID := range output.NewRoomEvent.AddsStateEventIDs {
-                if eventID != newEventID {
-                    eventsReq.EventIDs = append(eventsReq.EventIDs, eventID)
-                }
-            }
-            if len(eventsReq.EventIDs) > 0 {
-                if err := s.rsAPI.QueryEventsByID(s.ctx, eventsReq, eventsRes); err != nil {
-                    return false
-                }
-                events = append(events, eventsRes.Events...)
-            }
-        }
-    } else if output.Type == api.OutputTypeNewInviteEvent && output.NewInviteEvent != nil {
-        events = append(events, output.NewInviteEvent.Event)
-    } else {
-        log.WithFields(log.Fields{
-            "type": output.Type,
-        }).Debug("appservice OutputRoomEventConsumer ignoring event", string(msg.Data))
+func (s *OutputRoomEventConsumer) onMessage(
+    ctx context.Context, state *appserviceState, msgs []*nats.Msg,
+) bool {
+    log.WithField("appservice", state.ID).Tracef("Appservice worker received %d message(s) from roomserver", len(msgs))
+    events := make([]*gomatrixserverlib.HeaderedEvent, 0, len(msgs))
+    for _, msg := range msgs {
+        // Only handle events we care about
+        receivedType := api.OutputType(msg.Header.Get(jetstream.RoomEventType))
+        if receivedType != api.OutputTypeNewRoomEvent && receivedType != api.OutputTypeNewInviteEvent {
+            continue
+        }
+        // Parse out the event JSON
+        var output api.OutputEvent
+        if err := json.Unmarshal(msg.Data, &output); err != nil {
+            // If the message was invalid, log it and move on to the next message in the stream
+            log.WithField("appservice", state.ID).WithError(err).Errorf("Appservice failed to parse message, ignoring")
+            continue
+        }
+        switch output.Type {
+        case api.OutputTypeNewRoomEvent:
+            if output.NewRoomEvent == nil || !s.appserviceIsInterestedInEvent(ctx, output.NewRoomEvent.Event, state.ApplicationService) {
+                continue
+            }
+            events = append(events, output.NewRoomEvent.Event)
+            if len(output.NewRoomEvent.AddsStateEventIDs) > 0 {
+                newEventID := output.NewRoomEvent.Event.EventID()
+                eventsReq := &api.QueryEventsByIDRequest{
+                    EventIDs: make([]string, 0, len(output.NewRoomEvent.AddsStateEventIDs)),
+                }
+                eventsRes := &api.QueryEventsByIDResponse{}
+                for _, eventID := range output.NewRoomEvent.AddsStateEventIDs {
+                    if eventID != newEventID {
+                        eventsReq.EventIDs = append(eventsReq.EventIDs, eventID)
+                    }
+                }
+                if len(eventsReq.EventIDs) > 0 {
+                    if err := s.rsAPI.QueryEventsByID(s.ctx, eventsReq, eventsRes); err != nil {
+                        log.WithError(err).Errorf("s.rsAPI.QueryEventsByID failed")
+                        return false
+                    }
+                    events = append(events, eventsRes.Events...)
+                }
+            }
+
+        case api.OutputTypeNewInviteEvent:
+            if output.NewInviteEvent == nil || !s.appserviceIsInterestedInEvent(ctx, output.NewInviteEvent.Event, state.ApplicationService) {
+                continue
+            }
+            events = append(events, output.NewInviteEvent.Event)
+
+        default:
+            continue
+        }
+    }
+
+    // If there are no events selected for sending then we should
+    // ack the messages so that we don't get sent them again in the
+    // future.
+    if len(events) == 0 {
        return true
    }
 
-    // Send event to any relevant application services
-    if err := s.filterRoomserverEvents(context.TODO(), events); err != nil {
-        log.WithError(err).Errorf("roomserver output log: filter error")
-        return true
+    txnID := ""
+    // Try to get the message metadata, if we're able to, use the timestamp as the txnID
+    metadata, err := msgs[0].Metadata()
+    if err == nil {
+        txnID = strconv.Itoa(int(metadata.Timestamp.UnixNano()))
    }
 
-    return true
+    // Send event to any relevant application services. If we hit
+    // an error here, return false, so that we negatively ack.
+    log.WithField("appservice", state.ID).Debugf("Appservice worker sending %d events(s) from roomserver", len(events))
+    return s.sendEvents(ctx, state, events, txnID) == nil
 }
 
-// filterRoomserverEvents takes in events and decides whether any of them need
-// to be passed on to an external application service. It does this by checking
-// each namespace of each registered application service, and if there is a
-// match, adds the event to the queue for events to be sent to a particular
-// application service.
-func (s *OutputRoomEventConsumer) filterRoomserverEvents(
-    ctx context.Context,
+// sendEvents passes events to the appservice by using the transactions
+// endpoint. It will block for the backoff period if necessary.
+func (s *OutputRoomEventConsumer) sendEvents(
+    ctx context.Context, state *appserviceState,
     events []*gomatrixserverlib.HeaderedEvent,
+    txnID string,
 ) error {
-    for _, ws := range s.workerStates {
-        for _, event := range events {
-            // Check if this event is interesting to this application service
-            if s.appserviceIsInterestedInEvent(ctx, event, ws.AppService) {
-                // Queue this event to be sent off to the application service
-                if err := s.asDB.StoreEvent(ctx, ws.AppService.ID, event); err != nil {
-                    log.WithError(err).Warn("failed to insert incoming event into appservices database")
-                    return err
-                } else {
-                    // Tell our worker to send out new messages by updating remaining message
-                    // count and waking them up with a broadcast
-                    ws.NotifyNewEvents()
-                }
-            }
-        }
+    // Create the transaction body.
+    transaction, err := json.Marshal(
+        gomatrixserverlib.ApplicationServiceTransaction{
+            Events: gomatrixserverlib.HeaderedToClientEvents(events, gomatrixserverlib.FormatAll),
+        },
+    )
+    if err != nil {
+        return err
    }
+
+    // If txnID is not defined, generate one from the events.
+    if txnID == "" {
+        txnID = fmt.Sprintf("%d_%d", events[0].Event.OriginServerTS(), len(transaction))
+    }
+
+    // Send the transaction to the appservice.
+    // https://matrix.org/docs/spec/application_service/r0.1.2#put-matrix-app-v1-transactions-txnid
+    address := fmt.Sprintf("%s/transactions/%s?access_token=%s", state.URL, txnID, url.QueryEscape(state.HSToken))
+    req, err := http.NewRequestWithContext(ctx, "PUT", address, bytes.NewBuffer(transaction))
+    if err != nil {
+        return err
+    }
+    req.Header.Set("Content-Type", "application/json")
+    resp, err := s.client.Do(req)
+    if err != nil {
+        return state.backoffAndPause(err)
+    }
+
+    // If the response was fine then we can clear any backoffs in place and
+    // report that everything was OK. Otherwise, back off for a while.
+    switch resp.StatusCode {
+    case http.StatusOK:
+        state.backoff = 0
+    default:
+        return state.backoffAndPause(fmt.Errorf("received HTTP status code %d from appservice", resp.StatusCode))
+    }
    return nil
 }
 
-// appserviceJoinedAtEvent returns a boolean depending on whether a given
-// appservice has membership at the time a given event was created.
-func (s *OutputRoomEventConsumer) appserviceJoinedAtEvent(ctx context.Context, event *gomatrixserverlib.HeaderedEvent, appservice config.ApplicationService) bool {
-    // TODO: This is only checking the current room state, not the state at
-    // the event in question. Pretty sure this is what Synapse does too, but
-    // until we have a lighter way of checking the state before the event that
-    // doesn't involve state res, then this is probably OK.
-    membershipReq := &api.QueryMembershipsForRoomRequest{
-        RoomID:     event.RoomID(),
-        JoinedOnly: true,
+// backoff pauses the calling goroutine for a 2^some backoff exponent seconds
+func (s *appserviceState) backoffAndPause(err error) error {
+    if s.backoff < 6 {
+        s.backoff++
    }
-    membershipRes := &api.QueryMembershipsForRoomResponse{}
-
-    // XXX: This could potentially race if the state for the event is not known yet
-    // e.g. the event came over federation but we do not have the full state persisted.
-    if err := s.rsAPI.QueryMembershipsForRoom(ctx, membershipReq, membershipRes); err == nil {
-        for _, ev := range membershipRes.JoinEvents {
-            var membership gomatrixserverlib.MemberContent
-            if err = json.Unmarshal(ev.Content, &membership); err != nil || ev.StateKey == nil {
-                continue
-            }
-            if appservice.IsInterestedInUserID(*ev.StateKey) {
-                return true
-            }
-        }
-    } else {
-        log.WithFields(log.Fields{
-            "room_id": event.RoomID(),
-        }).WithError(err).Errorf("Unable to get membership for room")
-    }
-    return false
+    duration := time.Second * time.Duration(math.Pow(2, float64(s.backoff)))
+    log.WithField("appservice", s.ID).WithError(err).Errorf("Unable to send transaction to appservice, backing off for %s", duration.String())
+    time.Sleep(duration)
+    return err
 }
 
 // appserviceIsInterestedInEvent returns a boolean depending on whether a given
 // event falls within one of a given application service's namespaces.
 //
 // TODO: This should be cached, see https://github.com/matrix-org/dendrite/issues/1682
-func (s *OutputRoomEventConsumer) appserviceIsInterestedInEvent(ctx context.Context, event *gomatrixserverlib.HeaderedEvent, appservice config.ApplicationService) bool {
-    // No reason to queue events if they'll never be sent to the application
-    // service
-    if appservice.URL == "" {
+func (s *OutputRoomEventConsumer) appserviceIsInterestedInEvent(ctx context.Context, event *gomatrixserverlib.HeaderedEvent, appservice *config.ApplicationService) bool {
+    switch {
+    case appservice.URL == "":
        return false
-    }
-
-    // Check Room ID and Sender of the event
-    if appservice.IsInterestedInUserID(event.Sender()) ||
-        appservice.IsInterestedInRoomID(event.RoomID()) {
+    case appservice.IsInterestedInUserID(event.Sender()):
+        return true
+    case appservice.IsInterestedInRoomID(event.RoomID()):
        return true
    }
 
@@ -222,10 +258,54 @@ func (s *OutputRoomEventConsumer) appserviceIsInterestedInEvent(ctx context.Cont
        }
    } else {
        log.WithFields(log.Fields{
-            "room_id": event.RoomID(),
+            "appservice": appservice.ID,
+            "room_id":    event.RoomID(),
        }).WithError(err).Errorf("Unable to get aliases for room")
    }
 
    // Check if any of the members in the room match the appservice
    return s.appserviceJoinedAtEvent(ctx, event, appservice)
 }
+
+// appserviceJoinedAtEvent returns a boolean depending on whether a given
+// appservice has membership at the time a given event was created.
+func (s *OutputRoomEventConsumer) appserviceJoinedAtEvent(ctx context.Context, event *gomatrixserverlib.HeaderedEvent, appservice *config.ApplicationService) bool {
+    // TODO: This is only checking the current room state, not the state at
+    // the event in question. Pretty sure this is what Synapse does too, but
+    // until we have a lighter way of checking the state before the event that
+    // doesn't involve state res, then this is probably OK.
+    membershipReq := &api.QueryMembershipsForRoomRequest{
+        RoomID:     event.RoomID(),
+        JoinedOnly: true,
+    }
+    membershipRes := &api.QueryMembershipsForRoomResponse{}
+
+    // XXX: This could potentially race if the state for the event is not known yet
+    // e.g. the event came over federation but we do not have the full state persisted.
+    if err := s.rsAPI.QueryMembershipsForRoom(ctx, membershipReq, membershipRes); err == nil {
+        for _, ev := range membershipRes.JoinEvents {
+            switch {
+            case ev.StateKey == nil:
+                continue
+            case ev.Type != gomatrixserverlib.MRoomMember:
+                continue
+            }
+            var membership gomatrixserverlib.MemberContent
+            err = json.Unmarshal(ev.Content, &membership)
+            switch {
+            case err != nil:
+                continue
+            case membership.Membership == gomatrixserverlib.Join:
+                if appservice.IsInterestedInUserID(*ev.StateKey) {
+                    return true
+                }
+            }
+        }
+    } else {
+        log.WithFields(log.Fields{
+            "appservice": appservice.ID,
+            "room_id":    event.RoomID(),
+        }).WithError(err).Errorf("Unable to get membership for room")
+    }
+    return false
+}
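For context on the appservice consumer rewrite above: when a transaction fails, the new `backoffAndPause` helper sleeps for 2^n seconds, where n grows by one per consecutive failure and stops growing at 6. A small standalone Go sketch (not part of the commit) that prints the resulting retry schedule:

```go
// Standalone illustration of the retry schedule implied by backoffAndPause:
// 2^n seconds per failed attempt, with n capped at 6 (so at most 64s).
package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	backoff := 0
	for failure := 1; failure <= 8; failure++ {
		if backoff < 6 {
			backoff++
		}
		delay := time.Duration(math.Pow(2, float64(backoff))) * time.Second
		fmt.Printf("failure %d: wait %s before retrying\n", failure, delay)
	}
	// Prints delays of 2s, 4s, 8s, 16s, 32s, 64s, 64s, 64s.
}
```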
@ -33,7 +33,7 @@ const userIDExistsPath = "/users/"
|
||||||
// AppServiceQueryAPI is an implementation of api.AppServiceQueryAPI
|
// AppServiceQueryAPI is an implementation of api.AppServiceQueryAPI
|
||||||
type AppServiceQueryAPI struct {
|
type AppServiceQueryAPI struct {
|
||||||
HTTPClient *http.Client
|
HTTPClient *http.Client
|
||||||
Cfg *config.Dendrite
|
Cfg *config.AppServiceAPI
|
||||||
}
|
}
|
||||||
|
|
||||||
// RoomAliasExists performs a request to '/room/{roomAlias}' on all known
|
// RoomAliasExists performs a request to '/room/{roomAlias}' on all known
|
||||||
|
|
|
||||||
|
|
@@ -1,30 +0,0 @@
// Copyright 2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
    "context"

    "github.com/matrix-org/gomatrixserverlib"
)

type Database interface {
    StoreEvent(ctx context.Context, appServiceID string, event *gomatrixserverlib.HeaderedEvent) error
    GetEventsWithAppServiceID(ctx context.Context, appServiceID string, limit int) (int, int, []gomatrixserverlib.HeaderedEvent, bool, error)
    CountEventsWithAppServiceID(ctx context.Context, appServiceID string) (int, error)
    UpdateTxnIDForEvents(ctx context.Context, appserviceID string, maxID, txnID int) error
    RemoveEventsBeforeAndIncludingID(ctx context.Context, appserviceID string, eventTableID int) error
    GetLatestTxnID(ctx context.Context) (int, error)
}
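For orientation, the interface above behaves like a per-appservice outbox: store, count, read a batch, then remove what was delivered. A toy in-memory model of that contract (simplified to string payloads and without transaction-ID bookkeeping; all names here are invented for the sketch, not part of Dendrite) looks like this:

// Toy in-memory appservice event queue illustrating the Database contract.
package main

import "fmt"

type memQueue struct {
    events map[string][]string // appservice ID -> queued events
}

func (q *memQueue) StoreEvent(asID, ev string)  { q.events[asID] = append(q.events[asID], ev) }
func (q *memQueue) CountEvents(asID string) int { return len(q.events[asID]) }

func (q *memQueue) GetEvents(asID string, limit int) []string {
    evs := q.events[asID]
    if len(evs) > limit {
        evs = evs[:limit]
    }
    return evs
}

func (q *memQueue) RemoveEvents(asID string, n int) { q.events[asID] = q.events[asID][n:] }

func main() {
    q := &memQueue{events: map[string][]string{}}
    q.StoreEvent("ircbridge", "m.room.message #1")
    q.StoreEvent("ircbridge", "m.room.message #2")
    batch := q.GetEvents("ircbridge", 50)   // analogous to GetEventsWithAppServiceID
    fmt.Println(batch)
    q.RemoveEvents("ircbridge", len(batch)) // analogous to RemoveEventsBeforeAndIncludingID
    fmt.Println(q.CountEvents("ircbridge")) // 0
}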
@@ -1,256 +0,0 @@
// Copyright 2018 New Vector Ltd
// Copyright 2019-2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package postgres

import (
    "context"
    "database/sql"
    "encoding/json"
    "time"

    "github.com/matrix-org/gomatrixserverlib"
    log "github.com/sirupsen/logrus"
)

const appserviceEventsSchema = `
-- Stores events to be sent to application services
CREATE TABLE IF NOT EXISTS appservice_events (
    -- An auto-incrementing id unique to each event in the table
    id BIGSERIAL NOT NULL PRIMARY KEY,
    -- The ID of the application service the event will be sent to
    as_id TEXT NOT NULL,
    -- JSON representation of the event
    headered_event_json TEXT NOT NULL,
    -- The ID of the transaction that this event is a part of
    txn_id BIGINT NOT NULL
);

CREATE INDEX IF NOT EXISTS appservice_events_as_id ON appservice_events(as_id);
`

const selectEventsByApplicationServiceIDSQL = "" +
    "SELECT id, headered_event_json, txn_id " +
    "FROM appservice_events WHERE as_id = $1 ORDER BY txn_id DESC, id ASC"

const countEventsByApplicationServiceIDSQL = "" +
    "SELECT COUNT(id) FROM appservice_events WHERE as_id = $1"

const insertEventSQL = "" +
    "INSERT INTO appservice_events(as_id, headered_event_json, txn_id) " +
    "VALUES ($1, $2, $3)"

const updateTxnIDForEventsSQL = "" +
    "UPDATE appservice_events SET txn_id = $1 WHERE as_id = $2 AND id <= $3"

const deleteEventsBeforeAndIncludingIDSQL = "" +
    "DELETE FROM appservice_events WHERE as_id = $1 AND id <= $2"

const (
    // A transaction ID number that no transaction should ever have. Used for
    // checking against the default value.
    invalidTxnID = -2
)

type eventsStatements struct {
    selectEventsByApplicationServiceIDStmt *sql.Stmt
    countEventsByApplicationServiceIDStmt  *sql.Stmt
    insertEventStmt                        *sql.Stmt
    updateTxnIDForEventsStmt               *sql.Stmt
    deleteEventsBeforeAndIncludingIDStmt   *sql.Stmt
}

func (s *eventsStatements) prepare(db *sql.DB) (err error) {
    _, err = db.Exec(appserviceEventsSchema)
    if err != nil {
        return
    }

    if s.selectEventsByApplicationServiceIDStmt, err = db.Prepare(selectEventsByApplicationServiceIDSQL); err != nil {
        return
    }
    if s.countEventsByApplicationServiceIDStmt, err = db.Prepare(countEventsByApplicationServiceIDSQL); err != nil {
        return
    }
    if s.insertEventStmt, err = db.Prepare(insertEventSQL); err != nil {
        return
    }
    if s.updateTxnIDForEventsStmt, err = db.Prepare(updateTxnIDForEventsSQL); err != nil {
        return
    }
    if s.deleteEventsBeforeAndIncludingIDStmt, err = db.Prepare(deleteEventsBeforeAndIncludingIDSQL); err != nil {
        return
    }

    return
}

// selectEventsByApplicationServiceID takes in an application service ID and
// returns a slice of events that need to be sent to that application service,
// as well as an int later used to remove these same events from the database
// once successfully sent to an application service.
func (s *eventsStatements) selectEventsByApplicationServiceID(
    ctx context.Context,
    applicationServiceID string,
    limit int,
) (
    txnID, maxID int,
    events []gomatrixserverlib.HeaderedEvent,
    eventsRemaining bool,
    err error,
) {
    defer func() {
        if err != nil {
            log.WithFields(log.Fields{
                "appservice": applicationServiceID,
            }).WithError(err).Fatalf("appservice unable to select new events to send")
        }
    }()
    // Retrieve events from the database. Unsuccessfully sent events first
    eventRows, err := s.selectEventsByApplicationServiceIDStmt.QueryContext(ctx, applicationServiceID)
    if err != nil {
        return
    }
    defer checkNamedErr(eventRows.Close, &err)
    events, maxID, txnID, eventsRemaining, err = retrieveEvents(eventRows, limit)
    if err != nil {
        return
    }

    return
}

// checkNamedErr calls fn and overwrites err if it was nil and fn returned non-nil
func checkNamedErr(fn func() error, err *error) {
    if e := fn(); e != nil && *err == nil {
        *err = e
    }
}

func retrieveEvents(eventRows *sql.Rows, limit int) (events []gomatrixserverlib.HeaderedEvent, maxID, txnID int, eventsRemaining bool, err error) {
    // Get current time for use in calculating event age
    nowMilli := time.Now().UnixNano() / int64(time.Millisecond)

    // Iterate through each row and store event contents
    // If txn_id changes dramatically, we've switched from collecting old events to
    // new ones. Send back those events first.
    lastTxnID := invalidTxnID
    for eventsProcessed := 0; eventRows.Next(); {
        var event gomatrixserverlib.HeaderedEvent
        var eventJSON []byte
        var id int
        err = eventRows.Scan(
            &id,
            &eventJSON,
            &txnID,
        )
        if err != nil {
            return nil, 0, 0, false, err
        }

        // Unmarshal eventJSON
        if err = json.Unmarshal(eventJSON, &event); err != nil {
            return nil, 0, 0, false, err
        }

        // If txnID has changed on this event from the previous event, then we've
        // reached the end of a transaction's events. Return only those events.
        if lastTxnID > invalidTxnID && lastTxnID != txnID {
            return events, maxID, lastTxnID, true, nil
        }
        lastTxnID = txnID

        // Limit events that aren't part of an old transaction
        if txnID == -1 {
            // Return if we've hit the limit
            if eventsProcessed++; eventsProcessed > limit {
                return events, maxID, lastTxnID, true, nil
            }
        }

        if id > maxID {
            maxID = id
        }

        // Portion of the event that is unsigned due to rapid change
        // TODO: Consider removing age as not many app services use it
        if err = event.SetUnsignedField("age", nowMilli-int64(event.OriginServerTS())); err != nil {
            return nil, 0, 0, false, err
        }

        events = append(events, event)
    }

    return
}

// countEventsByApplicationServiceID returns the number of events currently
// queued for the given application service ID.
func (s *eventsStatements) countEventsByApplicationServiceID(
    ctx context.Context,
    appServiceID string,
) (int, error) {
    var count int
    err := s.countEventsByApplicationServiceIDStmt.QueryRowContext(ctx, appServiceID).Scan(&count)
    if err != nil && err != sql.ErrNoRows {
        return 0, err
    }

    return count, nil
}

// insertEvent inserts an event mapped to its corresponding application service
// IDs into the db.
func (s *eventsStatements) insertEvent(
    ctx context.Context,
    appServiceID string,
    event *gomatrixserverlib.HeaderedEvent,
) (err error) {
    // Convert event to JSON before inserting
    eventJSON, err := json.Marshal(event)
    if err != nil {
        return err
    }

    _, err = s.insertEventStmt.ExecContext(
        ctx,
        appServiceID,
        eventJSON,
        -1, // No transaction ID yet
    )
    return
}

// updateTxnIDForEvents sets the transactionID for a collection of events. Done
// before sending them to an AppService. Referenced before sending to make sure
// we aren't constructing multiple transactions with the same events.
func (s *eventsStatements) updateTxnIDForEvents(
    ctx context.Context,
    appserviceID string,
    maxID, txnID int,
) (err error) {
    _, err = s.updateTxnIDForEventsStmt.ExecContext(ctx, txnID, appserviceID, maxID)
    return
}

// deleteEventsBeforeAndIncludingID removes events matching given IDs from the database.
func (s *eventsStatements) deleteEventsBeforeAndIncludingID(
    ctx context.Context,
    appserviceID string,
    eventTableID int,
) (err error) {
    _, err = s.deleteEventsBeforeAndIncludingIDStmt.ExecContext(ctx, appserviceID, eventTableID)
    return
}
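The batching rules in retrieveEvents (events already stamped with a transaction ID are re-sent as one unit; unstamped events with txn_id = -1 are capped at the limit) can be seen in isolation with a small standalone sketch over (id, txnID) pairs; the row type and example values below are invented for the illustration:

// Standalone sketch of the retrieveEvents grouping logic, under the same
// convention as the table above: txn_id == -1 means "not yet assigned to a
// transaction", anything else means the event already belongs to one.
package main

import "fmt"

type row struct{ id, txnID int }

func batch(rows []row, limit int) (batchIDs []int, remaining bool) {
    const invalidTxnID = -2
    lastTxnID := invalidTxnID
    processed := 0
    for _, r := range rows {
        // A change of txn_id marks the end of a previously assigned transaction.
        if lastTxnID > invalidTxnID && lastTxnID != r.txnID {
            return batchIDs, true
        }
        lastTxnID = r.txnID
        // Only unassigned events count towards the batch size limit.
        if r.txnID == -1 {
            if processed++; processed > limit {
                return batchIDs, true
            }
        }
        batchIDs = append(batchIDs, r.id)
    }
    return batchIDs, false
}

func main() {
    // Rows come back ordered by txn_id DESC, id ASC, so an old unsent
    // transaction (txn_id 7) is retried before new events (txn_id -1).
    rows := []row{{1, 7}, {2, 7}, {3, -1}, {4, -1}, {5, -1}}
    fmt.Println(batch(rows, 2)) // [1 2] true: resend the old transaction first
}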
@@ -1,115 +0,0 @@
// Copyright 2018 New Vector Ltd
// Copyright 2019-2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package postgres

import (
    "context"
    "database/sql"

    // Import postgres database driver
    _ "github.com/lib/pq"
    "github.com/matrix-org/dendrite/internal/sqlutil"
    "github.com/matrix-org/dendrite/setup/base"
    "github.com/matrix-org/dendrite/setup/config"
    "github.com/matrix-org/gomatrixserverlib"
)

// Database stores events intended to be later sent to application services
type Database struct {
    events eventsStatements
    txnID  txnStatements
    db     *sql.DB
    writer sqlutil.Writer
}

// NewDatabase opens a new database
func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions) (*Database, error) {
    var result Database
    var err error
    if result.db, result.writer, err = base.DatabaseConnection(dbProperties, sqlutil.NewDummyWriter()); err != nil {
        return nil, err
    }
    if err = result.prepare(); err != nil {
        return nil, err
    }
    return &result, nil
}

func (d *Database) prepare() error {
    if err := d.events.prepare(d.db); err != nil {
        return err
    }

    return d.txnID.prepare(d.db)
}

// StoreEvent takes in a gomatrixserverlib.HeaderedEvent and stores it in the database
// for a transaction worker to pull and later send to an application service.
func (d *Database) StoreEvent(
    ctx context.Context,
    appServiceID string,
    event *gomatrixserverlib.HeaderedEvent,
) error {
    return d.events.insertEvent(ctx, appServiceID, event)
}

// GetEventsWithAppServiceID returns a slice of events and their IDs intended to
// be sent to an application service given its ID.
func (d *Database) GetEventsWithAppServiceID(
    ctx context.Context,
    appServiceID string,
    limit int,
) (int, int, []gomatrixserverlib.HeaderedEvent, bool, error) {
    return d.events.selectEventsByApplicationServiceID(ctx, appServiceID, limit)
}

// CountEventsWithAppServiceID returns the number of events destined for an
// application service given its ID.
func (d *Database) CountEventsWithAppServiceID(
    ctx context.Context,
    appServiceID string,
) (int, error) {
    return d.events.countEventsByApplicationServiceID(ctx, appServiceID)
}

// UpdateTxnIDForEvents takes in an application service ID and a transaction ID
// and stores them in the DB, unless the pair already exists, in
// which case it updates them.
func (d *Database) UpdateTxnIDForEvents(
    ctx context.Context,
    appserviceID string,
    maxID, txnID int,
) error {
    return d.events.updateTxnIDForEvents(ctx, appserviceID, maxID, txnID)
}

// RemoveEventsBeforeAndIncludingID removes all events from the database that
// are less than or equal to a given maximum ID. IDs here are implemented as a
// serial, thus this should always delete events in chronological order.
func (d *Database) RemoveEventsBeforeAndIncludingID(
    ctx context.Context,
    appserviceID string,
    eventTableID int,
) error {
    return d.events.deleteEventsBeforeAndIncludingID(ctx, appserviceID, eventTableID)
}

// GetLatestTxnID returns the latest available transaction id
func (d *Database) GetLatestTxnID(
    ctx context.Context,
) (int, error) {
    return d.txnID.selectTxnID(ctx)
}
@@ -1,53 +0,0 @@
// Copyright 2018 New Vector Ltd
// Copyright 2019-2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package postgres

import (
    "context"
    "database/sql"
)

const txnIDSchema = `
-- Keeps a count of the current transaction ID
CREATE SEQUENCE IF NOT EXISTS txn_id_counter START 1;
`

const selectTxnIDSQL = "SELECT nextval('txn_id_counter')"

type txnStatements struct {
    selectTxnIDStmt *sql.Stmt
}

func (s *txnStatements) prepare(db *sql.DB) (err error) {
    _, err = db.Exec(txnIDSchema)
    if err != nil {
        return
    }

    if s.selectTxnIDStmt, err = db.Prepare(selectTxnIDSQL); err != nil {
        return
    }

    return
}

// selectTxnID selects the latest ascending transaction ID
func (s *txnStatements) selectTxnID(
    ctx context.Context,
) (txnID int, err error) {
    err = s.selectTxnIDStmt.QueryRowContext(ctx).Scan(&txnID)
    return
}
@@ -1,267 +0,0 @@
// Copyright 2018 New Vector Ltd
// Copyright 2019-2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package sqlite3

import (
    "context"
    "database/sql"
    "encoding/json"
    "time"

    "github.com/matrix-org/dendrite/internal/sqlutil"
    "github.com/matrix-org/gomatrixserverlib"
    log "github.com/sirupsen/logrus"
)

const appserviceEventsSchema = `
-- Stores events to be sent to application services
CREATE TABLE IF NOT EXISTS appservice_events (
    -- An auto-incrementing id unique to each event in the table
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    -- The ID of the application service the event will be sent to
    as_id TEXT NOT NULL,
    -- JSON representation of the event
    headered_event_json TEXT NOT NULL,
    -- The ID of the transaction that this event is a part of
    txn_id INTEGER NOT NULL
);

CREATE INDEX IF NOT EXISTS appservice_events_as_id ON appservice_events(as_id);
`

const selectEventsByApplicationServiceIDSQL = "" +
    "SELECT id, headered_event_json, txn_id " +
    "FROM appservice_events WHERE as_id = $1 ORDER BY txn_id DESC, id ASC"

const countEventsByApplicationServiceIDSQL = "" +
    "SELECT COUNT(id) FROM appservice_events WHERE as_id = $1"

const insertEventSQL = "" +
    "INSERT INTO appservice_events(as_id, headered_event_json, txn_id) " +
    "VALUES ($1, $2, $3)"

const updateTxnIDForEventsSQL = "" +
    "UPDATE appservice_events SET txn_id = $1 WHERE as_id = $2 AND id <= $3"

const deleteEventsBeforeAndIncludingIDSQL = "" +
    "DELETE FROM appservice_events WHERE as_id = $1 AND id <= $2"

const (
    // A transaction ID number that no transaction should ever have. Used for
    // checking against the default value.
    invalidTxnID = -2
)

type eventsStatements struct {
    db                                     *sql.DB
    writer                                 sqlutil.Writer
    selectEventsByApplicationServiceIDStmt *sql.Stmt
    countEventsByApplicationServiceIDStmt  *sql.Stmt
    insertEventStmt                        *sql.Stmt
    updateTxnIDForEventsStmt               *sql.Stmt
    deleteEventsBeforeAndIncludingIDStmt   *sql.Stmt
}

func (s *eventsStatements) prepare(db *sql.DB, writer sqlutil.Writer) (err error) {
    s.db = db
    s.writer = writer
    _, err = db.Exec(appserviceEventsSchema)
    if err != nil {
        return
    }

    if s.selectEventsByApplicationServiceIDStmt, err = db.Prepare(selectEventsByApplicationServiceIDSQL); err != nil {
        return
    }
    if s.countEventsByApplicationServiceIDStmt, err = db.Prepare(countEventsByApplicationServiceIDSQL); err != nil {
        return
    }
    if s.insertEventStmt, err = db.Prepare(insertEventSQL); err != nil {
        return
    }
    if s.updateTxnIDForEventsStmt, err = db.Prepare(updateTxnIDForEventsSQL); err != nil {
        return
    }
    if s.deleteEventsBeforeAndIncludingIDStmt, err = db.Prepare(deleteEventsBeforeAndIncludingIDSQL); err != nil {
        return
    }

    return
}

// selectEventsByApplicationServiceID takes in an application service ID and
// returns a slice of events that need to be sent to that application service,
// as well as an int later used to remove these same events from the database
// once successfully sent to an application service.
func (s *eventsStatements) selectEventsByApplicationServiceID(
    ctx context.Context,
    applicationServiceID string,
    limit int,
) (
    txnID, maxID int,
    events []gomatrixserverlib.HeaderedEvent,
    eventsRemaining bool,
    err error,
) {
    defer func() {
        if err != nil {
            log.WithFields(log.Fields{
                "appservice": applicationServiceID,
            }).WithError(err).Fatalf("appservice unable to select new events to send")
        }
    }()
    // Retrieve events from the database. Unsuccessfully sent events first
    eventRows, err := s.selectEventsByApplicationServiceIDStmt.QueryContext(ctx, applicationServiceID)
    if err != nil {
        return
    }
    defer checkNamedErr(eventRows.Close, &err)
    events, maxID, txnID, eventsRemaining, err = retrieveEvents(eventRows, limit)
    if err != nil {
        return
    }

    return
}

// checkNamedErr calls fn and overwrites err if it was nil and fn returned non-nil
func checkNamedErr(fn func() error, err *error) {
    if e := fn(); e != nil && *err == nil {
        *err = e
    }
}

func retrieveEvents(eventRows *sql.Rows, limit int) (events []gomatrixserverlib.HeaderedEvent, maxID, txnID int, eventsRemaining bool, err error) {
    // Get current time for use in calculating event age
    nowMilli := time.Now().UnixNano() / int64(time.Millisecond)

    // Iterate through each row and store event contents
    // If txn_id changes dramatically, we've switched from collecting old events to
    // new ones. Send back those events first.
    lastTxnID := invalidTxnID
    for eventsProcessed := 0; eventRows.Next(); {
        var event gomatrixserverlib.HeaderedEvent
        var eventJSON []byte
        var id int
        err = eventRows.Scan(
            &id,
            &eventJSON,
            &txnID,
        )
        if err != nil {
            return nil, 0, 0, false, err
        }

        // Unmarshal eventJSON
        if err = json.Unmarshal(eventJSON, &event); err != nil {
            return nil, 0, 0, false, err
        }

        // If txnID has changed on this event from the previous event, then we've
        // reached the end of a transaction's events. Return only those events.
        if lastTxnID > invalidTxnID && lastTxnID != txnID {
            return events, maxID, lastTxnID, true, nil
        }
        lastTxnID = txnID

        // Limit events that aren't part of an old transaction
        if txnID == -1 {
            // Return if we've hit the limit
            if eventsProcessed++; eventsProcessed > limit {
                return events, maxID, lastTxnID, true, nil
            }
        }

        if id > maxID {
            maxID = id
        }

        // Portion of the event that is unsigned due to rapid change
        // TODO: Consider removing age as not many app services use it
        if err = event.SetUnsignedField("age", nowMilli-int64(event.OriginServerTS())); err != nil {
            return nil, 0, 0, false, err
        }

        events = append(events, event)
    }

    return
}

// countEventsByApplicationServiceID returns the number of events currently
// queued for the given application service ID.
func (s *eventsStatements) countEventsByApplicationServiceID(
    ctx context.Context,
    appServiceID string,
) (int, error) {
    var count int
    err := s.countEventsByApplicationServiceIDStmt.QueryRowContext(ctx, appServiceID).Scan(&count)
    if err != nil && err != sql.ErrNoRows {
        return 0, err
    }

    return count, nil
}

// insertEvent inserts an event mapped to its corresponding application service
// IDs into the db.
func (s *eventsStatements) insertEvent(
    ctx context.Context,
    appServiceID string,
    event *gomatrixserverlib.HeaderedEvent,
) (err error) {
    // Convert event to JSON before inserting
    eventJSON, err := json.Marshal(event)
    if err != nil {
        return err
    }

    return s.writer.Do(s.db, nil, func(txn *sql.Tx) error {
        _, err := s.insertEventStmt.ExecContext(
            ctx,
            appServiceID,
            eventJSON,
            -1, // No transaction ID yet
        )
        return err
    })
}

// updateTxnIDForEvents sets the transactionID for a collection of events. Done
// before sending them to an AppService. Referenced before sending to make sure
// we aren't constructing multiple transactions with the same events.
func (s *eventsStatements) updateTxnIDForEvents(
    ctx context.Context,
    appserviceID string,
    maxID, txnID int,
) (err error) {
    return s.writer.Do(s.db, nil, func(txn *sql.Tx) error {
        _, err := s.updateTxnIDForEventsStmt.ExecContext(ctx, txnID, appserviceID, maxID)
        return err
    })
}

// deleteEventsBeforeAndIncludingID removes events matching given IDs from the database.
func (s *eventsStatements) deleteEventsBeforeAndIncludingID(
    ctx context.Context,
    appserviceID string,
    eventTableID int,
) (err error) {
    return s.writer.Do(s.db, nil, func(txn *sql.Tx) error {
        _, err := s.deleteEventsBeforeAndIncludingIDStmt.ExecContext(ctx, appserviceID, eventTableID)
        return err
    })
}
@@ -1,114 +0,0 @@
// Copyright 2018 New Vector Ltd
// Copyright 2019-2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package sqlite3

import (
    "context"
    "database/sql"

    // Import SQLite database driver
    "github.com/matrix-org/dendrite/internal/sqlutil"
    "github.com/matrix-org/dendrite/setup/base"
    "github.com/matrix-org/dendrite/setup/config"
    "github.com/matrix-org/gomatrixserverlib"
)

// Database stores events intended to be later sent to application services
type Database struct {
    events eventsStatements
    txnID  txnStatements
    db     *sql.DB
    writer sqlutil.Writer
}

// NewDatabase opens a new database
func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions) (*Database, error) {
    var result Database
    var err error
    if result.db, result.writer, err = base.DatabaseConnection(dbProperties, sqlutil.NewExclusiveWriter()); err != nil {
        return nil, err
    }
    if err = result.prepare(); err != nil {
        return nil, err
    }
    return &result, nil
}

func (d *Database) prepare() error {
    if err := d.events.prepare(d.db, d.writer); err != nil {
        return err
    }

    return d.txnID.prepare(d.db, d.writer)
}

// StoreEvent takes in a gomatrixserverlib.HeaderedEvent and stores it in the database
// for a transaction worker to pull and later send to an application service.
func (d *Database) StoreEvent(
    ctx context.Context,
    appServiceID string,
    event *gomatrixserverlib.HeaderedEvent,
) error {
    return d.events.insertEvent(ctx, appServiceID, event)
}

// GetEventsWithAppServiceID returns a slice of events and their IDs intended to
// be sent to an application service given its ID.
func (d *Database) GetEventsWithAppServiceID(
    ctx context.Context,
    appServiceID string,
    limit int,
) (int, int, []gomatrixserverlib.HeaderedEvent, bool, error) {
    return d.events.selectEventsByApplicationServiceID(ctx, appServiceID, limit)
}

// CountEventsWithAppServiceID returns the number of events destined for an
// application service given its ID.
func (d *Database) CountEventsWithAppServiceID(
    ctx context.Context,
    appServiceID string,
) (int, error) {
    return d.events.countEventsByApplicationServiceID(ctx, appServiceID)
}

// UpdateTxnIDForEvents takes in an application service ID and a transaction ID
// and stores them in the DB, unless the pair already exists, in
// which case it updates them.
func (d *Database) UpdateTxnIDForEvents(
    ctx context.Context,
    appserviceID string,
    maxID, txnID int,
) error {
    return d.events.updateTxnIDForEvents(ctx, appserviceID, maxID, txnID)
}

// RemoveEventsBeforeAndIncludingID removes all events from the database that
// are less than or equal to a given maximum ID. IDs here are implemented as a
// serial, thus this should always delete events in chronological order.
func (d *Database) RemoveEventsBeforeAndIncludingID(
    ctx context.Context,
    appserviceID string,
    eventTableID int,
) error {
    return d.events.deleteEventsBeforeAndIncludingID(ctx, appserviceID, eventTableID)
}

// GetLatestTxnID returns the latest available transaction id
func (d *Database) GetLatestTxnID(
    ctx context.Context,
) (int, error) {
    return d.txnID.selectTxnID(ctx)
}
@@ -1,82 +0,0 @@
// Copyright 2018 New Vector Ltd
// Copyright 2019-2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package sqlite3

import (
    "context"
    "database/sql"

    "github.com/matrix-org/dendrite/internal/sqlutil"
)

const txnIDSchema = `
-- Keeps a count of the current transaction ID
CREATE TABLE IF NOT EXISTS appservice_counters (
    name TEXT PRIMARY KEY NOT NULL,
    last_id INTEGER DEFAULT 1
);
INSERT OR IGNORE INTO appservice_counters (name, last_id) VALUES('txn_id', 1);
`

const selectTxnIDSQL = `
    SELECT last_id FROM appservice_counters WHERE name='txn_id'
`

const updateTxnIDSQL = `
    UPDATE appservice_counters SET last_id=last_id+1 WHERE name='txn_id'
`

type txnStatements struct {
    db              *sql.DB
    writer          sqlutil.Writer
    selectTxnIDStmt *sql.Stmt
    updateTxnIDStmt *sql.Stmt
}

func (s *txnStatements) prepare(db *sql.DB, writer sqlutil.Writer) (err error) {
    s.db = db
    s.writer = writer
    _, err = db.Exec(txnIDSchema)
    if err != nil {
        return
    }

    if s.selectTxnIDStmt, err = db.Prepare(selectTxnIDSQL); err != nil {
        return
    }

    if s.updateTxnIDStmt, err = db.Prepare(updateTxnIDSQL); err != nil {
        return
    }

    return
}

// selectTxnID selects the latest ascending transaction ID
func (s *txnStatements) selectTxnID(
    ctx context.Context,
) (txnID int, err error) {
    err = s.writer.Do(s.db, nil, func(txn *sql.Tx) error {
        err := s.selectTxnIDStmt.QueryRowContext(ctx).Scan(&txnID)
        if err != nil {
            return err
        }

        _, err = s.updateTxnIDStmt.ExecContext(ctx)
        return err
    })
    return
}
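The SQLite counter relies on the exclusive writer to make the read-then-increment pair atomic. The same idea in miniature, with a mutex standing in for sqlutil.Writer (the types below are invented for the sketch):

// Minimal model of the appservice_counters pattern: selectTxnID returns the
// current value and bumps it, and the whole pair must be serialised so two
// workers can never be handed the same transaction ID.
package main

import (
    "fmt"
    "sync"
)

type txnCounter struct {
    mu     sync.Mutex // plays the role of the exclusive writer
    lastID int
}

func (c *txnCounter) selectTxnID() int {
    c.mu.Lock()
    defer c.mu.Unlock()
    id := c.lastID // SELECT last_id FROM appservice_counters WHERE name='txn_id'
    c.lastID++     // UPDATE appservice_counters SET last_id=last_id+1 ...
    return id
}

func main() {
    c := &txnCounter{lastID: 1}
    fmt.Println(c.selectTxnID(), c.selectTxnID(), c.selectTxnID()) // 1 2 3
}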
@@ -1,40 +0,0 @@
// Copyright 2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !wasm
// +build !wasm

package storage

import (
    "fmt"

    "github.com/matrix-org/dendrite/appservice/storage/postgres"
    "github.com/matrix-org/dendrite/appservice/storage/sqlite3"
    "github.com/matrix-org/dendrite/setup/base"
    "github.com/matrix-org/dendrite/setup/config"
)

// NewDatabase opens a new Postgres or Sqlite database (based on dataSourceName scheme)
// and sets DB connection parameters
func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions) (Database, error) {
    switch {
    case dbProperties.ConnectionString.IsSQLite():
        return sqlite3.NewDatabase(base, dbProperties)
    case dbProperties.ConnectionString.IsPostgres():
        return postgres.NewDatabase(base, dbProperties)
    default:
        return nil, fmt.Errorf("unexpected database type")
    }
}
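Backend selection is driven purely by the connection string scheme. A self-contained approximation of that dispatch follows; the helper name and the exact accepted schemes are assumptions for the sketch (the real checks live on the config connection string type):

// Rough model of NewDatabase's scheme dispatch: sqlite for file: URIs,
// postgres for postgres(ql):// URIs, an error otherwise.
package main

import (
    "fmt"
    "strings"
)

func backendFor(connStr string) (string, error) {
    switch {
    case strings.HasPrefix(connStr, "file:"):
        return "sqlite3", nil
    case strings.HasPrefix(connStr, "postgres://") || strings.HasPrefix(connStr, "postgresql://"):
        return "postgres", nil
    default:
        return "", fmt.Errorf("unexpected database type")
    }
}

func main() {
    fmt.Println(backendFor("file:dendrite_appservice.db"))
    fmt.Println(backendFor("postgres://dendrite@localhost/dendrite_appservice"))
}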
@@ -1,34 +0,0 @@
// Copyright 2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
    "fmt"

    "github.com/matrix-org/dendrite/appservice/storage/sqlite3"
    "github.com/matrix-org/dendrite/setup/base"
    "github.com/matrix-org/dendrite/setup/config"
)

func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions) (Database, error) {
    switch {
    case dbProperties.ConnectionString.IsSQLite():
        return sqlite3.NewDatabase(base, dbProperties)
    case dbProperties.ConnectionString.IsPostgres():
        return nil, fmt.Errorf("can't use Postgres implementation")
    default:
        return nil, fmt.Errorf("unexpected database type")
    }
}
@@ -1,64 +0,0 @@
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package types

import (
    "sync"

    "github.com/matrix-org/dendrite/setup/config"
)

const (
    // AppServiceDeviceID is the AS dummy device ID
    AppServiceDeviceID = "AS_Device"
)

// ApplicationServiceWorkerState is a type that couples an application service,
// a lockable condition as well as some other state variables, allowing the
// roomserver to notify appservice workers when there are events ready to send
// externally to application services.
type ApplicationServiceWorkerState struct {
    AppService config.ApplicationService
    Cond       *sync.Cond
    // Events ready to be sent
    EventsReady bool
    // Backoff exponent (2^x secs). Max 6, aka 64s.
    Backoff int
}

// NotifyNewEvents wakes up all waiting goroutines, notifying that events remain
// in the event queue for this application service worker.
func (a *ApplicationServiceWorkerState) NotifyNewEvents() {
    a.Cond.L.Lock()
    a.EventsReady = true
    a.Cond.Broadcast()
    a.Cond.L.Unlock()
}

// FinishEventProcessing marks all events of this worker as being sent to the
// application service.
func (a *ApplicationServiceWorkerState) FinishEventProcessing() {
    a.Cond.L.Lock()
    a.EventsReady = false
    a.Cond.L.Unlock()
}

// WaitForNewEvents causes the calling goroutine to wait on the worker state's
// condition for a broadcast or similar wakeup, if there are no events ready.
func (a *ApplicationServiceWorkerState) WaitForNewEvents() {
    a.Cond.L.Lock()
    if !a.EventsReady {
        a.Cond.Wait()
    }
    a.Cond.L.Unlock()
}
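The worker-state methods above are a standard sync.Cond handshake: the producer sets EventsReady and broadcasts, the consumer waits while nothing is ready. A self-contained version of the same pattern, outside Dendrite's types and with invented names, looks like this:

// Minimal reproduction of the NotifyNewEvents / WaitForNewEvents handshake.
package main

import (
    "fmt"
    "sync"
    "time"
)

type workerState struct {
    cond        *sync.Cond
    eventsReady bool
}

func (w *workerState) notifyNewEvents() {
    w.cond.L.Lock()
    w.eventsReady = true
    w.cond.Broadcast()
    w.cond.L.Unlock()
}

func (w *workerState) waitForNewEvents() {
    w.cond.L.Lock()
    if !w.eventsReady {
        w.cond.Wait()
    }
    w.cond.L.Unlock()
}

func main() {
    ws := &workerState{cond: sync.NewCond(&sync.Mutex{})}
    go func() {
        time.Sleep(10 * time.Millisecond)
        ws.notifyNewEvents() // a consumer queued an event for this appservice
    }()
    ws.waitForNewEvents() // the transaction worker blocks until woken
    fmt.Println("events ready, building transaction")
}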
@@ -1,236 +0,0 @@
// Copyright 2018 Vector Creations Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package workers

import (
    "bytes"
    "context"
    "encoding/json"
    "fmt"
    "math"
    "net/http"
    "net/url"
    "time"

    "github.com/matrix-org/dendrite/appservice/storage"
    "github.com/matrix-org/dendrite/appservice/types"
    "github.com/matrix-org/dendrite/setup/config"
    "github.com/matrix-org/gomatrixserverlib"
    log "github.com/sirupsen/logrus"
)

var (
    // Maximum number of events sent in each transaction.
    transactionBatchSize = 50
)

// SetupTransactionWorkers spawns a separate goroutine for each application
// service. Each of these "workers" handle taking all events intended for their
// app service, batch them up into a single transaction (up to a max transaction
// size), then send that off to the AS's /transactions/{txnID} endpoint. It also
// handles exponentially backing off in case the AS isn't currently available.
func SetupTransactionWorkers(
    client *http.Client,
    appserviceDB storage.Database,
    workerStates []types.ApplicationServiceWorkerState,
) error {
    // Create a worker that handles transmitting events to a single homeserver
    for _, workerState := range workerStates {
        // Don't create a worker if this AS doesn't want to receive events
        if workerState.AppService.URL != "" {
            go worker(client, appserviceDB, workerState)
        }
    }
    return nil
}

// worker is a goroutine that sends any queued events to the application service
// it is given.
func worker(client *http.Client, db storage.Database, ws types.ApplicationServiceWorkerState) {
    log.WithFields(log.Fields{
        "appservice": ws.AppService.ID,
    }).Info("Starting application service")
    ctx := context.Background()

    // Initial check for any leftover events to send from last time
    eventCount, err := db.CountEventsWithAppServiceID(ctx, ws.AppService.ID)
    if err != nil {
        log.WithFields(log.Fields{
            "appservice": ws.AppService.ID,
        }).WithError(err).Fatal("appservice worker unable to read queued events from DB")
        return
    }
    if eventCount > 0 {
        ws.NotifyNewEvents()
    }

    // Loop forever and keep waiting for more events to send
    for {
        // Wait for more events if we've sent all the events in the database
        ws.WaitForNewEvents()

        // Batch events up into a transaction
        transactionJSON, txnID, maxEventID, eventsRemaining, err := createTransaction(ctx, db, ws.AppService.ID)
        if err != nil {
            log.WithFields(log.Fields{
                "appservice": ws.AppService.ID,
            }).WithError(err).Fatal("appservice worker unable to create transaction")

            return
        }

        // Send the events off to the application service
        // Backoff if the application service does not respond
        err = send(client, ws.AppService, txnID, transactionJSON)
        if err != nil {
            log.WithFields(log.Fields{
                "appservice": ws.AppService.ID,
            }).WithError(err).Error("unable to send event")
            // Backoff
            backoff(&ws, err)
            continue
        }

        // We sent successfully, hooray!
        ws.Backoff = 0

        // Transactions have a maximum event size, so there may still be some events
        // left over to send. Keep sending until none are left
        if !eventsRemaining {
            ws.FinishEventProcessing()
        }

        // Remove sent events from the DB
        err = db.RemoveEventsBeforeAndIncludingID(ctx, ws.AppService.ID, maxEventID)
        if err != nil {
            log.WithFields(log.Fields{
                "appservice": ws.AppService.ID,
            }).WithError(err).Fatal("unable to remove appservice events from the database")
            return
        }
    }
}

// backoff pauses the calling goroutine for 2^(backoff exponent) seconds
func backoff(ws *types.ApplicationServiceWorkerState, err error) {
    // Calculate how long to backoff for
    backoffDuration := time.Duration(math.Pow(2, float64(ws.Backoff)))
    backoffSeconds := time.Second * backoffDuration

    log.WithFields(log.Fields{
        "appservice": ws.AppService.ID,
    }).WithError(err).Warnf("unable to send transactions successfully, backing off for %ds",
        backoffDuration)

    ws.Backoff++
    if ws.Backoff > 6 {
        ws.Backoff = 6
    }

    // Backoff
    time.Sleep(backoffSeconds)
}

// createTransaction takes in a slice of AS events, stores them in an AS
// transaction, and JSON-encodes the results.
func createTransaction(
    ctx context.Context,
    db storage.Database,
    appserviceID string,
) (
    transactionJSON []byte,
    txnID, maxID int,
    eventsRemaining bool,
    err error,
) {
    // Retrieve the latest events from the DB (will return old events if they weren't successfully sent)
    txnID, maxID, events, eventsRemaining, err := db.GetEventsWithAppServiceID(ctx, appserviceID, transactionBatchSize)
    if err != nil {
        log.WithFields(log.Fields{
            "appservice": appserviceID,
        }).WithError(err).Fatalf("appservice worker unable to read queued events from DB")

        return
    }

    // Check if these events do not already have a transaction ID
    if txnID == -1 {
        // If not, grab next available ID from the DB
        txnID, err = db.GetLatestTxnID(ctx)
        if err != nil {
            return nil, 0, 0, false, err
        }

        // Mark new events with current transactionID
        if err = db.UpdateTxnIDForEvents(ctx, appserviceID, maxID, txnID); err != nil {
            return nil, 0, 0, false, err
        }
    }

    var ev []*gomatrixserverlib.HeaderedEvent
    for i := range events {
        ev = append(ev, &events[i])
    }

    // Create a transaction and store the events inside
    transaction := gomatrixserverlib.ApplicationServiceTransaction{
        Events: gomatrixserverlib.HeaderedToClientEvents(ev, gomatrixserverlib.FormatAll),
    }

    transactionJSON, err = json.Marshal(transaction)
    if err != nil {
        return
    }

    return
}

// send sends events to an application service. Returns an error if an OK was not
// received back from the application service or the request timed out.
func send(
    client *http.Client,
    appservice config.ApplicationService,
    txnID int,
    transaction []byte,
) (err error) {
    // PUT a transaction to our AS
    // https://matrix.org/docs/spec/application_service/r0.1.2#put-matrix-app-v1-transactions-txnid
    address := fmt.Sprintf("%s/transactions/%d?access_token=%s", appservice.URL, txnID, url.QueryEscape(appservice.HSToken))
    req, err := http.NewRequest("PUT", address, bytes.NewBuffer(transaction))
    if err != nil {
        return err
    }
    req.Header.Set("Content-Type", "application/json")
    resp, err := client.Do(req)
    if err != nil {
        return err
    }
    defer checkNamedErr(resp.Body.Close, &err)

    // Check the AS received the events correctly
    if resp.StatusCode != http.StatusOK {
        // TODO: Handle non-200 error codes from application services
        return fmt.Errorf("non-OK status code %d returned from AS", resp.StatusCode)
    }

    return nil
}

// checkNamedErr calls fn and overwrites err if it was nil and fn returned non-nil
func checkNamedErr(fn func() error, err *error) {
    if e := fn(); e != nil && *err == nil {
        *err = e
    }
}
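The worker's retry schedule is 2^n seconds with the exponent capped at 6, i.e. 1s, 2s, 4s, and so on up to 64s between attempts. A tiny standalone sketch of just that calculation (not the worker itself):

// The retry delays produced by the worker's backoff counter: 2^n seconds,
// where n is clamped to 6 (64s) after repeated failures.
package main

import (
    "fmt"
    "math"
    "time"
)

func backoffDelay(n int) time.Duration {
    if n > 6 {
        n = 6
    }
    return time.Duration(math.Pow(2, float64(n))) * time.Second
}

func main() {
    for n := 0; n <= 7; n++ {
        fmt.Println(n, backoffDelay(n)) // 1s, 2s, 4s, ..., 64s, 64s
    }
}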
@@ -643,7 +643,7 @@ fed Inbound federation redacts events from erased users
fme Outbound federation can request missing events
fme Inbound federation can return missing events for world_readable visibility
fme Inbound federation can return missing events for shared visibility
fme Inbound federation can return missing events for invite visibility
fme Inbound federation can return missing events for invited visibility
fme Inbound federation can return missing events for joined visibility
fme outliers whose auth_events are in a different room are correctly rejected
fbk Outbound federation can backfill events
25
build/docker/Dockerfile.demo-pinecone
Normal file
@@ -0,0 +1,25 @@
FROM docker.io/golang:1.19-alpine AS base

RUN apk --update --no-cache add bash build-base

WORKDIR /build

COPY . /build

RUN mkdir -p bin
RUN go build -trimpath -o bin/ ./cmd/dendrite-demo-pinecone
RUN go build -trimpath -o bin/ ./cmd/create-account
RUN go build -trimpath -o bin/ ./cmd/generate-keys

FROM alpine:latest
LABEL org.opencontainers.image.title="Dendrite (Pinecone demo)"
LABEL org.opencontainers.image.description="Next-generation Matrix homeserver written in Go"
LABEL org.opencontainers.image.source="https://github.com/matrix-org/dendrite"
LABEL org.opencontainers.image.licenses="Apache-2.0"

COPY --from=base /build/bin/* /usr/bin/

VOLUME /etc/dendrite
WORKDIR /etc/dendrite

ENTRYPOINT ["/usr/bin/dendrite-demo-pinecone"]
@@ -1,4 +1,4 @@
FROM docker.io/golang:1.18-alpine AS base
FROM docker.io/golang:1.19-alpine AS base

RUN apk --update --no-cache add bash build-base

@@ -1,4 +1,4 @@
FROM docker.io/golang:1.18-alpine AS base
FROM docker.io/golang:1.19-alpine AS base

RUN apk --update --no-cache add bash build-base
@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

cd $(git rev-parse --show-toplevel)

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

TAG=${1:-latest}

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

TAG=${1:-latest}
@@ -22,14 +22,16 @@ import (
     "encoding/hex"
     "fmt"
     "io"
-    "io/ioutil"
     "net"
     "net/http"
     "os"
+    "path/filepath"
     "strings"
     "sync"
     "time"

+    "go.uber.org/atomic"
+
     "github.com/gorilla/mux"
     "github.com/matrix-org/dendrite/appservice"
     "github.com/matrix-org/dendrite/clientapi/userutil"

@@ -45,6 +47,7 @@ import (
     "github.com/matrix-org/dendrite/setup/base"
     "github.com/matrix-org/dendrite/setup/config"
     "github.com/matrix-org/dendrite/setup/process"
+    "github.com/matrix-org/dendrite/test"
     "github.com/matrix-org/dendrite/userapi"
     userapiAPI "github.com/matrix-org/dendrite/userapi/api"
     "github.com/matrix-org/gomatrixserverlib"
@@ -65,6 +68,7 @@ const (
     PeerTypeRemote    = pineconeRouter.PeerTypeRemote
     PeerTypeMulticast = pineconeRouter.PeerTypeMulticast
     PeerTypeBluetooth = pineconeRouter.PeerTypeBluetooth
+    PeerTypeBonjour   = pineconeRouter.PeerTypeBonjour
 )

 type DendriteMonolith struct {
@@ -81,6 +85,10 @@ type DendriteMonolith struct {
     userAPI userapiAPI.UserInternalAPI
 }

+func (m *DendriteMonolith) PublicKey() string {
+    return m.PineconeRouter.PublicKey().String()
+}
+
 func (m *DendriteMonolith) BaseURL() string {
     return fmt.Sprintf("http://%s", m.listener.Addr().String())
 }
@@ -93,6 +101,20 @@ func (m *DendriteMonolith) SessionCount() int {
     return len(m.PineconeQUIC.Protocol("matrix").Sessions())
 }

+func (m *DendriteMonolith) RegisterNetworkInterface(name string, index int, mtu int, up bool, broadcast bool, loopback bool, pointToPoint bool, multicast bool, addrs string) {
+    m.PineconeMulticast.RegisterInterface(pineconeMulticast.InterfaceInfo{
+        Name:         name,
+        Index:        index,
+        Mtu:          mtu,
+        Up:           up,
+        Broadcast:    broadcast,
+        Loopback:     loopback,
+        PointToPoint: pointToPoint,
+        Multicast:    multicast,
+        Addrs:        addrs,
+    })
+}
+
 func (m *DendriteMonolith) SetMulticastEnabled(enabled bool) {
     if enabled {
         m.PineconeMulticast.Start()
@@ -104,7 +126,9 @@ func (m *DendriteMonolith) SetMulticastEnabled(enabled bool) {

 func (m *DendriteMonolith) SetStaticPeer(uri string) {
     m.PineconeManager.RemovePeers()
-    m.PineconeManager.AddPeer(strings.TrimSpace(uri))
+    for _, uri := range strings.Split(uri, ",") {
+        m.PineconeManager.AddPeer(strings.TrimSpace(uri))
+    }
 }

 func (m *DendriteMonolith) DisconnectType(peertype int) {
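The change above lets SetStaticPeer accept a comma-separated list of peer URIs instead of a single URI. A minimal standalone sketch of the same parsing, with a hypothetical addPeer callback standing in for the Pinecone connection manager:

```go
package main

import (
	"fmt"
	"strings"
)

// splitPeerURIs mirrors the comma-splitting behaviour introduced above. The
// addPeer callback is a stand-in for the Pinecone connection manager; skipping
// empty entries is an extra safety net, not something the patch itself does.
func splitPeerURIs(uris string, addPeer func(string)) {
	for _, uri := range strings.Split(uris, ",") {
		if uri = strings.TrimSpace(uri); uri != "" {
			addPeer(uri)
		}
	}
}

func main() {
	splitPeerURIs("wss://pinecone.example:443, tcp://10.0.0.1:65432", func(u string) {
		fmt.Println("adding peer:", u)
	})
}
```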
@@ -133,32 +157,21 @@ func (m *DendriteMonolith) Conduit(zone string, peertype int) (*Conduit, error)
     go func() {
         conduit.portMutex.Lock()
         defer conduit.portMutex.Unlock()
-    loop:
-        for i := 1; i <= 10; i++ {
-            logrus.Errorf("Attempting authenticated connect (attempt %d)", i)
-            var err error
-            conduit.port, err = m.PineconeRouter.Connect(
-                l,
-                pineconeRouter.ConnectionZone(zone),
-                pineconeRouter.ConnectionPeerType(peertype),
-            )
-            switch err {
-            case io.ErrClosedPipe:
-                logrus.Errorf("Authenticated connect failed due to closed pipe (attempt %d)", i)
-                return
-            case io.EOF:
-                logrus.Errorf("Authenticated connect failed due to EOF (attempt %d)", i)
-                break loop
-            case nil:
-                logrus.Errorf("Authenticated connect succeeded, connected to port %d (attempt %d)", conduit.port, i)
-                return
-            default:
-                logrus.WithError(err).Errorf("Authenticated connect failed (attempt %d)", i)
-                time.Sleep(time.Second)
-            }
-        }
-        _ = l.Close()
-        _ = r.Close()
+        logrus.Errorf("Attempting authenticated connect")
+        var err error
+        if conduit.port, err = m.PineconeRouter.Connect(
+            l,
+            pineconeRouter.ConnectionZone(zone),
+            pineconeRouter.ConnectionPeerType(peertype),
+        ); err != nil {
+            logrus.Errorf("Authenticated connect failed: %s", err)
+            _ = l.Close()
+            _ = r.Close()
+            _ = conduit.Close()
+            return
+        }
+        logrus.Infof("Authenticated connect succeeded (port %d)", conduit.port)
     }()
     return conduit, nil
 }
@@ -204,27 +217,45 @@ func (m *DendriteMonolith) RegisterDevice(localpart, deviceID string) (string, e

 // nolint:gocyclo
 func (m *DendriteMonolith) Start() {
-    var err error
     var sk ed25519.PrivateKey
     var pk ed25519.PublicKey
-    keyfile := fmt.Sprintf("%s/p2p.key", m.StorageDirectory)
-    if _, err = os.Stat(keyfile); os.IsNotExist(err) {
-        if pk, sk, err = ed25519.GenerateKey(nil); err != nil {
-            panic(err)
-        }
-        if err = ioutil.WriteFile(keyfile, sk, 0644); err != nil {
-            panic(err)
-        }
-    } else if err == nil {
-        if sk, err = ioutil.ReadFile(keyfile); err != nil {
-            panic(err)
-        }
-        if len(sk) != ed25519.PrivateKeySize {
-            panic("the private key is not long enough")
-        }
-        pk = sk.Public().(ed25519.PublicKey)
-    }
+
+    keyfile := filepath.Join(m.StorageDirectory, "p2p.pem")
+    if _, err := os.Stat(keyfile); os.IsNotExist(err) {
+        oldkeyfile := filepath.Join(m.StorageDirectory, "p2p.key")
+        if _, err = os.Stat(oldkeyfile); os.IsNotExist(err) {
+            if err = test.NewMatrixKey(keyfile); err != nil {
+                panic("failed to generate a new PEM key: " + err.Error())
+            }
+            if _, sk, err = config.LoadMatrixKey(keyfile, os.ReadFile); err != nil {
+                panic("failed to load PEM key: " + err.Error())
+            }
+            if len(sk) != ed25519.PrivateKeySize {
+                panic("the private key is not long enough")
+            }
+        } else {
+            if sk, err = os.ReadFile(oldkeyfile); err != nil {
+                panic("failed to read the old private key: " + err.Error())
+            }
+            if len(sk) != ed25519.PrivateKeySize {
+                panic("the private key is not long enough")
+            }
+            if err = test.SaveMatrixKey(keyfile, sk); err != nil {
+                panic("failed to convert the private key to PEM format: " + err.Error())
+            }
+        }
+    } else {
+        if _, sk, err = config.LoadMatrixKey(keyfile, os.ReadFile); err != nil {
+            panic("failed to load PEM key: " + err.Error())
+        }
+        if len(sk) != ed25519.PrivateKeySize {
+            panic("the private key is not long enough")
+        }
+    }
+
+    pk = sk.Public().(ed25519.PublicKey)

+    var err error
     m.listener, err = net.Listen("tcp", "localhost:65432")
     if err != nil {
         panic(err)
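The Start() change above replaces the raw p2p.key file with a PEM-encoded p2p.pem, migrating an existing raw key when one is found. test.NewMatrixKey, config.LoadMatrixKey and test.SaveMatrixKey are Dendrite helpers; the sketch below shows the equivalent load-or-migrate-or-generate flow using only the Go standard library, with illustrative file names and PEM block type:

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"encoding/pem"
	"errors"
	"fmt"
	"os"
	"path/filepath"
)

// loadOrCreateKey sketches the migration flow above: prefer a PEM-encoded key,
// fall back to migrating a legacy raw key, otherwise generate a fresh one.
func loadOrCreateKey(dir string) (ed25519.PrivateKey, error) {
	pemFile := filepath.Join(dir, "p2p.pem")
	rawFile := filepath.Join(dir, "p2p.key")

	// 1. An existing PEM key wins.
	if data, err := os.ReadFile(pemFile); err == nil {
		block, _ := pem.Decode(data)
		if block == nil || len(block.Bytes) != ed25519.PrivateKeySize {
			return nil, errors.New("invalid PEM key")
		}
		return ed25519.PrivateKey(block.Bytes), nil
	}

	// 2. Migrate a legacy raw key if present, otherwise generate a new one.
	var sk ed25519.PrivateKey
	if raw, err := os.ReadFile(rawFile); err == nil && len(raw) == ed25519.PrivateKeySize {
		sk = ed25519.PrivateKey(raw)
	} else {
		_, generated, err := ed25519.GenerateKey(rand.Reader)
		if err != nil {
			return nil, err
		}
		sk = generated
	}

	// 3. Persist in PEM form so the next start takes the fast path.
	out := pem.EncodeToMemory(&pem.Block{Type: "MATRIX PRIVATE KEY", Bytes: sk})
	return sk, os.WriteFile(pemFile, out, 0o600)
}

func main() {
	dir, err := os.MkdirTemp("", "p2pkey")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	sk, err := loadOrCreateKey(dir)
	if err != nil {
		panic(err)
	}
	fmt.Printf("public key: %x\n", sk.Public().(ed25519.PublicKey))
}
```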
@@ -236,31 +267,35 @@ func (m *DendriteMonolith) Start() {
     m.logger.SetOutput(BindLogger{})
     logrus.SetOutput(BindLogger{})

-    m.PineconeRouter = pineconeRouter.NewRouter(logrus.WithField("pinecone", "router"), sk, false)
+    m.PineconeRouter = pineconeRouter.NewRouter(logrus.WithField("pinecone", "router"), sk)
     m.PineconeQUIC = pineconeSessions.NewSessions(logrus.WithField("pinecone", "sessions"), m.PineconeRouter, []string{"matrix"})
     m.PineconeMulticast = pineconeMulticast.NewMulticast(logrus.WithField("pinecone", "multicast"), m.PineconeRouter)
     m.PineconeManager = pineconeConnections.NewConnectionManager(m.PineconeRouter, nil)

     prefix := hex.EncodeToString(pk)
     cfg := &config.Dendrite{}
-    cfg.Defaults(true)
+    cfg.Defaults(config.DefaultOpts{
+        Generate:   true,
+        Monolithic: true,
+    })
     cfg.Global.ServerName = gomatrixserverlib.ServerName(hex.EncodeToString(pk))
     cfg.Global.PrivateKey = sk
     cfg.Global.KeyID = gomatrixserverlib.KeyID(signing.KeyID)
-    cfg.Global.JetStream.InMemory = true
-    cfg.Global.JetStream.StoragePath = config.Path(fmt.Sprintf("%s/%s", m.StorageDirectory, prefix))
-    cfg.UserAPI.AccountDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-account.db", m.StorageDirectory, prefix))
-    cfg.MediaAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/dendrite-p2p-mediaapi.db", m.StorageDirectory))
-    cfg.SyncAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-syncapi.db", m.StorageDirectory, prefix))
-    cfg.RoomServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-roomserver.db", m.StorageDirectory, prefix))
-    cfg.KeyServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-keyserver.db", m.StorageDirectory, prefix))
-    cfg.FederationAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-federationsender.db", m.StorageDirectory, prefix))
-    cfg.AppServiceAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-appservice.db", m.StorageDirectory, prefix))
-    cfg.MediaAPI.BasePath = config.Path(fmt.Sprintf("%s/media", m.CacheDirectory))
-    cfg.MediaAPI.AbsBasePath = config.Path(fmt.Sprintf("%s/media", m.CacheDirectory))
+    cfg.Global.JetStream.InMemory = false
+    cfg.Global.JetStream.StoragePath = config.Path(filepath.Join(m.CacheDirectory, prefix))
+    cfg.UserAPI.AccountDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-account.db", filepath.Join(m.StorageDirectory, prefix)))
+    cfg.MediaAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-mediaapi.db", filepath.Join(m.StorageDirectory, prefix)))
+    cfg.SyncAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-syncapi.db", filepath.Join(m.StorageDirectory, prefix)))
+    cfg.RoomServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-roomserver.db", filepath.Join(m.StorageDirectory, prefix)))
+    cfg.KeyServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-keyserver.db", filepath.Join(m.StorageDirectory, prefix)))
+    cfg.FederationAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-federationsender.db", filepath.Join(m.StorageDirectory, prefix)))
+    cfg.MediaAPI.BasePath = config.Path(filepath.Join(m.CacheDirectory, "media"))
+    cfg.MediaAPI.AbsBasePath = config.Path(filepath.Join(m.CacheDirectory, "media"))
     cfg.MSCs.MSCs = []string{"msc2836", "msc2946"}
     cfg.ClientAPI.RegistrationDisabled = false
     cfg.ClientAPI.OpenRegistrationWithoutVerificationEnabled = true
+    cfg.SyncAPI.Fulltext.Enabled = true
+    cfg.SyncAPI.Fulltext.IndexPath = config.Path(filepath.Join(m.CacheDirectory, "search"))
     if err = cfg.Derive(); err != nil {
         panic(err)
     }
@@ -374,6 +409,7 @@ func (m *DendriteMonolith) Stop() {
 const MaxFrameSize = types.MaxFrameSize

 type Conduit struct {
+    closed    atomic.Bool
     conn      net.Conn
     port      types.SwitchPortID
     portMutex sync.Mutex
@@ -386,10 +422,16 @@ func (c *Conduit) Port() int {
 }

 func (c *Conduit) Read(b []byte) (int, error) {
+    if c.closed.Load() {
+        return 0, io.EOF
+    }
     return c.conn.Read(b)
 }

 func (c *Conduit) ReadCopy() ([]byte, error) {
+    if c.closed.Load() {
+        return nil, io.EOF
+    }
     var buf [65535 * 2]byte
     n, err := c.conn.Read(buf[:])
     if err != nil {
@@ -399,9 +441,16 @@ func (c *Conduit) ReadCopy() ([]byte, error) {
 }

 func (c *Conduit) Write(b []byte) (int, error) {
+    if c.closed.Load() {
+        return 0, io.EOF
+    }
     return c.conn.Write(b)
 }

 func (c *Conduit) Close() error {
+    if c.closed.Load() {
+        return io.ErrClosedPipe
+    }
+    c.closed.Store(true)
     return c.conn.Close()
 }
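The new closed flag makes the Conduit reject I/O after it has been shut down instead of leaning on the underlying connection's error behaviour. A minimal sketch of that guard pattern around an arbitrary net.Conn, using go.uber.org/atomic as the patch does (the wrapper type and demo connection are illustrative):

```go
package main

import (
	"fmt"
	"io"
	"net"

	"go.uber.org/atomic"
)

// guardedConn refuses further I/O once it has been closed, instead of
// surfacing whatever error the underlying connection would return.
type guardedConn struct {
	closed atomic.Bool
	conn   net.Conn
}

func (g *guardedConn) Write(b []byte) (int, error) {
	if g.closed.Load() {
		return 0, io.EOF
	}
	return g.conn.Write(b)
}

func (g *guardedConn) Close() error {
	if g.closed.Load() {
		return io.ErrClosedPipe // already closed once
	}
	g.closed.Store(true)
	return g.conn.Close()
}

func main() {
	a, b := net.Pipe()
	defer b.Close()

	g := &guardedConn{conn: a}
	fmt.Println(g.Close()) // <nil>
	fmt.Println(g.Close()) // io: read/write on closed pipe
	_, err := g.Write([]byte("hello"))
	fmt.Println(err) // EOF, the guard fires before the real connection is touched
}
```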
@@ -2,10 +2,14 @@ package gobind

 import (
     "context"
+    "crypto/ed25519"
     "crypto/tls"
+    "encoding/hex"
     "fmt"
     "net"
     "net/http"
+    "os"
+    "path/filepath"
     "time"

     "github.com/gorilla/mux"

@@ -22,6 +26,7 @@ import (
     "github.com/matrix-org/dendrite/setup/base"
     "github.com/matrix-org/dendrite/setup/config"
     "github.com/matrix-org/dendrite/setup/process"
+    "github.com/matrix-org/dendrite/test"
     "github.com/matrix-org/dendrite/userapi"
     "github.com/matrix-org/gomatrixserverlib"
     "github.com/sirupsen/logrus"
@@ -63,28 +68,70 @@ func (m *DendriteMonolith) DisconnectMulticastPeers() {
 }

 func (m *DendriteMonolith) Start() {
+    var pk ed25519.PublicKey
+    var sk ed25519.PrivateKey
+
     m.logger = logrus.Logger{
         Out: BindLogger{},
     }
     m.logger.SetOutput(BindLogger{})
     logrus.SetOutput(BindLogger{})

+    keyfile := filepath.Join(m.StorageDirectory, "p2p.pem")
+    if _, err := os.Stat(keyfile); os.IsNotExist(err) {
+        oldkeyfile := filepath.Join(m.StorageDirectory, "p2p.key")
+        if _, err = os.Stat(oldkeyfile); os.IsNotExist(err) {
+            if err = test.NewMatrixKey(keyfile); err != nil {
+                panic("failed to generate a new PEM key: " + err.Error())
+            }
+            if _, sk, err = config.LoadMatrixKey(keyfile, os.ReadFile); err != nil {
+                panic("failed to load PEM key: " + err.Error())
+            }
+            if len(sk) != ed25519.PrivateKeySize {
+                panic("the private key is not long enough")
+            }
+        } else {
+            if sk, err = os.ReadFile(oldkeyfile); err != nil {
+                panic("failed to read the old private key: " + err.Error())
+            }
+            if len(sk) != ed25519.PrivateKeySize {
+                panic("the private key is not long enough")
+            }
+            if err := test.SaveMatrixKey(keyfile, sk); err != nil {
+                panic("failed to convert the private key to PEM format: " + err.Error())
+            }
+        }
+    } else {
+        var err error
+        if _, sk, err = config.LoadMatrixKey(keyfile, os.ReadFile); err != nil {
+            panic("failed to load PEM key: " + err.Error())
+        }
+        if len(sk) != ed25519.PrivateKeySize {
+            panic("the private key is not long enough")
+        }
+    }
+
+    pk = sk.Public().(ed25519.PublicKey)
+
     var err error
     m.listener, err = net.Listen("tcp", "localhost:65432")
     if err != nil {
         panic(err)
     }

-    ygg, err := yggconn.Setup("dendrite", m.StorageDirectory, "")
+    ygg, err := yggconn.Setup(sk, "dendrite", m.StorageDirectory, "", "")
     if err != nil {
         panic(err)
     }
     m.YggdrasilNode = ygg

     cfg := &config.Dendrite{}
-    cfg.Defaults(true)
-    cfg.Global.ServerName = gomatrixserverlib.ServerName(ygg.DerivedServerName())
-    cfg.Global.PrivateKey = ygg.PrivateKey()
+    cfg.Defaults(config.DefaultOpts{
+        Generate:   true,
+        Monolithic: true,
+    })
+    cfg.Global.ServerName = gomatrixserverlib.ServerName(hex.EncodeToString(pk))
+    cfg.Global.PrivateKey = sk
     cfg.Global.KeyID = gomatrixserverlib.KeyID(signing.KeyID)
     cfg.Global.JetStream.StoragePath = config.Path(fmt.Sprintf("%s/", m.StorageDirectory))
     cfg.Global.JetStream.InMemory = true
@@ -94,7 +141,6 @@ func (m *DendriteMonolith) Start() {
     cfg.RoomServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/dendrite-p2p-roomserver.db", m.StorageDirectory))
     cfg.KeyServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/dendrite-p2p-keyserver.db", m.StorageDirectory))
     cfg.FederationAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/dendrite-p2p-federationsender.db", m.StorageDirectory))
-    cfg.AppServiceAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/dendrite-p2p-appservice.db", m.StorageDirectory))
     cfg.MediaAPI.BasePath = config.Path(fmt.Sprintf("%s/tmp", m.StorageDirectory))
     cfg.MediaAPI.AbsBasePath = config.Path(fmt.Sprintf("%s/tmp", m.StorageDirectory))
     cfg.ClientAPI.RegistrationDisabled = false
@@ -1,3 +1,5 @@
+#syntax=docker/dockerfile:1.2
+
 FROM golang:1.18-stretch as build
 RUN apt-get update && apt-get install -y sqlite3
 WORKDIR /build

@@ -8,14 +10,12 @@ RUN mkdir /dendrite

 # Utilise Docker caching when downloading dependencies, this stops us needlessly
 # downloading dependencies every time.
-COPY go.mod .
-COPY go.sum .
-RUN go mod download
-
-COPY . .
-RUN go build -o /dendrite ./cmd/dendrite-monolith-server
-RUN go build -o /dendrite ./cmd/generate-keys
-RUN go build -o /dendrite ./cmd/generate-config
+RUN --mount=target=. \
+    --mount=type=cache,target=/go/pkg/mod \
+    --mount=type=cache,target=/root/.cache/go-build \
+    go build -o /dendrite ./cmd/generate-config && \
+    go build -o /dendrite ./cmd/generate-keys && \
+    go build -o /dendrite ./cmd/dendrite-monolith-server

 WORKDIR /dendrite
 RUN ./generate-keys --private-key matrix_key.pem

@@ -26,7 +26,7 @@ EXPOSE 8008 8448

 # At runtime, generate TLS cert based on the CA now mounted at /ca
 # At runtime, replace the SERVER_NAME with what we are told
-CMD ./generate-keys --server $SERVER_NAME --tls-cert server.crt --tls-key server.key --tls-authority-cert /complement/ca/ca.crt --tls-authority-key /complement/ca/ca.key && \
+CMD ./generate-keys -keysize 1024 --server $SERVER_NAME --tls-cert server.crt --tls-key server.key --tls-authority-cert /complement/ca/ca.crt --tls-authority-key /complement/ca/ca.key && \
     ./generate-config -server $SERVER_NAME --ci > dendrite.yaml && \
     cp /complement/ca/ca.crt /usr/local/share/ca-certificates/ && update-ca-certificates && \
-    ./dendrite-monolith-server --really-enable-open-registration --tls-cert server.crt --tls-key server.key --config dendrite.yaml -api=${API:-0}
+    exec ./dendrite-monolith-server --really-enable-open-registration --tls-cert server.crt --tls-key server.key --config dendrite.yaml -api=${API:-0}
@@ -1,3 +1,5 @@
+#syntax=docker/dockerfile:1.2
+
 # A local development Complement dockerfile, to be used with host mounts
 # /cache -> Contains the entire dendrite code at Dockerfile build time. Builds binaries but only keeps the generate-* ones. Pre-compilation saves time.
 # /dendrite -> Host-mounted sources

@@ -9,11 +11,10 @@
 FROM golang:1.18-stretch
 RUN apt-get update && apt-get install -y sqlite3

-WORKDIR /runtime
-
 ENV SERVER_NAME=localhost
 EXPOSE 8008 8448

+WORKDIR /runtime
 # This script compiles Dendrite for us.
 RUN echo '\
 #!/bin/bash -eux \n\

@@ -29,25 +30,23 @@ RUN echo '\
 RUN echo '\
 #!/bin/bash -eu \n\
 ./generate-keys --private-key matrix_key.pem \n\
-./generate-keys --server $SERVER_NAME --tls-cert server.crt --tls-key server.key --tls-authority-cert /complement/ca/ca.crt --tls-authority-key /complement/ca/ca.key \n\
+./generate-keys -keysize 1024 --server $SERVER_NAME --tls-cert server.crt --tls-key server.key --tls-authority-cert /complement/ca/ca.crt --tls-authority-key /complement/ca/ca.key \n\
 ./generate-config -server $SERVER_NAME --ci > dendrite.yaml \n\
 cp /complement/ca/ca.crt /usr/local/share/ca-certificates/ && update-ca-certificates \n\
-./dendrite-monolith-server --really-enable-open-registration --tls-cert server.crt --tls-key server.key --config dendrite.yaml \n\
+exec ./dendrite-monolith-server --really-enable-open-registration --tls-cert server.crt --tls-key server.key --config dendrite.yaml \n\
 ' > run.sh && chmod +x run.sh

 WORKDIR /cache
-# Pre-download deps; we don't need to do this if the GOPATH is mounted.
-COPY go.mod .
-COPY go.sum .
-RUN go mod download
-
 # Build the monolith in /cache - we won't actually use this but will rely on build artifacts to speed
 # up the real compilation. Build the generate-* binaries in the true /runtime locations.
 # If the generate-* source is changed, this dockerfile needs re-running.
-COPY . .
-RUN go build ./cmd/dendrite-monolith-server && go build -o /runtime ./cmd/generate-keys && go build -o /runtime ./cmd/generate-config
+RUN --mount=target=. \
+    --mount=type=cache,target=/go/pkg/mod \
+    --mount=type=cache,target=/root/.cache/go-build \
+    go build -o /runtime ./cmd/generate-config && \
+    go build -o /runtime ./cmd/generate-keys

 WORKDIR /runtime
-CMD /runtime/compile.sh && /runtime/run.sh
+CMD /runtime/compile.sh && exec /runtime/run.sh
@@ -1,3 +1,5 @@
+#syntax=docker/dockerfile:1.2
+
 FROM golang:1.18-stretch as build
 RUN apt-get update && apt-get install -y postgresql
 WORKDIR /build

@@ -26,14 +28,12 @@ RUN mkdir /dendrite

 # Utilise Docker caching when downloading dependencies, this stops us needlessly
 # downloading dependencies every time.
-COPY go.mod .
-COPY go.sum .
-RUN go mod download
-
-COPY . .
-RUN go build -o /dendrite ./cmd/dendrite-monolith-server
-RUN go build -o /dendrite ./cmd/generate-keys
-RUN go build -o /dendrite ./cmd/generate-config
+RUN --mount=target=. \
+    --mount=type=cache,target=/go/pkg/mod \
+    --mount=type=cache,target=/root/.cache/go-build \
+    go build -o /dendrite ./cmd/generate-config && \
+    go build -o /dendrite ./cmd/generate-keys && \
+    go build -o /dendrite ./cmd/dendrite-monolith-server

 WORKDIR /dendrite
 RUN ./generate-keys --private-key matrix_key.pem

@@ -45,10 +45,9 @@ EXPOSE 8008 8448

 # At runtime, generate TLS cert based on the CA now mounted at /ca
 # At runtime, replace the SERVER_NAME with what we are told
-CMD /build/run_postgres.sh && ./generate-keys --server $SERVER_NAME --tls-cert server.crt --tls-key server.key --tls-authority-cert /complement/ca/ca.crt --tls-authority-key /complement/ca/ca.key && \
-    ./generate-config -server $SERVER_NAME --ci > dendrite.yaml && \
-    # Replace the connection string with a single postgres DB, using user/db = 'postgres' and no password, bump max_conns
-    sed -i "s%connection_string:.*$%connection_string: postgresql://postgres@localhost/postgres?sslmode=disable%g" dendrite.yaml && \
-    sed -i 's/max_open_conns:.*$/max_open_conns: 100/g' dendrite.yaml && \
+CMD /build/run_postgres.sh && ./generate-keys --keysize 1024 --server $SERVER_NAME --tls-cert server.crt --tls-key server.key --tls-authority-cert /complement/ca/ca.crt --tls-authority-key /complement/ca/ca.key && \
+    ./generate-config -server $SERVER_NAME --ci --db postgresql://postgres@localhost/postgres?sslmode=disable > dendrite.yaml && \
+    # Bump max_open_conns up here in the global database config
+    sed -i 's/max_open_conns:.*$/max_open_conns: 1990/g' dendrite.yaml && \
     cp /complement/ca/ca.crt /usr/local/share/ca-certificates/ && update-ca-certificates && \
-    ./dendrite-monolith-server --really-enable-open-registration --tls-cert server.crt --tls-key server.key --config dendrite.yaml -api=${API:-0}
+    exec ./dendrite-monolith-server --really-enable-open-registration --tls-cert server.crt --tls-key server.key --config dendrite.yaml -api=${API:-0}
@@ -68,6 +68,12 @@ func (t *LoginTypePassword) Login(ctx context.Context, req interface{}) (*Login,
             JSON: jsonerror.BadJSON("A username must be supplied."),
         }
     }
+    if len(r.Password) == 0 {
+        return nil, &util.JSONResponse{
+            Code: http.StatusUnauthorized,
+            JSON: jsonerror.BadJSON("A password must be supplied."),
+        }
+    }
     localpart, err := userutil.ParseUsernameParam(username, &t.Config.Matrix.ServerName)
     if err != nil {
         return nil, &util.JSONResponse{
@@ -21,12 +21,13 @@ import (
     "strconv"
     "time"

-    "github.com/matrix-org/dendrite/setup/jetstream"
-    "github.com/matrix-org/dendrite/syncapi/types"
-    userapi "github.com/matrix-org/dendrite/userapi/api"
     "github.com/matrix-org/gomatrixserverlib"
     "github.com/nats-io/nats.go"
     log "github.com/sirupsen/logrus"
+
+    "github.com/matrix-org/dendrite/setup/jetstream"
+    "github.com/matrix-org/dendrite/syncapi/types"
+    userapi "github.com/matrix-org/dendrite/userapi/api"
 )

 // SyncAPIProducer produces events for the sync API server to consume
@@ -61,7 +62,7 @@ func (p *SyncAPIProducer) SendReceipt(

 func (p *SyncAPIProducer) SendToDevice(
     ctx context.Context, sender, userID, deviceID, eventType string,
-    message interface{},
+    message json.RawMessage,
 ) error {
     devices := []string{}
     _, domain, err := gomatrixserverlib.SplitID('@', userID)
@@ -89,24 +90,19 @@ func (p *SyncAPIProducer) SendToDevice(
         devices = append(devices, deviceID)
     }

-    js, err := json.Marshal(message)
-    if err != nil {
-        return err
-    }
-
     log.WithFields(log.Fields{
         "user_id":     userID,
         "num_devices": len(devices),
         "type":        eventType,
     }).Tracef("Producing to topic '%s'", p.TopicSendToDeviceEvent)
-    for _, device := range devices {
+    for i, device := range devices {
         ote := &types.OutputSendToDeviceEvent{
             UserID:   userID,
             DeviceID: device,
             SendToDeviceEvent: gomatrixserverlib.SendToDeviceEvent{
                 Sender:  sender,
                 Type:    eventType,
-                Content: js,
+                Content: message,
             },
         }

@@ -115,15 +111,17 @@ func (p *SyncAPIProducer) SendToDevice(
             log.WithError(err).Error("sendToDevice failed json.Marshal")
             return err
         }
-        m := &nats.Msg{
-            Subject: p.TopicSendToDeviceEvent,
-            Data:    eventJSON,
-            Header:  nats.Header{},
-        }
+        m := nats.NewMsg(p.TopicSendToDeviceEvent)
+        m.Data = eventJSON
         m.Header.Set("sender", sender)
         m.Header.Set(jetstream.UserID, userID)

         if _, err = p.JetStream.PublishMsg(m, nats.Context(ctx)); err != nil {
-            log.WithError(err).Error("sendToDevice failed t.Producer.SendMessage")
+            if i < len(devices)-1 {
+                log.WithError(err).Warn("sendToDevice failed to PublishMsg, trying further devices")
+                continue
+            }
+            log.WithError(err).Error("sendToDevice failed to PublishMsg for all devices")
             return err
         }
     }
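The producer now builds the message with nats.NewMsg so it can carry headers, and publishes it through JetStream with a request context. A stripped-down sketch of that publishing pattern, with an illustrative subject and header values:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/nats-io/nats.go"
)

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()

	js, err := nc.JetStream()
	if err != nil {
		log.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// nats.NewMsg initialises the header map, unlike a bare &nats.Msg{}.
	m := nats.NewMsg("OutputSendToDeviceEvent") // hypothetical subject
	m.Data = []byte(`{"type":"m.room_key_request"}`)
	m.Header.Set("sender", "@alice:example.com")
	m.Header.Set("user_id", "@bob:example.com")

	// Publish via JetStream; the context bounds how long we wait for an ack.
	if _, err := js.PublishMsg(m, nats.Context(ctx)); err != nil {
		log.Printf("publish failed: %v", err)
	}
}
```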
@@ -154,33 +154,31 @@ func SaveReadMarker(
         return *resErr
     }

-    if r.FullyRead == "" {
-        return util.JSONResponse{
-            Code: http.StatusBadRequest,
-            JSON: jsonerror.BadJSON("Missing m.fully_read mandatory field"),
-        }
-    }
-
-    data, err := json.Marshal(fullyReadEvent{EventID: r.FullyRead})
-    if err != nil {
-        return jsonerror.InternalServerError()
-    }
-
-    dataReq := api.InputAccountDataRequest{
-        UserID:      device.UserID,
-        DataType:    "m.fully_read",
-        RoomID:      roomID,
-        AccountData: data,
-    }
-    dataRes := api.InputAccountDataResponse{}
-    if err := userAPI.InputAccountData(req.Context(), &dataReq, &dataRes); err != nil {
-        util.GetLogger(req.Context()).WithError(err).Error("userAPI.InputAccountData failed")
-        return util.ErrorResponse(err)
-    }
-
-    // Handle the read receipt that may be included in the read marker
+    if r.FullyRead != "" {
+        data, err := json.Marshal(fullyReadEvent{EventID: r.FullyRead})
+        if err != nil {
+            return jsonerror.InternalServerError()
+        }
+
+        dataReq := api.InputAccountDataRequest{
+            UserID:      device.UserID,
+            DataType:    "m.fully_read",
+            RoomID:      roomID,
+            AccountData: data,
+        }
+        dataRes := api.InputAccountDataResponse{}
+        if err := userAPI.InputAccountData(req.Context(), &dataReq, &dataRes); err != nil {
+            util.GetLogger(req.Context()).WithError(err).Error("userAPI.InputAccountData failed")
+            return util.ErrorResponse(err)
+        }
+    }
+
+    // Handle the read receipts that may be included in the read marker.
     if r.Read != "" {
-        return SetReceipt(req, syncProducer, device, roomID, "m.read", r.Read)
+        return SetReceipt(req, userAPI, syncProducer, device, roomID, "m.read", r.Read)
+    }
+    if r.ReadPrivate != "" {
+        return SetReceipt(req, userAPI, syncProducer, device, roomID, "m.read.private", r.ReadPrivate)
     }

     return util.JSONResponse{
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/gorilla/mux"
|
"github.com/gorilla/mux"
|
||||||
"github.com/matrix-org/dendrite/clientapi/jsonerror"
|
|
||||||
"github.com/matrix-org/dendrite/internal/httputil"
|
|
||||||
roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
|
|
||||||
"github.com/matrix-org/dendrite/setup/config"
|
|
||||||
userapi "github.com/matrix-org/dendrite/userapi/api"
|
|
||||||
"github.com/matrix-org/gomatrixserverlib"
|
"github.com/matrix-org/gomatrixserverlib"
|
||||||
"github.com/matrix-org/util"
|
"github.com/matrix-org/util"
|
||||||
|
"github.com/nats-io/nats.go"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
|
||||||
|
"github.com/matrix-org/dendrite/clientapi/jsonerror"
|
||||||
|
"github.com/matrix-org/dendrite/internal/httputil"
|
||||||
|
"github.com/matrix-org/dendrite/keyserver/api"
|
||||||
|
roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
|
||||||
|
"github.com/matrix-org/dendrite/setup/config"
|
||||||
|
"github.com/matrix-org/dendrite/setup/jetstream"
|
||||||
|
userapi "github.com/matrix-org/dendrite/userapi/api"
|
||||||
)
|
)
|
||||||
|
|
||||||
func AdminEvacuateRoom(req *http.Request, cfg *config.ClientAPI, device *userapi.Device, rsAPI roomserverAPI.ClientRoomserverAPI) util.JSONResponse {
|
func AdminEvacuateRoom(req *http.Request, cfg *config.ClientAPI, device *userapi.Device, rsAPI roomserverAPI.ClientRoomserverAPI) util.JSONResponse {
|
||||||
|
|
@@ -138,3 +145,49 @@ func AdminResetPassword(req *http.Request, cfg *config.ClientAPI, device *userap
         },
     }
 }
+
+func AdminReindex(req *http.Request, cfg *config.ClientAPI, device *userapi.Device, natsClient *nats.Conn) util.JSONResponse {
+    _, err := natsClient.RequestMsg(nats.NewMsg(cfg.Matrix.JetStream.Prefixed(jetstream.InputFulltextReindex)), time.Second*10)
+    if err != nil {
+        logrus.WithError(err).Error("failed to publish nats message")
+        return jsonerror.InternalServerError()
+    }
+    return util.JSONResponse{
+        Code: http.StatusOK,
+        JSON: struct{}{},
+    }
+}
+
+func AdminMarkAsStale(req *http.Request, cfg *config.ClientAPI, keyAPI api.ClientKeyAPI) util.JSONResponse {
+    vars, err := httputil.URLDecodeMapValues(mux.Vars(req))
+    if err != nil {
+        return util.ErrorResponse(err)
+    }
+    userID := vars["userID"]
+
+    _, domain, err := gomatrixserverlib.SplitID('@', userID)
+    if err != nil {
+        return util.MessageResponse(http.StatusBadRequest, err.Error())
+    }
+    if domain == cfg.Matrix.ServerName {
+        return util.JSONResponse{
+            Code: http.StatusBadRequest,
+            JSON: jsonerror.InvalidParam("Can not mark local device list as stale"),
+        }
+    }
+
+    err = keyAPI.PerformMarkAsStaleIfNeeded(req.Context(), &api.PerformMarkAsStaleRequest{
+        UserID: userID,
+        Domain: domain,
+    }, &struct{}{})
+    if err != nil {
+        return util.JSONResponse{
+            Code: http.StatusInternalServerError,
+            JSON: jsonerror.Unknown(fmt.Sprintf("Failed to mark device list as stale: %s", err)),
+        }
+    }
+    return util.JSONResponse{
+        Code: http.StatusOK,
+        JSON: struct{}{},
+    }
+}
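AdminReindex above triggers the full-text reindex by sending a NATS request and waiting up to ten seconds for a reply. A stripped-down sketch of that request/reply pattern against a plain NATS connection, with a hypothetical subject name:

```go
package main

import (
	"log"
	"time"

	"github.com/nats-io/nats.go"
)

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()

	// Build the request as a Msg so headers could be attached if needed,
	// then block for up to ten seconds waiting for a responder.
	req := nats.NewMsg("Dendrite.InputFulltextReindex") // hypothetical subject
	reply, err := nc.RequestMsg(req, 10*time.Second)
	if err != nil {
		log.Printf("reindex request failed: %v", err) // e.g. nats: no responders
		return
	}
	log.Printf("reindex acknowledged: %s", string(reply.Data))
}
```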
@@ -49,6 +49,7 @@ type createRoomRequest struct {
     GuestCanJoin              bool                          `json:"guest_can_join"`
     RoomVersion               gomatrixserverlib.RoomVersion `json:"room_version"`
     PowerLevelContentOverride json.RawMessage               `json:"power_level_content_override"`
+    IsDirect                  bool                          `json:"is_direct"`
 }

 const (

@@ -499,9 +500,17 @@ func createRoom(
     // Build some stripped state for the invite.
     var globalStrippedState []gomatrixserverlib.InviteV2StrippedState
     for _, event := range builtEvents {
+        // Chosen events from the spec:
+        // https://spec.matrix.org/v1.3/client-server-api/#stripped-state
         switch event.Type() {
+        case gomatrixserverlib.MRoomCreate:
+            fallthrough
         case gomatrixserverlib.MRoomName:
             fallthrough
+        case gomatrixserverlib.MRoomAvatar:
+            fallthrough
+        case gomatrixserverlib.MRoomTopic:
+            fallthrough
         case gomatrixserverlib.MRoomCanonicalAlias:
             fallthrough
         case gomatrixserverlib.MRoomEncryption:

@@ -522,7 +531,7 @@ func createRoom(
     // Build the invite event.
     inviteEvent, err := buildMembershipEvent(
         ctx, invitee, "", profileAPI, device, gomatrixserverlib.Invite,
-        roomID, true, cfg, evTime, rsAPI, asAPI,
+        roomID, r.IsDirect, cfg, evTime, rsAPI, asAPI,
     )
     if err != nil {
         util.GetLogger(ctx).WithError(err).Error("buildMembershipEvent failed")
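The extended switch copies only spec-permitted event types into the invite's stripped state. A standalone sketch of the same filtering over plain event-type strings, listing only the types visible in this hunk:

```go
package main

import "fmt"

// strippedStateAllowed reports whether an event type may appear in stripped
// state, per https://spec.matrix.org/v1.3/client-server-api/#stripped-state.
// Only the types visible in the hunk above are listed here.
func strippedStateAllowed(eventType string) bool {
	switch eventType {
	case "m.room.create", "m.room.name", "m.room.avatar",
		"m.room.topic", "m.room.canonical_alias", "m.room.encryption":
		return true
	default:
		return false
	}
}

func main() {
	built := []string{"m.room.create", "m.room.power_levels", "m.room.name"}
	var stripped []string
	for _, t := range built {
		if strippedStateAllowed(t) {
			stripped = append(stripped, t)
		}
	}
	fmt.Println(stripped) // [m.room.create m.room.name]
}
```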
@@ -1,138 +0,0 @@ (entire file removed)
-// Copyright 2019 Alex Chen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package routing
-
-import (
-    "net/http"
-
-    "github.com/matrix-org/dendrite/clientapi/jsonerror"
-    "github.com/matrix-org/dendrite/roomserver/api"
-    "github.com/matrix-org/dendrite/setup/config"
-    userapi "github.com/matrix-org/dendrite/userapi/api"
-    "github.com/matrix-org/gomatrixserverlib"
-    "github.com/matrix-org/util"
-)
-
-type getEventRequest struct {
-    req            *http.Request
-    device         *userapi.Device
-    roomID         string
-    eventID        string
-    cfg            *config.ClientAPI
-    requestedEvent *gomatrixserverlib.Event
-}
-
-// GetEvent implements GET /_matrix/client/r0/rooms/{roomId}/event/{eventId}
-// https://matrix.org/docs/spec/client_server/r0.4.0.html#get-matrix-client-r0-rooms-roomid-event-eventid
-func GetEvent(
-    req *http.Request,
-    device *userapi.Device,
-    roomID string,
-    eventID string,
-    cfg *config.ClientAPI,
-    rsAPI api.ClientRoomserverAPI,
-) util.JSONResponse {
-    eventsReq := api.QueryEventsByIDRequest{
-        EventIDs: []string{eventID},
-    }
-    var eventsResp api.QueryEventsByIDResponse
-    err := rsAPI.QueryEventsByID(req.Context(), &eventsReq, &eventsResp)
-    if err != nil {
-        util.GetLogger(req.Context()).WithError(err).Error("queryAPI.QueryEventsByID failed")
-        return jsonerror.InternalServerError()
-    }
-
-    if len(eventsResp.Events) == 0 {
-        // Event not found locally
-        return util.JSONResponse{
-            Code: http.StatusNotFound,
-            JSON: jsonerror.NotFound("The event was not found or you do not have permission to read this event"),
-        }
-    }
-
-    requestedEvent := eventsResp.Events[0].Event
-
-    r := getEventRequest{
-        req:            req,
-        device:         device,
-        roomID:         roomID,
-        eventID:        eventID,
-        cfg:            cfg,
-        requestedEvent: requestedEvent,
-    }
-
-    stateReq := api.QueryStateAfterEventsRequest{
-        RoomID:       r.requestedEvent.RoomID(),
-        PrevEventIDs: r.requestedEvent.PrevEventIDs(),
-        StateToFetch: []gomatrixserverlib.StateKeyTuple{{
-            EventType: gomatrixserverlib.MRoomMember,
-            StateKey:  device.UserID,
-        }},
-    }
-    var stateResp api.QueryStateAfterEventsResponse
-    if err := rsAPI.QueryStateAfterEvents(req.Context(), &stateReq, &stateResp); err != nil {
-        util.GetLogger(req.Context()).WithError(err).Error("queryAPI.QueryStateAfterEvents failed")
-        return jsonerror.InternalServerError()
-    }
-
-    if !stateResp.RoomExists {
-        util.GetLogger(req.Context()).Errorf("Expected to find room for event %s but failed", r.requestedEvent.EventID())
-        return jsonerror.InternalServerError()
-    }
-
-    if !stateResp.PrevEventsExist {
-        // Missing some events locally; stateResp.StateEvents unavailable.
-        return util.JSONResponse{
-            Code: http.StatusNotFound,
-            JSON: jsonerror.NotFound("The event was not found or you do not have permission to read this event"),
-        }
-    }
-
-    var appService *config.ApplicationService
-    if device.AppserviceID != "" {
-        for _, as := range cfg.Derived.ApplicationServices {
-            if as.ID == device.AppserviceID {
-                appService = &as
-                break
-            }
-        }
-    }
-
-    for _, stateEvent := range stateResp.StateEvents {
-        if appService != nil {
-            if !appService.IsInterestedInUserID(*stateEvent.StateKey()) {
-                continue
-            }
-        } else if !stateEvent.StateKeyEquals(device.UserID) {
-            continue
-        }
-        membership, err := stateEvent.Membership()
-        if err != nil {
-            util.GetLogger(req.Context()).WithError(err).Error("stateEvent.Membership failed")
-            return jsonerror.InternalServerError()
-        }
-        if membership == gomatrixserverlib.Join {
-            return util.JSONResponse{
-                Code: http.StatusOK,
-                JSON: gomatrixserverlib.ToClientEvent(r.requestedEvent, gomatrixserverlib.FormatAll),
-            }
-        }
-    }
-
-    return util.JSONResponse{
-        Code: http.StatusNotFound,
-        JSON: jsonerror.NotFound("The event was not found or you do not have permission to read this event"),
-    }
-}
@@ -19,11 +19,12 @@ import (
     "net/http"
     "time"

+    "github.com/matrix-org/util"
+
     "github.com/matrix-org/dendrite/clientapi/httputil"
     "github.com/matrix-org/dendrite/clientapi/jsonerror"
     "github.com/matrix-org/dendrite/keyserver/api"
     userapi "github.com/matrix-org/dendrite/userapi/api"
-    "github.com/matrix-org/util"
 )

 type uploadKeysRequest struct {

@@ -77,7 +78,6 @@ func UploadKeys(req *http.Request, keyAPI api.ClientKeyAPI, device *userapi.Devi
         }
     }
     keyCount := make(map[string]int)
-    // we only return key counts when the client uploads OTKs
     if len(uploadRes.OneTimeKeyCounts) > 0 {
         keyCount = uploadRes.OneTimeKeyCounts[0].KeyCount
     }
@@ -19,6 +19,8 @@ import (
     "net/http"
     "time"

+    "github.com/matrix-org/gomatrixserverlib"
+
     appserviceAPI "github.com/matrix-org/dendrite/appservice/api"
     "github.com/matrix-org/dendrite/clientapi/auth/authtypes"
     "github.com/matrix-org/dendrite/clientapi/httputil"

@@ -27,7 +29,6 @@ import (
     "github.com/matrix-org/dendrite/roomserver/api"
     "github.com/matrix-org/dendrite/setup/config"
     userapi "github.com/matrix-org/dendrite/userapi/api"
-    "github.com/matrix-org/gomatrixserverlib"

     "github.com/matrix-org/gomatrix"
     "github.com/matrix-org/util"
@ -126,20 +127,6 @@ func SetAvatarURL(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
res := &userapi.QueryProfileResponse{}
|
|
||||||
err = profileAPI.QueryProfile(req.Context(), &userapi.QueryProfileRequest{
|
|
||||||
UserID: userID,
|
|
||||||
}, res)
|
|
||||||
if err != nil {
|
|
||||||
util.GetLogger(req.Context()).WithError(err).Error("profileAPI.QueryProfile failed")
|
|
||||||
return jsonerror.InternalServerError()
|
|
||||||
}
|
|
||||||
oldProfile := &authtypes.Profile{
|
|
||||||
Localpart: localpart,
|
|
||||||
DisplayName: res.DisplayName,
|
|
||||||
AvatarURL: res.AvatarURL,
|
|
||||||
}
|
|
||||||
|
|
||||||
setRes := &userapi.PerformSetAvatarURLResponse{}
|
setRes := &userapi.PerformSetAvatarURLResponse{}
|
||||||
if err = profileAPI.SetAvatarURL(req.Context(), &userapi.PerformSetAvatarURLRequest{
|
if err = profileAPI.SetAvatarURL(req.Context(), &userapi.PerformSetAvatarURLRequest{
|
||||||
Localpart: localpart,
|
Localpart: localpart,
|
||||||
|
|
@ -148,41 +135,17 @@ func SetAvatarURL(
|
||||||
util.GetLogger(req.Context()).WithError(err).Error("profileAPI.SetAvatarURL failed")
|
util.GetLogger(req.Context()).WithError(err).Error("profileAPI.SetAvatarURL failed")
|
||||||
return jsonerror.InternalServerError()
|
return jsonerror.InternalServerError()
|
||||||
}
|
}
|
||||||
|
// No need to build new membership events, since nothing changed
|
||||||
var roomsRes api.QueryRoomsForUserResponse
|
if !setRes.Changed {
|
||||||
err = rsAPI.QueryRoomsForUser(req.Context(), &api.QueryRoomsForUserRequest{
|
|
||||||
UserID: device.UserID,
|
|
||||||
WantMembership: "join",
|
|
||||||
}, &roomsRes)
|
|
||||||
if err != nil {
|
|
||||||
util.GetLogger(req.Context()).WithError(err).Error("QueryRoomsForUser failed")
|
|
||||||
return jsonerror.InternalServerError()
|
|
||||||
}
|
|
||||||
|
|
||||||
newProfile := authtypes.Profile{
|
|
||||||
Localpart: localpart,
|
|
||||||
DisplayName: oldProfile.DisplayName,
|
|
||||||
AvatarURL: r.AvatarURL,
|
|
||||||
}
|
|
||||||
|
|
||||||
events, err := buildMembershipEvents(
|
|
||||||
req.Context(), roomsRes.RoomIDs, newProfile, userID, cfg, evTime, rsAPI,
|
|
||||||
)
|
|
||||||
switch e := err.(type) {
|
|
||||||
case nil:
|
|
||||||
case gomatrixserverlib.BadJSONError:
|
|
||||||
return util.JSONResponse{
|
return util.JSONResponse{
|
||||||
Code: http.StatusBadRequest,
|
Code: http.StatusOK,
|
||||||
JSON: jsonerror.BadJSON(e.Error()),
|
JSON: struct{}{},
|
||||||
}
|
}
|
||||||
default:
|
|
||||||
util.GetLogger(req.Context()).WithError(err).Error("buildMembershipEvents failed")
|
|
||||||
return jsonerror.InternalServerError()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := api.SendEvents(req.Context(), rsAPI, api.KindNew, events, cfg.Matrix.ServerName, cfg.Matrix.ServerName, nil, true); err != nil {
|
response, err := updateProfile(req.Context(), rsAPI, device, setRes.Profile, userID, cfg, evTime)
|
||||||
util.GetLogger(req.Context()).WithError(err).Error("SendEvents failed")
|
if err != nil {
|
||||||
return jsonerror.InternalServerError()
|
return response
|
||||||
}
|
}
|
||||||
|
|
||||||
return util.JSONResponse{
|
return util.JSONResponse{
|
||||||
|
|
@@ -255,47 +218,51 @@ func SetDisplayName(
         }
     }
 
-    pRes := &userapi.QueryProfileResponse{}
-    err = profileAPI.QueryProfile(req.Context(), &userapi.QueryProfileRequest{
-        UserID: userID,
-    }, pRes)
-    if err != nil {
-        util.GetLogger(req.Context()).WithError(err).Error("profileAPI.QueryProfile failed")
-        return jsonerror.InternalServerError()
-    }
-    oldProfile := &authtypes.Profile{
-        Localpart:   localpart,
-        DisplayName: pRes.DisplayName,
-        AvatarURL:   pRes.AvatarURL,
-    }
+    profileRes := &userapi.PerformUpdateDisplayNameResponse{}
     err = profileAPI.SetDisplayName(req.Context(), &userapi.PerformUpdateDisplayNameRequest{
         Localpart:   localpart,
         DisplayName: r.DisplayName,
-    }, &struct{}{})
+    }, profileRes)
     if err != nil {
         util.GetLogger(req.Context()).WithError(err).Error("profileAPI.SetDisplayName failed")
         return jsonerror.InternalServerError()
     }
+    // No need to build new membership events, since nothing changed
+    if !profileRes.Changed {
+        return util.JSONResponse{
+            Code: http.StatusOK,
+            JSON: struct{}{},
+        }
+    }
+
+    response, err := updateProfile(req.Context(), rsAPI, device, profileRes.Profile, userID, cfg, evTime)
+    if err != nil {
+        return response
+    }
+
+    return util.JSONResponse{
+        Code: http.StatusOK,
+        JSON: struct{}{},
+    }
+}
+
+func updateProfile(
+    ctx context.Context, rsAPI api.ClientRoomserverAPI, device *userapi.Device,
+    profile *authtypes.Profile,
+    userID string, cfg *config.ClientAPI, evTime time.Time,
+) (util.JSONResponse, error) {
     var res api.QueryRoomsForUserResponse
-    err = rsAPI.QueryRoomsForUser(req.Context(), &api.QueryRoomsForUserRequest{
+    err := rsAPI.QueryRoomsForUser(ctx, &api.QueryRoomsForUserRequest{
         UserID:         device.UserID,
         WantMembership: "join",
     }, &res)
     if err != nil {
-        util.GetLogger(req.Context()).WithError(err).Error("QueryRoomsForUser failed")
-        return jsonerror.InternalServerError()
-    }
-
-    newProfile := authtypes.Profile{
-        Localpart:   localpart,
-        DisplayName: r.DisplayName,
-        AvatarURL:   oldProfile.AvatarURL,
+        util.GetLogger(ctx).WithError(err).Error("QueryRoomsForUser failed")
+        return jsonerror.InternalServerError(), err
     }
 
     events, err := buildMembershipEvents(
-        req.Context(), res.RoomIDs, newProfile, userID, cfg, evTime, rsAPI,
+        ctx, res.RoomIDs, *profile, userID, cfg, evTime, rsAPI,
     )
     switch e := err.(type) {
     case nil:
@@ -303,21 +270,17 @@ func SetDisplayName(
         return util.JSONResponse{
             Code: http.StatusBadRequest,
             JSON: jsonerror.BadJSON(e.Error()),
-        }
+        }, e
     default:
-        util.GetLogger(req.Context()).WithError(err).Error("buildMembershipEvents failed")
-        return jsonerror.InternalServerError()
+        util.GetLogger(ctx).WithError(err).Error("buildMembershipEvents failed")
+        return jsonerror.InternalServerError(), e
     }
 
-    if err := api.SendEvents(req.Context(), rsAPI, api.KindNew, events, cfg.Matrix.ServerName, cfg.Matrix.ServerName, nil, true); err != nil {
-        util.GetLogger(req.Context()).WithError(err).Error("SendEvents failed")
-        return jsonerror.InternalServerError()
+    if err := api.SendEvents(ctx, rsAPI, api.KindNew, events, cfg.Matrix.ServerName, cfg.Matrix.ServerName, nil, true); err != nil {
+        util.GetLogger(ctx).WithError(err).Error("SendEvents failed")
+        return jsonerror.InternalServerError(), err
     }
 
-    return util.JSONResponse{
-        Code: http.StatusOK,
-        JSON: struct{}{},
-    }
+    return util.JSONResponse{}, nil
 }
 
 // getProfile gets the full profile of a user by querying the database or a
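Both the avatar and display name handlers now funnel through the shared updateProfile helper above: when the stored profile actually changed, a fresh m.room.member event is built for every joined room so the new values propagate locally and over federation. The content that such a membership event carries is, per the Matrix spec, roughly the following; this is an illustrative shape only, not Dendrite's internal type:

package main

import (
	"encoding/json"
	"fmt"
)

// memberContent sketches the m.room.member event content that carries profile data.
type memberContent struct {
	Membership  string `json:"membership"`
	DisplayName string `json:"displayname,omitempty"`
	AvatarURL   string `json:"avatar_url,omitempty"`
}

func main() {
	content := memberContent{
		Membership:  "join",
		DisplayName: "Alice",                      // hypothetical values
		AvatarURL:   "mxc://example.org/abc123",   // hypothetical values
	}
	b, _ := json.Marshal(content)
	fmt.Println(string(b)) // {"membership":"join","displayname":"Alice","avatar_url":"mxc://example.org/abc123"}
}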
@@ -15,19 +15,22 @@
 package routing
 
 import (
+    "encoding/json"
     "fmt"
     "net/http"
     "time"
 
+    "github.com/matrix-org/dendrite/clientapi/jsonerror"
     "github.com/matrix-org/dendrite/clientapi/producers"
     "github.com/matrix-org/gomatrixserverlib"
 
+    "github.com/matrix-org/dendrite/userapi/api"
     userapi "github.com/matrix-org/dendrite/userapi/api"
     "github.com/matrix-org/util"
     "github.com/sirupsen/logrus"
 )
 
-func SetReceipt(req *http.Request, syncProducer *producers.SyncAPIProducer, device *userapi.Device, roomID, receiptType, eventID string) util.JSONResponse {
+func SetReceipt(req *http.Request, userAPI api.ClientUserAPI, syncProducer *producers.SyncAPIProducer, device *userapi.Device, roomID, receiptType, eventID string) util.JSONResponse {
     timestamp := gomatrixserverlib.AsTimestamp(time.Now())
     logrus.WithFields(logrus.Fields{
         "roomID": roomID,
@@ -37,13 +40,32 @@ func SetReceipt(req *http.Request, syncProducer *producers.SyncAPIProducer, devi
         "timestamp": timestamp,
     }).Debug("Setting receipt")
 
-    // currently only m.read is accepted
-    if receiptType != "m.read" {
-        return util.MessageResponse(400, fmt.Sprintf("receipt type must be m.read not '%s'", receiptType))
-    }
-
-    if err := syncProducer.SendReceipt(req.Context(), device.UserID, roomID, eventID, receiptType, timestamp); err != nil {
-        return util.ErrorResponse(err)
+    switch receiptType {
+    case "m.read", "m.read.private":
+        if err := syncProducer.SendReceipt(req.Context(), device.UserID, roomID, eventID, receiptType, timestamp); err != nil {
+            return util.ErrorResponse(err)
+        }
+
+    case "m.fully_read":
+        data, err := json.Marshal(fullyReadEvent{EventID: eventID})
+        if err != nil {
+            return jsonerror.InternalServerError()
+        }
+
+        dataReq := api.InputAccountDataRequest{
+            UserID:      device.UserID,
+            DataType:    "m.fully_read",
+            RoomID:      roomID,
+            AccountData: data,
+        }
+        dataRes := api.InputAccountDataResponse{}
+        if err := userAPI.InputAccountData(req.Context(), &dataReq, &dataRes); err != nil {
+            util.GetLogger(req.Context()).WithError(err).Error("userAPI.InputAccountData failed")
+            return util.ErrorResponse(err)
+        }
+
+    default:
+        return util.MessageResponse(400, fmt.Sprintf("Receipt type '%s' not known", receiptType))
     }
 
     return util.JSONResponse{
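With this change, read receipts (m.read and the new m.read.private) still flow through the sync producer, while m.fully_read markers are persisted as per-room account data of type m.fully_read instead. The payload is the marshalled fullyReadEvent; its struct definition sits outside this diff, but per the spec the account data carries just the event ID, roughly as sketched below:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Illustrative only: assumes fullyReadEvent marshals to {"event_id": "..."},
	// which is the shape m.fully_read account data takes in the Matrix spec.
	payload := struct {
		EventID string `json:"event_id"`
	}{EventID: "$143273582443PhrSn:example.org"} // placeholder event ID

	data, err := json.Marshal(payload)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // {"event_id":"$143273582443PhrSn:example.org"}
}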
@@ -19,6 +19,9 @@ import (
     "net/http"
     "time"
 
+    "github.com/matrix-org/gomatrixserverlib"
+    "github.com/matrix-org/util"
+
     "github.com/matrix-org/dendrite/clientapi/httputil"
     "github.com/matrix-org/dendrite/clientapi/jsonerror"
     "github.com/matrix-org/dendrite/internal/eventutil"
@@ -26,8 +29,6 @@ import (
     roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
     "github.com/matrix-org/dendrite/setup/config"
     userapi "github.com/matrix-org/dendrite/userapi/api"
-    "github.com/matrix-org/gomatrixserverlib"
-    "github.com/matrix-org/util"
 )
 
 type redactionContent struct {
@@ -51,7 +52,7 @@ func SendRedaction(
 
     if txnID != nil {
         // Try to fetch response from transactionsCache
-        if res, ok := txnCache.FetchTransaction(device.AccessToken, *txnID); ok {
+        if res, ok := txnCache.FetchTransaction(device.AccessToken, *txnID, req.URL); ok {
             return *res
         }
     }
@@ -144,7 +145,7 @@ func SendRedaction(
 
     // Add response to transactionsCache
     if txnID != nil {
-        txnCache.AddTransaction(device.AccessToken, *txnID, &res)
+        txnCache.AddTransaction(device.AccessToken, *txnID, req.URL, &res)
     }
 
     return res
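FetchTransaction and AddTransaction now also take the request URL, so the idempotency cache is keyed on (access token, transaction ID, endpoint) rather than just (access token, transaction ID): reusing the same txnId against a different endpoint, say a redaction after a normal send, no longer returns the other endpoint's cached response. A simplified sketch of the idea, not the actual internal/transactions implementation:

package main

import (
	"fmt"
	"net/url"
	"sync"
)

// txnKey keys a cached response on token, transaction ID and the endpoint path.
type txnKey struct {
	accessToken string
	txnID       string
	path        string
}

type txnCache struct {
	mu      sync.Mutex
	entries map[txnKey]string // cached response bodies, simplified to strings
}

func newTxnCache() *txnCache { return &txnCache{entries: make(map[txnKey]string)} }

func (c *txnCache) Fetch(token, txnID string, u *url.URL) (string, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	res, ok := c.entries[txnKey{token, txnID, u.Path}]
	return res, ok
}

func (c *txnCache) Add(token, txnID string, u *url.URL, res string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.entries[txnKey{token, txnID, u.Path}] = res
}

func main() {
	cache := newTxnCache()
	send, _ := url.Parse("/_matrix/client/v3/rooms/!r:x/send/m.room.message/txn1")
	redact, _ := url.Parse("/_matrix/client/v3/rooms/!r:x/redact/$e:x/txn1")

	cache.Add("token", "txn1", send, `{"event_id":"$sent"}`)
	if _, ok := cache.Fetch("token", "txn1", redact); !ok {
		// Same txn1 on a different endpoint is a cache miss, as intended.
		fmt.Println("cache miss for txn1 on the redaction endpoint")
	}
}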
@@ -293,19 +293,19 @@ type recaptchaResponse struct {
 }
 
 // validateUsername returns an error response if the username is invalid
-func validateUsername(username string) *util.JSONResponse {
+func validateUsername(localpart string, domain gomatrixserverlib.ServerName) *util.JSONResponse {
     // https://github.com/matrix-org/synapse/blob/v0.20.0/synapse/rest/client/v2_alpha/register.py#L161
-    if len(username) > maxUsernameLength {
+    if id := fmt.Sprintf("@%s:%s", localpart, domain); len(id) > maxUsernameLength {
         return &util.JSONResponse{
             Code: http.StatusBadRequest,
-            JSON: jsonerror.BadJSON(fmt.Sprintf("'username' >%d characters", maxUsernameLength)),
+            JSON: jsonerror.BadJSON(fmt.Sprintf("%q exceeds the maximum length of %d characters", id, maxUsernameLength)),
         }
-    } else if !validUsernameRegex.MatchString(username) {
+    } else if !validUsernameRegex.MatchString(localpart) {
         return &util.JSONResponse{
             Code: http.StatusBadRequest,
             JSON: jsonerror.InvalidUsername("Username can only contain characters a-z, 0-9, or '_-./='"),
         }
-    } else if username[0] == '_' { // Regex checks its not a zero length string
+    } else if localpart[0] == '_' { // Regex checks its not a zero length string
         return &util.JSONResponse{
             Code: http.StatusBadRequest,
             JSON: jsonerror.InvalidUsername("Username cannot start with a '_'"),
@@ -315,13 +315,13 @@ func validateUsername(username string) *util.JSONResponse {
 }
 
 // validateApplicationServiceUsername returns an error response if the username is invalid for an application service
-func validateApplicationServiceUsername(username string) *util.JSONResponse {
-    if len(username) > maxUsernameLength {
+func validateApplicationServiceUsername(localpart string, domain gomatrixserverlib.ServerName) *util.JSONResponse {
+    if id := fmt.Sprintf("@%s:%s", localpart, domain); len(id) > maxUsernameLength {
         return &util.JSONResponse{
             Code: http.StatusBadRequest,
-            JSON: jsonerror.BadJSON(fmt.Sprintf("'username' >%d characters", maxUsernameLength)),
+            JSON: jsonerror.BadJSON(fmt.Sprintf("%q exceeds the maximum length of %d characters", id, maxUsernameLength)),
         }
-    } else if !validUsernameRegex.MatchString(username) {
+    } else if !validUsernameRegex.MatchString(localpart) {
         return &util.JSONResponse{
             Code: http.StatusBadRequest,
             JSON: jsonerror.InvalidUsername("Username can only contain characters a-z, 0-9, or '_-./='"),
@@ -540,7 +540,7 @@ func validateApplicationService(
     }
 
     // Check username application service is trying to register is valid
-    if err := validateApplicationServiceUsername(username); err != nil {
+    if err := validateApplicationServiceUsername(username, cfg.Matrix.ServerName); err != nil {
         return "", err
     }
 
@@ -621,7 +621,7 @@ func Register(
     case r.Type == authtypes.LoginTypeApplicationService && accessTokenErr == nil:
         // Spec-compliant case (the access_token is specified and the login type
         // is correctly set, so it's an appservice registration)
-        if resErr := validateApplicationServiceUsername(r.Username); resErr != nil {
+        if resErr := validateApplicationServiceUsername(r.Username, cfg.Matrix.ServerName); resErr != nil {
             return *resErr
         }
     case accessTokenErr == nil:
@@ -638,7 +638,7 @@ func Register(
     default:
         // Spec-compliant case (neither the access_token nor the login type are
         // specified, so it's a normal user registration)
-        if resErr := validateUsername(r.Username); resErr != nil {
+        if resErr := validateUsername(r.Username, cfg.Matrix.ServerName); resErr != nil {
             return *resErr
         }
     }
@@ -1050,7 +1050,7 @@ func RegisterAvailable(
     // Squash username to all lowercase letters
     username = strings.ToLower(username)
 
-    if err := validateUsername(username); err != nil {
+    if err := validateUsername(username, cfg.Matrix.ServerName); err != nil {
         return *err
     }
 
@@ -1091,7 +1091,7 @@ func RegisterAvailable(
     }
 }
 
-func handleSharedSecretRegistration(userAPI userapi.ClientUserAPI, sr *SharedSecretRegistration, req *http.Request) util.JSONResponse {
+func handleSharedSecretRegistration(cfg *config.ClientAPI, userAPI userapi.ClientUserAPI, sr *SharedSecretRegistration, req *http.Request) util.JSONResponse {
     ssrr, err := NewSharedSecretRegistrationRequest(req.Body)
     if err != nil {
         return util.JSONResponse{
@@ -1112,7 +1112,7 @@ func handleSharedSecretRegistration(userAPI userapi.ClientUserAPI, sr *SharedSec
     // downcase capitals
     ssrr.User = strings.ToLower(ssrr.User)
 
-    if resErr := validateUsername(ssrr.User); resErr != nil {
+    if resErr := validateUsername(ssrr.User, cfg.Matrix.ServerName); resErr != nil {
         return *resErr
     }
     if resErr := validatePassword(ssrr.Password); resErr != nil {
@@ -181,7 +181,10 @@ func TestValidationOfApplicationServices(t *testing.T) {
     // Set up a config
     fakeConfig := &config.Dendrite{}
-    fakeConfig.Defaults(true)
+    fakeConfig.Defaults(config.DefaultOpts{
+        Generate:   true,
+        Monolithic: true,
+    })
     fakeConfig.Global.ServerName = "localhost"
     fakeConfig.ClientAPI.Derived.ApplicationServices = []config.ApplicationService{fakeApplicationService}
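validateUsername and validateApplicationServiceUsername now measure the length of the complete user ID (@localpart:server_name) rather than the bare localpart, which is why they take the server name as an extra argument: the Matrix spec limits the full user ID, not the localpart, to 255 bytes, and maxUsernameLength is assumed here to hold that limit. A standalone sketch of the check:

package main

import "fmt"

// maxUserIDLength mirrors the spec's limit on the full user ID; the exact
// constant name and value used by Dendrite (maxUsernameLength) is assumed.
const maxUserIDLength = 255

func userIDTooLong(localpart, domain string) (string, bool) {
	id := fmt.Sprintf("@%s:%s", localpart, domain)
	return id, len(id) > maxUserIDLength
}

func main() {
	if id, tooLong := userIDTooLong("alice", "example.com"); !tooLong {
		fmt.Printf("%q fits within %d bytes\n", id, maxUserIDLength)
	}
}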
@@ -20,6 +20,12 @@ import (
     "strings"
 
     "github.com/gorilla/mux"
+    "github.com/matrix-org/gomatrixserverlib"
+    "github.com/matrix-org/util"
+    "github.com/nats-io/nats.go"
+    "github.com/prometheus/client_golang/prometheus"
+    "github.com/sirupsen/logrus"
+
     appserviceAPI "github.com/matrix-org/dendrite/appservice/api"
     "github.com/matrix-org/dendrite/clientapi/api"
     "github.com/matrix-org/dendrite/clientapi/auth"
@@ -34,11 +40,6 @@ import (
     "github.com/matrix-org/dendrite/setup/config"
     "github.com/matrix-org/dendrite/setup/jetstream"
     userapi "github.com/matrix-org/dendrite/userapi/api"
-    "github.com/matrix-org/gomatrixserverlib"
-    "github.com/matrix-org/util"
-    "github.com/nats-io/nats.go"
-    "github.com/prometheus/client_golang/prometheus"
-    "github.com/sirupsen/logrus"
 )
 
 // Setup registers HTTP handlers with the given ServeMux. It also supplies the given http.Client
@@ -69,6 +70,7 @@ func Setup(
 
     unstableFeatures := map[string]bool{
         "org.matrix.e2e_cross_signing": true,
+        "org.matrix.msc2285.stable":    true,
     }
     for _, msc := range cfg.MSCs.MSCs {
         unstableFeatures["org.matrix."+msc] = true
@@ -133,7 +135,7 @@ func Setup(
             }
         }
         if req.Method == http.MethodPost {
-            return handleSharedSecretRegistration(userAPI, sr, req)
+            return handleSharedSecretRegistration(cfg, userAPI, sr, req)
         }
         return util.JSONResponse{
             Code: http.StatusMethodNotAllowed,
@@ -161,10 +163,22 @@ func Setup(
         }),
     ).Methods(http.MethodPost, http.MethodOptions)
 
+    dendriteAdminRouter.Handle("/admin/fulltext/reindex",
+        httputil.MakeAdminAPI("admin_fultext_reindex", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
+            return AdminReindex(req, cfg, device, natsClient)
+        }),
+    ).Methods(http.MethodGet, http.MethodOptions)
+
+    dendriteAdminRouter.Handle("/admin/refreshDevices/{userID}",
+        httputil.MakeAdminAPI("admin_refresh_devices", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
+            return AdminMarkAsStale(req, cfg, keyAPI)
+        }),
+    ).Methods(http.MethodPost, http.MethodOptions)
+
     // server notifications
     if cfg.Matrix.ServerNotices.Enabled {
         logrus.Info("Enabling server notices at /_synapse/admin/v1/send_server_notice")
-        serverNotificationSender, err := getSenderDevice(context.Background(), userAPI, cfg)
+        serverNotificationSender, err := getSenderDevice(context.Background(), rsAPI, userAPI, cfg)
         if err != nil {
             logrus.WithError(err).Fatal("unable to get account for sending sending server notices")
         }
@@ -354,15 +368,6 @@ func Setup(
                 nil, cfg, rsAPI, transactionsCache)
         }),
     ).Methods(http.MethodPut, http.MethodOptions)
-    v3mux.Handle("/rooms/{roomID}/event/{eventID}",
-        httputil.MakeAuthAPI("rooms_get_event", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
-            vars, err := httputil.URLDecodeMapValues(mux.Vars(req))
-            if err != nil {
-                return util.ErrorResponse(err)
-            }
-            return GetEvent(req, device, vars["roomID"], vars["eventID"], cfg, rsAPI)
-        }),
-    ).Methods(http.MethodGet, http.MethodOptions)
 
     v3mux.Handle("/rooms/{roomID}/state", httputil.MakeAuthAPI("room_state", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
         vars, err := httputil.URLDecodeMapValues(mux.Vars(req))
@@ -1339,7 +1344,7 @@ func Setup(
                 return util.ErrorResponse(err)
             }
 
-            return SetReceipt(req, syncProducer, device, vars["roomId"], vars["receiptType"], vars["eventId"])
+            return SetReceipt(req, userAPI, syncProducer, device, vars["roomId"], vars["receiptType"], vars["eventId"])
         }),
     ).Methods(http.MethodPost, http.MethodOptions)
     v3mux.Handle("/presence/{userId}/status",
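Two Dendrite admin routes are added here: GET /admin/fulltext/reindex, which rebuilds the sync API full-text index, and POST /admin/refreshDevices/{userID}, which marks a remote user's device list as stale. Assuming the admin router is mounted under /_dendrite, as it is for the resetPassword admin API mentioned elsewhere in this change, a reindex could be triggered roughly like this (host and token are placeholders, and the access token must belong to an admin account):

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	const baseURL = "http://localhost:8008"                  // hypothetical homeserver address
	const adminToken = "ACCESS_TOKEN_OF_AN_ADMIN_ACCOUNT"    // placeholder

	req, err := http.NewRequest(http.MethodGet, baseURL+"/_dendrite/admin/fulltext/reindex", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+adminToken)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}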
@@ -86,7 +86,7 @@ func SendEvent(
 
     if txnID != nil {
         // Try to fetch response from transactionsCache
-        if res, ok := txnCache.FetchTransaction(device.AccessToken, *txnID); ok {
+        if res, ok := txnCache.FetchTransaction(device.AccessToken, *txnID, req.URL); ok {
             return *res
         }
     }
@@ -206,7 +206,7 @@ func SendEvent(
     }
     // Add response to transactionsCache
     if txnID != nil {
-        txnCache.AddTransaction(device.AccessToken, *txnID, &res)
+        txnCache.AddTransaction(device.AccessToken, *txnID, req.URL, &res)
     }
 
     // Take a note of how long it took to generate the event vs submit
@@ -16,12 +16,13 @@ import (
     "encoding/json"
     "net/http"
 
+    "github.com/matrix-org/util"
+
     "github.com/matrix-org/dendrite/clientapi/httputil"
     "github.com/matrix-org/dendrite/clientapi/jsonerror"
     "github.com/matrix-org/dendrite/clientapi/producers"
     "github.com/matrix-org/dendrite/internal/transactions"
     userapi "github.com/matrix-org/dendrite/userapi/api"
-    "github.com/matrix-org/util"
 )
 
 // SendToDevice handles PUT /_matrix/client/r0/sendToDevice/{eventType}/{txnId}
@@ -33,7 +34,7 @@ func SendToDevice(
     eventType string, txnID *string,
 ) util.JSONResponse {
     if txnID != nil {
-        if res, ok := txnCache.FetchTransaction(device.AccessToken, *txnID); ok {
+        if res, ok := txnCache.FetchTransaction(device.AccessToken, *txnID, req.URL); ok {
             return *res
         }
     }
@@ -63,7 +64,7 @@ func SendToDevice(
     }
 
     if txnID != nil {
-        txnCache.AddTransaction(device.AccessToken, *txnID, &res)
+        txnCache.AddTransaction(device.AccessToken, *txnID, req.URL, &res)
     }
 
     return res
@@ -21,7 +21,6 @@ import (
     "net/http"
     "time"
 
-    "github.com/matrix-org/dendrite/roomserver/version"
     "github.com/matrix-org/gomatrix"
     "github.com/matrix-org/gomatrixserverlib"
     "github.com/matrix-org/gomatrixserverlib/tokens"
@@ -29,6 +28,8 @@ import (
     "github.com/prometheus/client_golang/prometheus"
     "github.com/sirupsen/logrus"
 
+    "github.com/matrix-org/dendrite/roomserver/version"
+
     appserviceAPI "github.com/matrix-org/dendrite/appservice/api"
     "github.com/matrix-org/dendrite/clientapi/httputil"
     "github.com/matrix-org/dendrite/clientapi/jsonerror"
@@ -73,7 +74,7 @@ func SendServerNotice(
 
     if txnID != nil {
         // Try to fetch response from transactionsCache
-        if res, ok := txnCache.FetchTransaction(device.AccessToken, *txnID); ok {
+        if res, ok := txnCache.FetchTransaction(device.AccessToken, *txnID, req.URL); ok {
             return *res
         }
     }
@@ -251,7 +252,7 @@ func SendServerNotice(
     }
     // Add response to transactionsCache
     if txnID != nil {
-        txnCache.AddTransaction(device.AccessToken, *txnID, &res)
+        txnCache.AddTransaction(device.AccessToken, *txnID, req.URL, &res)
     }
 
     // Take a note of how long it took to generate the event vs submit
@@ -276,6 +277,7 @@ func (r sendServerNoticeRequest) valid() (ok bool) {
 // It returns an userapi.Device, which is used for building the event
 func getSenderDevice(
     ctx context.Context,
+    rsAPI api.ClientRoomserverAPI,
     userAPI userapi.ClientUserAPI,
     cfg *config.ClientAPI,
 ) (*userapi.Device, error) {
@@ -290,16 +292,32 @@ func getSenderDevice(
         return nil, err
     }
 
-    // set the avatarurl for the user
-    res := &userapi.PerformSetAvatarURLResponse{}
+    // Set the avatarurl for the user
+    avatarRes := &userapi.PerformSetAvatarURLResponse{}
     if err = userAPI.SetAvatarURL(ctx, &userapi.PerformSetAvatarURLRequest{
         Localpart: cfg.Matrix.ServerNotices.LocalPart,
         AvatarURL: cfg.Matrix.ServerNotices.AvatarURL,
-    }, res); err != nil {
+    }, avatarRes); err != nil {
         util.GetLogger(ctx).WithError(err).Error("userAPI.SetAvatarURL failed")
         return nil, err
     }
 
+    profile := avatarRes.Profile
+
+    // Set the displayname for the user
+    displayNameRes := &userapi.PerformUpdateDisplayNameResponse{}
+    if err = userAPI.SetDisplayName(ctx, &userapi.PerformUpdateDisplayNameRequest{
+        Localpart:   cfg.Matrix.ServerNotices.LocalPart,
+        DisplayName: cfg.Matrix.ServerNotices.DisplayName,
+    }, displayNameRes); err != nil {
+        util.GetLogger(ctx).WithError(err).Error("userAPI.SetDisplayName failed")
+        return nil, err
+    }
+
+    if displayNameRes.Changed {
+        profile.DisplayName = cfg.Matrix.ServerNotices.DisplayName
+    }
+
     // Check if we got existing devices
     deviceRes := &userapi.QueryDevicesResponse{}
     err = userAPI.QueryDevices(ctx, &userapi.QueryDevicesRequest{
@@ -309,7 +327,15 @@ func getSenderDevice(
         return nil, err
     }
 
+    // We've got an existing account, return the first device of it
     if len(deviceRes.Devices) > 0 {
+        // If there were changes to the profile, create a new membership event
+        if displayNameRes.Changed || avatarRes.Changed {
+            _, err = updateProfile(ctx, rsAPI, &deviceRes.Devices[0], profile, accRes.Account.UserID, cfg, time.Now())
+            if err != nil {
+                return nil, err
+            }
+        }
         return &deviceRes.Devices[0], nil
     }
@@ -64,12 +64,13 @@ var (
     pwdStdin           = flag.Bool("passwordstdin", false, "Reads the password from stdin")
     isAdmin            = flag.Bool("admin", false, "Create an admin account")
     resetPassword      = flag.Bool("reset-password", false, "Deprecated")
-    serverURL          = flag.String("url", "https://localhost:8448", "The URL to connect to.")
+    serverURL          = flag.String("url", "http://localhost:8008", "The URL to connect to.")
     validUsernameRegex = regexp.MustCompile(`^[0-9a-z_\-=./]+$`)
+    timeout            = flag.Duration("timeout", time.Second*30, "Timeout for the http client when connecting to the server")
 )
 
 var cl = http.Client{
-    Timeout:   time.Second * 10,
+    Timeout:   time.Second * 30,
     Transport: http.DefaultTransport,
 }
 
@@ -85,6 +86,10 @@ func main() {
         logrus.Fatalf("The reset-password flag has been replaced by the POST /_dendrite/admin/resetPassword/{localpart} admin API.")
     }
 
+    if cfg.ClientAPI.RegistrationSharedSecret == "" {
+        logrus.Fatalln("Shared secret registration is not enabled, enable it by setting a shared secret in the config: 'client_api.registration_shared_secret'")
+    }
+
     if *username == "" {
         flag.Usage()
         os.Exit(1)
@@ -104,6 +109,8 @@ func main() {
         logrus.Fatalln(err)
     }
 
+    cl.Timeout = *timeout
+
     accessToken, err := sharedSecretRegister(cfg.ClientAPI.RegistrationSharedSecret, *serverURL, *username, pass, *isAdmin)
     if err != nil {
         logrus.Fatalln("Failed to create the account:", err.Error())
@@ -120,8 +127,8 @@ type sharedSecretRegistrationRequest struct {
     Admin bool `json:"admin"`
 }
 
-func sharedSecretRegister(sharedSecret, serverURL, localpart, password string, admin bool) (accesToken string, err error) {
-    registerURL := fmt.Sprintf("%s/_synapse/admin/v1/register", serverURL)
+func sharedSecretRegister(sharedSecret, serverURL, localpart, password string, admin bool) (accessToken string, err error) {
+    registerURL := fmt.Sprintf("%s/_synapse/admin/v1/register", strings.Trim(serverURL, "/"))
     nonceReq, err := http.NewRequest(http.MethodGet, registerURL, nil)
     if err != nil {
         return "", fmt.Errorf("unable to create http request: %w", err)
@@ -172,7 +179,10 @@ func sharedSecretRegister(sharedSecret, serverURL, localpart, password string, a
         body, _ = io.ReadAll(regResp.Body)
         return "", fmt.Errorf(gjson.GetBytes(body, "error").Str)
     }
-    r, _ := io.ReadAll(regResp.Body)
+    r, err := io.ReadAll(regResp.Body)
+    if err != nil {
+        return "", fmt.Errorf("failed to read response body (HTTP %d): %w", regResp.StatusCode, err)
+    }
 
     return gjson.GetBytes(r, "access_token").Str, nil
 }
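create-account registers the account through Dendrite's Synapse-compatible /_synapse/admin/v1/register endpoint: it first fetches a nonce with a GET, then posts the registration together with an HMAC proving knowledge of the shared secret. To my understanding the MAC follows the Synapse shared-secret scheme, i.e. HMAC-SHA1 over nonce, localpart, password and the admin flag joined by NUL bytes; a sketch of that computation with placeholder values:

package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/hex"
	"fmt"
)

// registrationMAC computes the shared-secret registration MAC as used by the
// Synapse-compatible admin register endpoint (assumed scheme: HMAC-SHA1 over
// nonce \x00 localpart \x00 password \x00 "admin"/"notadmin").
func registrationMAC(sharedSecret, nonce, localpart, password string, admin bool) string {
	mac := hmac.New(sha1.New, []byte(sharedSecret))
	mac.Write([]byte(nonce))
	mac.Write([]byte{0})
	mac.Write([]byte(localpart))
	mac.Write([]byte{0})
	mac.Write([]byte(password))
	mac.Write([]byte{0})
	if admin {
		mac.Write([]byte("admin"))
	} else {
		mac.Write([]byte("notadmin"))
	}
	return hex.EncodeToString(mac.Sum(nil))
}

func main() {
	fmt.Println(registrationMAC("registration_shared_secret", "some-nonce", "alice", "hunter2", false))
}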
@@ -24,6 +24,8 @@ import (
     "net"
     "net/http"
     "os"
+    "path/filepath"
+    "strings"
     "time"
 
     "github.com/gorilla/mux"
@@ -42,6 +44,7 @@ import (
     "github.com/matrix-org/dendrite/setup"
     "github.com/matrix-org/dendrite/setup/base"
     "github.com/matrix-org/dendrite/setup/config"
+    "github.com/matrix-org/dendrite/test"
     "github.com/matrix-org/dendrite/userapi"
     "github.com/matrix-org/gomatrixserverlib"
@@ -60,6 +63,7 @@ var (
     instancePort   = flag.Int("port", 8008, "the port that the client API will listen on")
     instancePeer   = flag.String("peer", "", "the static Pinecone peers to connect to, comma separated-list")
     instanceListen = flag.String("listen", ":0", "the port Pinecone peers can connect to")
+    instanceDir    = flag.String("dir", ".", "the directory to store the databases in (if --config not specified)")
 )
 
 // nolint:gocyclo
@@ -70,31 +74,98 @@ func main() {
     var pk ed25519.PublicKey
     var sk ed25519.PrivateKey
 
-    keyfile := *instanceName + ".key"
-    if _, err := os.Stat(keyfile); os.IsNotExist(err) {
-        if pk, sk, err = ed25519.GenerateKey(nil); err != nil {
-            panic(err)
-        }
-        if err = os.WriteFile(keyfile, sk, 0644); err != nil {
-            panic(err)
-        }
-    } else if err == nil {
-        if sk, err = os.ReadFile(keyfile); err != nil {
-            panic(err)
-        }
-        if len(sk) != ed25519.PrivateKeySize {
-            panic("the private key is not long enough")
-        }
-        pk = sk.Public().(ed25519.PublicKey)
+    // iterate through the cli args and check if the config flag was set
+    configFlagSet := false
+    for _, arg := range os.Args {
+        if arg == "--config" || arg == "-config" {
+            configFlagSet = true
+            break
+        }
     }
 
-    pRouter := pineconeRouter.NewRouter(logrus.WithField("pinecone", "router"), sk, false)
+    cfg := &config.Dendrite{}
+
+    // use custom config if config flag is set
+    if configFlagSet {
+        cfg = setup.ParseFlags(true)
+        sk = cfg.Global.PrivateKey
+        pk = sk.Public().(ed25519.PublicKey)
+    } else {
+        keyfile := filepath.Join(*instanceDir, *instanceName) + ".pem"
+        if _, err := os.Stat(keyfile); os.IsNotExist(err) {
+            oldkeyfile := *instanceName + ".key"
+            if _, err = os.Stat(oldkeyfile); os.IsNotExist(err) {
+                if err = test.NewMatrixKey(keyfile); err != nil {
+                    panic("failed to generate a new PEM key: " + err.Error())
+                }
+                if _, sk, err = config.LoadMatrixKey(keyfile, os.ReadFile); err != nil {
+                    panic("failed to load PEM key: " + err.Error())
+                }
+                if len(sk) != ed25519.PrivateKeySize {
+                    panic("the private key is not long enough")
+                }
+            } else {
+                if sk, err = os.ReadFile(oldkeyfile); err != nil {
+                    panic("failed to read the old private key: " + err.Error())
+                }
+                if len(sk) != ed25519.PrivateKeySize {
+                    panic("the private key is not long enough")
+                }
+                if err := test.SaveMatrixKey(keyfile, sk); err != nil {
+                    panic("failed to convert the private key to PEM format: " + err.Error())
+                }
+            }
+        } else {
+            var err error
+            if _, sk, err = config.LoadMatrixKey(keyfile, os.ReadFile); err != nil {
+                panic("failed to load PEM key: " + err.Error())
+            }
+            if len(sk) != ed25519.PrivateKeySize {
+                panic("the private key is not long enough")
+            }
+        }
+
+        pk = sk.Public().(ed25519.PublicKey)
+
+        cfg.Defaults(config.DefaultOpts{
+            Generate:   true,
+            Monolithic: true,
+        })
+        cfg.Global.PrivateKey = sk
+        cfg.Global.JetStream.StoragePath = config.Path(fmt.Sprintf("%s/", filepath.Join(*instanceDir, *instanceName)))
+        cfg.UserAPI.AccountDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-account.db", filepath.Join(*instanceDir, *instanceName)))
+        cfg.MediaAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-mediaapi.db", filepath.Join(*instanceDir, *instanceName)))
+        cfg.SyncAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-syncapi.db", filepath.Join(*instanceDir, *instanceName)))
+        cfg.RoomServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-roomserver.db", filepath.Join(*instanceDir, *instanceName)))
+        cfg.KeyServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-keyserver.db", filepath.Join(*instanceDir, *instanceName)))
+        cfg.FederationAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-federationapi.db", filepath.Join(*instanceDir, *instanceName)))
+        cfg.MSCs.MSCs = []string{"msc2836", "msc2946"}
+        cfg.MSCs.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-mscs.db", filepath.Join(*instanceDir, *instanceName)))
+        cfg.ClientAPI.RegistrationDisabled = false
+        cfg.ClientAPI.OpenRegistrationWithoutVerificationEnabled = true
+        cfg.MediaAPI.BasePath = config.Path(*instanceDir)
+        cfg.SyncAPI.Fulltext.Enabled = true
+        cfg.SyncAPI.Fulltext.IndexPath = config.Path(*instanceDir)
+        if err := cfg.Derive(); err != nil {
+            panic(err)
+        }
+    }
+
+    cfg.Global.ServerName = gomatrixserverlib.ServerName(hex.EncodeToString(pk))
+    cfg.Global.KeyID = gomatrixserverlib.KeyID(signing.KeyID)
+
+    base := base.NewBaseDendrite(cfg, "Monolith")
+    defer base.Close() // nolint: errcheck
+
+    pRouter := pineconeRouter.NewRouter(logrus.WithField("pinecone", "router"), sk)
     pQUIC := pineconeSessions.NewSessions(logrus.WithField("pinecone", "sessions"), pRouter, []string{"matrix"})
     pMulticast := pineconeMulticast.NewMulticast(logrus.WithField("pinecone", "multicast"), pRouter)
     pManager := pineconeConnections.NewConnectionManager(pRouter, nil)
     pMulticast.Start()
     if instancePeer != nil && *instancePeer != "" {
-        pManager.AddPeer(*instancePeer)
+        for _, peer := range strings.Split(*instancePeer, ",") {
+            pManager.AddPeer(strings.Trim(peer, " \t\r\n"))
+        }
     }
 
     go func() {
@@ -125,29 +196,6 @@ func main() {
         }
     }()
 
-    cfg := &config.Dendrite{}
-    cfg.Defaults(true)
-    cfg.Global.ServerName = gomatrixserverlib.ServerName(hex.EncodeToString(pk))
-    cfg.Global.PrivateKey = sk
-    cfg.Global.KeyID = gomatrixserverlib.KeyID(signing.KeyID)
-    cfg.Global.JetStream.StoragePath = config.Path(fmt.Sprintf("%s/", *instanceName))
-    cfg.UserAPI.AccountDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-account.db", *instanceName))
-    cfg.MediaAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-mediaapi.db", *instanceName))
-    cfg.SyncAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-syncapi.db", *instanceName))
-    cfg.RoomServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-roomserver.db", *instanceName))
-    cfg.KeyServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-keyserver.db", *instanceName))
-    cfg.FederationAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-federationapi.db", *instanceName))
-    cfg.AppServiceAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-appservice.db", *instanceName))
-    cfg.MSCs.MSCs = []string{"msc2836", "msc2946"}
-    cfg.ClientAPI.RegistrationDisabled = false
-    cfg.ClientAPI.OpenRegistrationWithoutVerificationEnabled = true
-    if err := cfg.Derive(); err != nil {
-        panic(err)
-    }
-
-    base := base.NewBaseDendrite(cfg, "Monolith")
-    defer base.Close() // nolint: errcheck
-
     federation := conn.CreateFederationClient(base, pQUIC)
 
     serverKeyAPI := &signing.YggdrasilKeys{}
||||||
|
|
|
||||||
|
|
@@ -16,12 +16,15 @@ package main
 import (
     "context"
+    "crypto/ed25519"
     "crypto/tls"
+    "encoding/hex"
     "flag"
     "fmt"
     "net"
     "net/http"
     "os"
+    "path/filepath"
     "time"
 
     "github.com/matrix-org/gomatrixserverlib"
@@ -42,6 +45,7 @@ import (
     "github.com/matrix-org/dendrite/setup/base"
     "github.com/matrix-org/dendrite/setup/config"
     "github.com/matrix-org/dendrite/setup/mscs"
+    "github.com/matrix-org/dendrite/test"
     "github.com/matrix-org/dendrite/userapi"
     "github.com/sirupsen/logrus"
@@ -49,19 +53,19 @@ import (
 )
 
 var (
     instanceName = flag.String("name", "dendrite-p2p-ygg", "the name of this P2P demo instance")
     instancePort = flag.Int("port", 8008, "the port that the client API will listen on")
-    instancePeer = flag.String("peer", "", "an internet Yggdrasil peer to connect to")
+    instancePeer   = flag.String("peer", "", "the static Yggdrasil peers to connect to, comma separated-list")
+    instanceListen = flag.String("listen", "tcp://:0", "the port Yggdrasil peers can connect to")
+    instanceDir    = flag.String("dir", ".", "the directory to store the databases in (if --config not specified)")
 )
 
 func main() {
     flag.Parse()
     internal.SetupPprof()
 
-    ygg, err := yggconn.Setup(*instanceName, ".", *instancePeer)
-    if err != nil {
-        panic(err)
-    }
+    var pk ed25519.PublicKey
+    var sk ed25519.PrivateKey
 
     // iterate through the cli args and check if the config flag was set
     configFlagSet := false
@@ -74,36 +78,81 @@ func main() {
 
     cfg := &config.Dendrite{}
 
+    keyfile := filepath.Join(*instanceDir, *instanceName) + ".pem"
+    if _, err := os.Stat(keyfile); os.IsNotExist(err) {
+        oldkeyfile := *instanceName + ".key"
+        if _, err = os.Stat(oldkeyfile); os.IsNotExist(err) {
+            if err = test.NewMatrixKey(keyfile); err != nil {
+                panic("failed to generate a new PEM key: " + err.Error())
+            }
+            if _, sk, err = config.LoadMatrixKey(keyfile, os.ReadFile); err != nil {
+                panic("failed to load PEM key: " + err.Error())
+            }
+            if len(sk) != ed25519.PrivateKeySize {
+                panic("the private key is not long enough")
+            }
+        } else {
+            if sk, err = os.ReadFile(oldkeyfile); err != nil {
+                panic("failed to read the old private key: " + err.Error())
+            }
+            if len(sk) != ed25519.PrivateKeySize {
+                panic("the private key is not long enough")
+            }
+            if err := test.SaveMatrixKey(keyfile, sk); err != nil {
+                panic("failed to convert the private key to PEM format: " + err.Error())
+            }
+        }
+    } else {
+        var err error
+        if _, sk, err = config.LoadMatrixKey(keyfile, os.ReadFile); err != nil {
+            panic("failed to load PEM key: " + err.Error())
+        }
+        if len(sk) != ed25519.PrivateKeySize {
+            panic("the private key is not long enough")
+        }
+    }
+
+    pk = sk.Public().(ed25519.PublicKey)
+
     // use custom config if config flag is set
     if configFlagSet {
         cfg = setup.ParseFlags(true)
     } else {
-        cfg.Defaults(true)
-        cfg.Global.JetStream.StoragePath = config.Path(fmt.Sprintf("%s/", *instanceName))
-        cfg.UserAPI.AccountDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-account.db", *instanceName))
-        cfg.MediaAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-mediaapi.db", *instanceName))
-        cfg.SyncAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-syncapi.db", *instanceName))
-        cfg.RoomServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-roomserver.db", *instanceName))
-        cfg.KeyServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-keyserver.db", *instanceName))
-        cfg.FederationAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-federationapi.db", *instanceName))
-        cfg.AppServiceAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-appservice.db", *instanceName))
-        cfg.MSCs.MSCs = []string{"msc2836"}
-        cfg.MSCs.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-mscs.db", *instanceName))
+        cfg.Defaults(config.DefaultOpts{
+            Generate:   true,
+            Monolithic: true,
+        })
+        cfg.Global.PrivateKey = sk
+        cfg.Global.JetStream.StoragePath = config.Path(filepath.Join(*instanceDir, *instanceName))
+        cfg.UserAPI.AccountDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-account.db", filepath.Join(*instanceDir, *instanceName)))
+        cfg.MediaAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-mediaapi.db", filepath.Join(*instanceDir, *instanceName)))
+        cfg.SyncAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-syncapi.db", filepath.Join(*instanceDir, *instanceName)))
+        cfg.RoomServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-roomserver.db", filepath.Join(*instanceDir, *instanceName)))
+        cfg.KeyServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-keyserver.db", filepath.Join(*instanceDir, *instanceName)))
+        cfg.FederationAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-federationapi.db", filepath.Join(*instanceDir, *instanceName)))
+        cfg.MSCs.MSCs = []string{"msc2836", "msc2946"}
+        cfg.MSCs.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-mscs.db", filepath.Join(*instanceDir, *instanceName)))
         cfg.ClientAPI.RegistrationDisabled = false
         cfg.ClientAPI.OpenRegistrationWithoutVerificationEnabled = true
-        if err = cfg.Derive(); err != nil {
+        cfg.MediaAPI.BasePath = config.Path(*instanceDir)
+        cfg.SyncAPI.Fulltext.Enabled = true
+        cfg.SyncAPI.Fulltext.IndexPath = config.Path(*instanceDir)
+        if err := cfg.Derive(); err != nil {
             panic(err)
         }
     }
 
-    // always override ServerName, PrivateKey and KeyID
-    cfg.Global.ServerName = gomatrixserverlib.ServerName(ygg.DerivedServerName())
-    cfg.Global.PrivateKey = ygg.PrivateKey()
-    cfg.Global.KeyID = signing.KeyID
+    cfg.Global.ServerName = gomatrixserverlib.ServerName(hex.EncodeToString(pk))
+    cfg.Global.KeyID = gomatrixserverlib.KeyID(signing.KeyID)
 
     base := base.NewBaseDendrite(cfg, "Monolith")
     defer base.Close() // nolint: errcheck
 
+    ygg, err := yggconn.Setup(sk, *instanceName, ".", *instancePeer, *instanceListen)
+    if err != nil {
+        panic(err)
+    }
+
     federation := ygg.CreateFederationClient(base)
 
     serverKeyAPI := &signing.YggdrasilKeys{}
||||||
|
|
|
||||||
|
|
@@ -18,15 +18,13 @@ import (
     "context"
     "crypto/ed25519"
     "encoding/hex"
-    "encoding/json"
     "fmt"
-    "log"
     "net"
-    "os"
     "strings"
 
     "github.com/matrix-org/gomatrixserverlib"
     "github.com/neilalexander/utp"
+    "github.com/sirupsen/logrus"
 
     ironwoodtypes "github.com/Arceliar/ironwood/types"
     yggdrasilconfig "github.com/yggdrasil-network/yggdrasil-go/src/config"
@@ -57,48 +55,38 @@ func (n *Node) DialerContext(ctx context.Context, _, address string) (net.Conn,
     return n.utpSocket.DialAddrContext(ctx, pk)
 }
 
-func Setup(instanceName, storageDirectory, peerURI string) (*Node, error) {
+func Setup(sk ed25519.PrivateKey, instanceName, storageDirectory, peerURI, listenURI string) (*Node, error) {
     n := &Node{
         core:      &yggdrasilcore.Core{},
         config:    yggdrasildefaults.GenerateConfig(),
         multicast: &yggdrasilmulticast.Multicast{},
-        log:       gologme.New(os.Stdout, "YGG ", log.Flags()),
+        log:       gologme.New(logrus.StandardLogger().Writer(), "", 0),
         incoming:  make(chan net.Conn),
     }
 
-    yggfile := fmt.Sprintf("%s/%s-yggdrasil.conf", storageDirectory, instanceName)
-    if _, err := os.Stat(yggfile); !os.IsNotExist(err) {
-        yggconf, e := os.ReadFile(yggfile)
-        if e != nil {
-            panic(err)
-        }
-        if err := json.Unmarshal([]byte(yggconf), &n.config); err != nil {
-            panic(err)
-        }
+    options := []yggdrasilcore.SetupOption{
+        yggdrasilcore.AdminListenAddress("none"),
+    }
+    if listenURI != "" {
+        options = append(options, yggdrasilcore.ListenAddress(listenURI))
     }
 
-    n.config.Peers = []string{}
     if peerURI != "" {
-        n.config.Peers = append(n.config.Peers, peerURI)
+        for _, uri := range strings.Split(peerURI, ",") {
+            options = append(options, yggdrasilcore.Peer{
+                URI: uri,
+            })
+        }
     }
-    n.config.AdminListen = "none"
 
-    j, err := json.MarshalIndent(n.config, "", "  ")
-    if err != nil {
+    var err error
+    if n.core, err = yggdrasilcore.New(sk, options...); err != nil {
         panic(err)
     }
-    if e := os.WriteFile(yggfile, j, 0600); e != nil {
-        n.log.Printf("Couldn't write private key to file '%s': %s\n", yggfile, e)
-    }
 
     n.log.EnableLevel("error")
     n.log.EnableLevel("warn")
     n.log.EnableLevel("info")
-    if err = n.core.Start(n.config, n.log); err != nil {
-        panic(err)
-    }
-    n.utpSocket, err = utp.NewSocketFromPacketConnNoClose(n.core)
-    if err != nil {
+    n.core.SetLogger(n.log)
+    if n.utpSocket, err = utp.NewSocketFromPacketConnNoClose(n.core); err != nil {
         panic(err)
     }
     if err = n.multicast.Init(n.core, n.config, n.log, nil); err != nil {
@@ -108,7 +96,7 @@ func Setup(instanceName, storageDirectory, peerURI string) (*Node, error) {
         panic(err)
     }
 
-    n.log.Println("Public key:", n.core.PublicKey())
+    n.log.Printf("Public key: %x", n.core.PublicKey())
     go n.listenFromYgg()
 
     return n, nil
|
|
|
||||||
|
|
@@ -4,6 +4,7 @@ import (
     "fmt"
     "log"
     "strings"
+    "time"
 
     "github.com/matrix-org/gomatrix"
     "github.com/matrix-org/gomatrixserverlib"
@@ -81,11 +82,14 @@ func runTests(baseURL, branchName string) error {
             client: users[1].client, text: "4: " + branchName,
         },
     }
+    wantEventIDs := make(map[string]struct{}, 8)
     for _, msg := range msgs {
-        _, err = msg.client.SendText(dmRoomID, msg.text)
+        var resp *gomatrix.RespSendEvent
+        resp, err = msg.client.SendText(dmRoomID, msg.text)
         if err != nil {
             return fmt.Errorf("failed to send text in dm room: %s", err)
         }
+        wantEventIDs[resp.EventID] = struct{}{}
     }
 
     // attempt to create/join the shared public room
@@ -113,11 +117,48 @@ func runTests(baseURL, branchName string) error {
     }
     // send messages
     for _, msg := range msgs {
-        _, err = msg.client.SendText(publicRoomID, "public "+msg.text)
+        resp, err := msg.client.SendText(publicRoomID, "public "+msg.text)
         if err != nil {
             return fmt.Errorf("failed to send text in public room: %s", err)
         }
+        wantEventIDs[resp.EventID] = struct{}{}
     }
+
+    // Sync until we have all expected messages
+    doneCh := make(chan struct{})
+    go func() {
+        syncClient := users[0].client
+        since := ""
+        for len(wantEventIDs) > 0 {
+            select {
+            case <-doneCh:
+                return
+            default:
+            }
+            syncResp, err := syncClient.SyncRequest(1000, since, "1", false, "")
+            if err != nil {
+                continue
+            }
+            for _, room := range syncResp.Rooms.Join {
+                for _, ev := range room.Timeline.Events {
+                    if ev.Type != "m.room.message" {
+                        continue
+                    }
+                    delete(wantEventIDs, ev.ID)
+                }
+            }
+            since = syncResp.NextBatch
+        }
+        close(doneCh)
+    }()
+
+    select {
+    case <-time.After(time.Second * 10):
+        close(doneCh)
+        return fmt.Errorf("failed to receive all expected messages: %+v", wantEventIDs)
+    case <-doneCh:
+    }
 
     log.Printf("OK! rooms(public=%s, dm=%s) users(%s, %s)\n", publicRoomID, dmRoomID, users[0].userID, users[1].userID)
     return nil
 }
|
|
|
||||||
|
|
@@ -3,99 +3,97 @@ package main
 import (
 	"flag"
 	"fmt"
+	"path/filepath"

-	"github.com/matrix-org/dendrite/setup/config"
 	"github.com/matrix-org/gomatrixserverlib"
 	"golang.org/x/crypto/bcrypt"
 	"gopkg.in/yaml.v2"
+
+	"github.com/matrix-org/dendrite/setup/config"
 )

 func main() {
-	defaultsForCI := flag.Bool("ci", false, "sane defaults for CI testing")
+	defaultsForCI := flag.Bool("ci", false, "Populate the configuration with sane defaults for use in CI")
 	serverName := flag.String("server", "", "The domain name of the server if not 'localhost'")
-	dbURI := flag.String("db", "", "The DB URI to use for all components if not SQLite files")
+	dbURI := flag.String("db", "", "The DB URI to use for all components (PostgreSQL only)")
+	dirPath := flag.String("dir", "./", "The folder to use for paths (like SQLite databases, media storage)")
+	normalise := flag.String("normalise", "", "Normalise an existing configuration file by adding new/missing options and defaults")
+	polylith := flag.Bool("polylith", false, "Generate a config that makes sense for polylith deployments")
 	flag.Parse()

-	cfg := &config.Dendrite{
-		Version: config.Version,
-	}
-	cfg.Defaults(true)
-	if *serverName != "" {
-		cfg.Global.ServerName = gomatrixserverlib.ServerName(*serverName)
-	}
-	if *dbURI != "" {
-		cfg.AppServiceAPI.Database.ConnectionString = config.DataSource(*dbURI)
-		cfg.FederationAPI.Database.ConnectionString = config.DataSource(*dbURI)
-		cfg.KeyServer.Database.ConnectionString = config.DataSource(*dbURI)
-		cfg.MSCs.Database.ConnectionString = config.DataSource(*dbURI)
-		cfg.MediaAPI.Database.ConnectionString = config.DataSource(*dbURI)
-		cfg.RoomServer.Database.ConnectionString = config.DataSource(*dbURI)
-		cfg.SyncAPI.Database.ConnectionString = config.DataSource(*dbURI)
-		cfg.UserAPI.AccountDatabase.ConnectionString = config.DataSource(*dbURI)
-	}
-	cfg.Global.TrustedIDServers = []string{
-		"matrix.org",
-		"vector.im",
-	}
-	cfg.Logging = []config.LogrusHook{
-		{
-			Type:  "file",
-			Level: "info",
-			Params: map[string]interface{}{
-				"path": "/var/log/dendrite",
-			},
-		},
-	}
-	cfg.FederationAPI.KeyPerspectives = config.KeyPerspectives{
-		{
-			ServerName: "matrix.org",
-			Keys: []config.KeyPerspectiveTrustKey{
-				{
-					KeyID:     "ed25519:auto",
-					PublicKey: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw",
-				},
-				{
-					KeyID:     "ed25519:a_RXGa",
-					PublicKey: "l8Hft5qXKn1vfHrg3p4+W8gELQVo8N13JkluMfmn2sQ",
-				},
-			},
-		},
-	}
-	cfg.MediaAPI.ThumbnailSizes = []config.ThumbnailSize{
-		{
-			Width:        32,
-			Height:       32,
-			ResizeMethod: "crop",
-		},
-		{
-			Width:        96,
-			Height:       96,
-			ResizeMethod: "crop",
-		},
-		{
-			Width:        640,
-			Height:       480,
-			ResizeMethod: "scale",
-		},
-	}
-
-	if *defaultsForCI {
-		cfg.AppServiceAPI.DisableTLSValidation = true
-		cfg.ClientAPI.RateLimiting.Enabled = false
-		cfg.FederationAPI.DisableTLSValidation = false
-		// don't hit matrix.org when running tests!!!
-		cfg.FederationAPI.KeyPerspectives = config.KeyPerspectives{}
-		cfg.MSCs.MSCs = []string{"msc2836", "msc2946", "msc2444", "msc2753"}
-		cfg.Logging[0].Level = "trace"
-		cfg.Logging[0].Type = "std"
-		cfg.UserAPI.BCryptCost = bcrypt.MinCost
-		cfg.Global.JetStream.InMemory = true
-		cfg.ClientAPI.RegistrationDisabled = false
-		cfg.ClientAPI.OpenRegistrationWithoutVerificationEnabled = true
-		cfg.ClientAPI.RegistrationSharedSecret = "complement"
-		cfg.Global.Presence = config.PresenceOptions{
-			EnableInbound:  true,
-			EnableOutbound: true,
-		}
-	}
+	var cfg *config.Dendrite
+	if *normalise == "" {
+		cfg = &config.Dendrite{
+			Version: config.Version,
+		}
+		cfg.Defaults(config.DefaultOpts{
+			Generate:   true,
+			Monolithic: !*polylith,
+		})
+		if *serverName != "" {
+			cfg.Global.ServerName = gomatrixserverlib.ServerName(*serverName)
+		}
+		uri := config.DataSource(*dbURI)
+		if *polylith || uri.IsSQLite() || uri == "" {
+			for name, db := range map[string]*config.DatabaseOptions{
+				"federationapi": &cfg.FederationAPI.Database,
+				"keyserver":     &cfg.KeyServer.Database,
+				"mscs":          &cfg.MSCs.Database,
+				"mediaapi":      &cfg.MediaAPI.Database,
+				"roomserver":    &cfg.RoomServer.Database,
+				"syncapi":       &cfg.SyncAPI.Database,
+				"userapi":       &cfg.UserAPI.AccountDatabase,
+			} {
+				if uri == "" {
+					path := filepath.Join(*dirPath, fmt.Sprintf("dendrite_%s.db", name))
+					db.ConnectionString = config.DataSource(fmt.Sprintf("file:%s", path))
+				} else {
+					db.ConnectionString = uri
+				}
+			}
+		} else {
+			cfg.Global.DatabaseOptions.ConnectionString = uri
+		}
+		cfg.Logging = []config.LogrusHook{
+			{
+				Type:  "file",
+				Level: "info",
+				Params: map[string]interface{}{
+					"path": filepath.Join(*dirPath, "log"),
+				},
+			},
+		}
+		if *defaultsForCI {
+			cfg.AppServiceAPI.DisableTLSValidation = true
+			cfg.ClientAPI.RateLimiting.Enabled = false
+			cfg.FederationAPI.DisableTLSValidation = false
+			// don't hit matrix.org when running tests!!!
+			cfg.FederationAPI.KeyPerspectives = config.KeyPerspectives{}
+			cfg.MediaAPI.BasePath = config.Path(filepath.Join(*dirPath, "media"))
+			cfg.MSCs.MSCs = []string{"msc2836", "msc2946", "msc2444", "msc2753"}
+			cfg.Logging[0].Level = "trace"
+			cfg.Logging[0].Type = "std"
+			cfg.UserAPI.BCryptCost = bcrypt.MinCost
+			cfg.Global.JetStream.InMemory = true
+			cfg.Global.JetStream.StoragePath = config.Path(*dirPath)
+			cfg.ClientAPI.RegistrationDisabled = false
+			cfg.ClientAPI.OpenRegistrationWithoutVerificationEnabled = true
+			cfg.ClientAPI.RegistrationSharedSecret = "complement"
+			cfg.Global.Presence = config.PresenceOptions{
+				EnableInbound:  true,
+				EnableOutbound: true,
+			}
+			cfg.SyncAPI.Fulltext = config.Fulltext{
+				Enabled:   true,
+				IndexPath: config.Path(filepath.Join(*dirPath, "searchindex")),
+				InMemory:  true,
+				Language:  "en",
+			}
+		}
+	} else {
+		var err error
+		if cfg, err = config.Load(*normalise, !*polylith); err != nil {
+			panic(err)
+		}
+	}

@@ -38,6 +38,7 @@ var (
 	authorityCertFile = flag.String("tls-authority-cert", "", "Optional: Create TLS certificate/keys based on this CA authority. Useful for integration testing.")
 	authorityKeyFile  = flag.String("tls-authority-key", "", "Optional: Create TLS certificate/keys based on this CA authority. Useful for integration testing.")
 	serverName        = flag.String("server", "", "Optional: Create TLS certificate/keys with this domain name set. Useful for integration testing.")
+	keySize           = flag.Int("keysize", 4096, "Optional: Create TLS RSA private key with the given key size")
 )

 func main() {

@@ -58,12 +59,12 @@ func main() {
 		log.Fatal("Zero or both of --tls-key and --tls-cert must be supplied")
 	}
 	if *authorityCertFile == "" && *authorityKeyFile == "" {
-		if err := test.NewTLSKey(*tlsKeyFile, *tlsCertFile); err != nil {
+		if err := test.NewTLSKey(*tlsKeyFile, *tlsCertFile, *keySize); err != nil {
 			panic(err)
 		}
 	} else {
 		// generate the TLS cert/key based on the authority given.
-		if err := test.NewTLSKeyWithAuthority(*serverName, *tlsKeyFile, *tlsCertFile, *authorityKeyFile, *authorityCertFile); err != nil {
+		if err := test.NewTLSKeyWithAuthority(*serverName, *tlsKeyFile, *tlsCertFile, *authorityKeyFile, *authorityCertFile, *keySize); err != nil {
 			panic(err)
 		}
 	}

@@ -76,9 +76,14 @@ func main() {
 		panic(err)
 	}

-	var eventNIDs []types.EventNID
+	eventNIDMap := map[types.EventNID]struct{}{}
 	for _, entry := range append(removed, added...) {
-		eventNIDs = append(eventNIDs, entry.EventNID)
+		eventNIDMap[entry.EventNID] = struct{}{}
+	}
+
+	eventNIDs := make([]types.EventNID, 0, len(eventNIDMap))
+	for eventNID := range eventNIDMap {
+		eventNIDs = append(eventNIDs, eventNID)
 	}

 	var eventEntries []types.Event

@@ -129,12 +134,17 @@ func main() {
 		stateEntries = append(stateEntries, entries...)
 	}

-	var eventNIDs []types.EventNID
+	eventNIDMap := map[types.EventNID]struct{}{}
 	for _, entry := range stateEntries {
-		eventNIDs = append(eventNIDs, entry.EventNID)
+		eventNIDMap[entry.EventNID] = struct{}{}
 	}

-	fmt.Println("Fetching", len(eventNIDs), "state events")
+	eventNIDs := make([]types.EventNID, 0, len(eventNIDMap))
+	for eventNID := range eventNIDMap {
+		eventNIDs = append(eventNIDs, eventNID)
+	}
+
+	fmt.Println("Fetching", len(eventNIDMap), "state events")
 	eventEntries, err := roomserverDB.Events(ctx, eventNIDs)
 	if err != nil {
 		panic(err)

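The hunks above swap a plain append for a map so that the same event NID is only requested from the database once. A minimal standalone sketch of that dedup pattern follows; the `eventNID` type here is a stand-in for illustration, not Dendrite's real `types.EventNID`.

```go
package main

import "fmt"

// eventNID stands in for Dendrite's types.EventNID (an integer row ID).
type eventNID int64

// dedupe collapses repeated NIDs into a unique slice, mirroring the
// map-based pattern introduced in the diff above.
func dedupe(in []eventNID) []eventNID {
	seen := map[eventNID]struct{}{}
	for _, nid := range in {
		seen[nid] = struct{}{}
	}
	out := make([]eventNID, 0, len(seen))
	for nid := range seen {
		out = append(out, nid)
	}
	return out
}

func main() {
	// "removed" and "added" state entries often reference the same events,
	// so the naive append produced duplicate NIDs in the query.
	fmt.Println(dedupe([]eventNID{3, 1, 3, 2, 1})) // three unique NIDs, order unspecified
}
```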
@@ -18,12 +18,17 @@ global:
   private_key: matrix_key.pem

   # The paths and expiry timestamps (as a UNIX timestamp in millisecond precision)
-  # to old signing private keys that were formerly in use on this domain. These
+  # to old signing keys that were formerly in use on this domain name. These
   # keys will not be used for federation request or event signing, but will be
   # provided to any other homeserver that asks when trying to verify old events.
   old_private_keys:
+  #  If the old private key file is available:
   #  - private_key: old_matrix_key.pem
   #    expired_at: 1601024554498
+  #  If only the public key (in base64 format) and key ID are known:
+  #  - public_key: mn59Kxfdq9VziYHSBzI7+EDPDcBS2Xl7jeUdiiQcOnM=
+  #    key_id: ed25519:mykeyid
+  #    expired_at: 1601024554498

   # How long a remote server can cache our server signing key before requesting it
   # again. Increasing this number will reduce the number of requests made by other

@@ -37,7 +42,7 @@ global:
   # you must configure the "database" block for each component instead.
   database:
     connection_string: postgresql://username:password@hostname/dendrite?sslmode=disable
-    max_open_conns: 100
+    max_open_conns: 90
     max_idle_conns: 5
     conn_max_lifetime: -1

@@ -222,6 +227,13 @@ federation_api:
   # enable this option in production as it presents a security risk!
   disable_tls_validation: false

+  # Disable HTTP keepalives, which also prevents connection reuse. Dendrite will typically
+  # keep HTTP connections open to remote hosts for 5 minutes as they can be reused much
+  # more quickly than opening new connections each time. Disabling keepalives will close
+  # HTTP connections immediately after a successful request but may result in more CPU and
+  # memory being used on TLS handshakes for each new connection instead.
+  disable_http_keepalives: false
+
   # Perspective keyservers to use as a backup when direct key fetches fail. This may
   # be required to satisfy key requests for servers that are no longer online when
   # joining some rooms.

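For context on the new `disable_http_keepalives` option: at the Go HTTP layer, disabling keepalives means each request pays for a fresh connection (and TLS handshake) instead of reusing a pooled one. The sketch below is only an illustration of what such a switch typically toggles on Go's `net/http` transport; it is not Dendrite's actual federation client wiring, and the five-minute idle timeout is taken from the comment above rather than from Dendrite's source.

```go
package main

import (
	"net/http"
	"time"
)

// newFederationTransport shows what a "disable keepalives" switch typically
// controls on a Go HTTP client. Illustrative only.
func newFederationTransport(disableKeepalives bool) *http.Transport {
	return &http.Transport{
		// When true, connections are closed after each request instead of
		// being pooled, trading extra TLS handshakes for fewer idle sockets.
		DisableKeepAlives: disableKeepalives,
		// When keepalives are enabled, idle connections are kept around for
		// a while so follow-up requests to the same host can reuse them.
		IdleConnTimeout: 5 * time.Minute,
	}
}

func main() {
	client := &http.Client{Transport: newFederationTransport(false)}
	_ = client // use as normal, e.g. resp, err := client.Get(url)
}
```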
@@ -279,6 +291,19 @@ sync_api:
   # a reverse proxy server.
   # real_ip_header: X-Real-IP

+  # Configuration for the full-text search engine.
+  search:
+    # Whether or not search is enabled.
+    enabled: false
+
+    # The path where the search index will be created in.
+    index_path: "./searchindex"
+
+    # The language most likely to be used on the server - used when indexing, to
+    # ensure the returned results match expectations. A full list of possible languages
+    # can be found at https://github.com/blevesearch/bleve/tree/master/analysis/lang
+    language: "en"
+
 # Configuration for the User API.
 user_api:
   # The cost when hashing passwords on registration/login. Default: 10. Min: 4, Max: 31

@@ -18,12 +18,17 @@ global:
   private_key: matrix_key.pem

   # The paths and expiry timestamps (as a UNIX timestamp in millisecond precision)
-  # to old signing private keys that were formerly in use on this domain. These
+  # to old signing keys that were formerly in use on this domain name. These
   # keys will not be used for federation request or event signing, but will be
   # provided to any other homeserver that asks when trying to verify old events.
   old_private_keys:
+  #  If the old private key file is available:
   #  - private_key: old_matrix_key.pem
   #    expired_at: 1601024554498
+  #  If only the public key (in base64 format) and key ID are known:
+  #  - public_key: mn59Kxfdq9VziYHSBzI7+EDPDcBS2Xl7jeUdiiQcOnM=
+  #    key_id: ed25519:mykeyid
+  #    expired_at: 1601024554498

   # How long a remote server can cache our server signing key before requesting it
   # again. Increasing this number will reduce the number of requests made by other

@@ -132,13 +137,6 @@ app_service_api:
   listen: http://[::]:7777 # The listen address for incoming API requests
   connect: http://app_service_api:7777 # The connect address for other components to use

-  # Database configuration for this component.
-  database:
-    connection_string: postgresql://username:password@hostname/dendrite_appservice?sslmode=disable
-    max_open_conns: 10
-    max_idle_conns: 2
-    conn_max_lifetime: -1
-
   # Disable the validation of TLS certificates of appservices. This is
   # not recommended in production since it may allow appservice traffic
   # to be sent to an insecure endpoint.

@@ -236,6 +234,13 @@ federation_api:
   # enable this option in production as it presents a security risk!
   disable_tls_validation: false

+  # Disable HTTP keepalives, which also prevents connection reuse. Dendrite will typically
+  # keep HTTP connections open to remote hosts for 5 minutes as they can be reused much
+  # more quickly than opening new connections each time. Disabling keepalives will close
+  # HTTP connections immediately after a successful request but may result in more CPU and
+  # memory being used on TLS handshakes for each new connection instead.
+  disable_http_keepalives: false
+
   # Perspective keyservers to use as a backup when direct key fetches fail. This may
   # be required to satisfy key requests for servers that are no longer online when
   # joining some rooms.

@@ -337,6 +342,19 @@ sync_api:
     max_idle_conns: 2
     conn_max_lifetime: -1

+  # Configuration for the full-text search engine.
+  search:
+    # Whether or not search is enabled.
+    enabled: false
+
+    # The path where the search index will be created in.
+    index_path: "./searchindex"
+
+    # The language most likely to be used on the server - used when indexing, to
+    # ensure the returned results match expectations. A full list of possible languages
+    # can be found at https://github.com/blevesearch/bleve/tree/master/analysis/lang
+    language: "en"
+
   # This option controls which HTTP header to inspect to find the real remote IP
   # address of the client. This is likely required if Dendrite is running behind
   # a reverse proxy server.

@@ -11,8 +11,8 @@ possible to get started.

 ## Sign off

-We ask that everyone who contributes to the project signs off their contributions
-in accordance with the [DCO](https://github.com/matrix-org/matrix-spec/blob/main/CONTRIBUTING.rst#sign-off).
+We require that everyone who contributes to the project signs off their contributions
+in accordance with the [Developer Certificate of Origin](https://github.com/matrix-org/matrix-spec/blob/main/CONTRIBUTING.rst#sign-off).
 In effect, this means adding a statement to your pull requests or commit messages
 along the lines of:

@@ -20,7 +20,18 @@ along the lines of:
 Signed-off-by: Full Name <email address>
 ```

-Unfortunately we can't accept contributions without it.
+Unfortunately we can't accept contributions without a sign-off.
+
+Please note that we can only accept contributions under a legally identifiable name,
+such as your name as it appears on government-issued documentation or common-law names
+(claimed by legitimate usage or repute). We cannot accept sign-offs from a pseudonym or
+alias and cannot accept anonymous contributions.
+
+If you would prefer to sign off privately instead (so as to not reveal your full
+name on a public pull request), you can do so by emailing a sign-off declaration
+and a link to your pull request directly to the [Matrix.org Foundation](https://matrix.org/foundation/)
+at `dco@matrix.org`. Once a private sign-off has been made, you will not be required
+to do so for future contributions.

 ## Getting up and running

26 docs/FAQ.md

@@ -12,7 +12,13 @@ Mostly, although there are still bugs and missing features. If you are a confide

 ## Is Dendrite feature-complete?

-No, although a good portion of the Matrix specification has been implemented. Mostly missing are client features - see the readme at the root of the repository for more information.
+No, although a good portion of the Matrix specification has been implemented. Mostly missing are client features - see the [readme](../README.md) at the root of the repository for more information.
+
+## Why doesn't Dendrite have "x" yet?
+
+Dendrite development is currently supported by a small team of developers and due to those limited resources, the majority of the effort is focused on getting Dendrite to be
+specification complete. If there are major features you're requesting (e.g. new administration endpoints), we'd like to strongly encourage you to join the community in supporting
+the development efforts through [contributing](https://matrix-org.github.io/dendrite/development/contributing).

 ## Is there a migration path from Synapse to Dendrite?

@@ -43,6 +49,20 @@ It should do, although we are aware of some minor issues:
 * **Element Android**: registration does not work, but logging in with an existing account does
 * **Hydrogen**: occasionally sync can fail due to gaps in the `since` parameter, but clearing the cache fixes this

+## Does Dendrite support Space Summaries?
+
+Yes, [Space Summaries](https://github.com/matrix-org/matrix-spec-proposals/pull/2946) were merged into the Matrix Spec as of 2022-01-17; however, they are still treated as an MSC (Matrix Specification Change) in Dendrite. In order to enable Space Summaries in Dendrite, you must add the MSC to the MSC configuration section in the configuration YAML. If the MSC is not enabled, a user will typically see a perpetual loading icon on the summary page. See below for a demonstration of how to add it to the Dendrite configuration:
+
+```
+mscs:
+  mscs:
+    - msc2946
+```
+
+Similarly, [msc2836](https://github.com/matrix-org/matrix-spec-proposals/pull/2836) would need to be added to the mscs configuration in order to support Threading. Other MSCs are not currently supported.
+
+Please note that MSCs should be considered experimental and can result in significant usability issues when enabled. If you'd like more details on how MSCs are ratified or the current status of MSCs, please see the [Matrix specification documentation](https://spec.matrix.org/proposals/) on the subject.
+
 ## Does Dendrite support push notifications?

 Yes, we have experimental support for push notifications. Configure them in the usual way in your Matrix client.

@@ -86,6 +106,10 @@ would be a huge help too, as that will help us to understand where the memory us

 You may need to revisit the connection limit of your PostgreSQL server and/or make changes to the `max_connections` lines in your Dendrite configuration. Be aware that each Dendrite component opens its own database connections and has its own connection limit, even in monolith mode!

+## VOIP and Video Calls don't appear to work on Dendrite
+
+There is likely an issue with your STUN/TURN configuration on the server. If you believe your configuration to be correct, please see the [troubleshooting page](administration/5_troubleshooting.md) for troubleshooting recommendations.
+
 ## What is being reported when enabling phone-home statistics?

 Phone-home statistics contain your server's domain name, some configuration information about

@@ -14,7 +14,7 @@ GEM
       execjs
     coffee-script-source (1.11.1)
     colorator (1.1.0)
-    commonmarker (0.23.4)
+    commonmarker (0.23.6)
     concurrent-ruby (1.1.10)
     dnsruby (1.61.9)
       simpleidn (~> 0.1)

@@ -231,9 +231,9 @@ GEM
       jekyll-seo-tag (~> 2.1)
     minitest (5.15.0)
     multipart-post (2.1.1)
-    nokogiri (1.13.6-arm64-darwin)
+    nokogiri (1.13.9-arm64-darwin)
       racc (~> 1.4)
-    nokogiri (1.13.6-x86_64-linux)
+    nokogiri (1.13.9-x86_64-linux)
       racc (~> 1.4)
     octokit (4.22.0)
       faraday (>= 0.9)

@@ -31,11 +31,11 @@ To create a new **admin account**, add the `-admin` flag:
 ./bin/create-account -config /path/to/dendrite.yaml -username USERNAME -admin
 ```

-By default `create-account` uses `https://localhost:8448` to connect to Dendrite, this can be overwritten using
+By default `create-account` uses `http://localhost:8008` to connect to Dendrite, this can be overwritten using
 the `-url` flag:

 ```bash
-./bin/create-account -config /path/to/dendrite.yaml -username USERNAME -url http://localhost:8008
+./bin/create-account -config /path/to/dendrite.yaml -username USERNAME -url https://localhost:8448
 ```

 An example of using `create-account` when running in **Docker**, having found the `CONTAINERNAME` from `docker ps`:

@@ -43,6 +43,7 @@ An example of using `create-account` when running in **Docker**, having found th
 ```bash
 docker exec -it CONTAINERNAME /usr/bin/create-account -config /path/to/dendrite.yaml -username USERNAME
 ```
+
 ```bash
 docker exec -it CONTAINERNAME /usr/bin/create-account -config /path/to/dendrite.yaml -username USERNAME -admin
 ```

@@ -13,6 +13,25 @@ without warning.

 More endpoints will be added in the future.

+Endpoints may be used directly through curl:
+
+```
+curl --header "Authorization: Bearer <access_token>" -X <POST|GET|PUT> <Endpoint URI> -d '<Request Body Contents>'
+```
+
+An `access_token` can be obtained through most Element-based matrix clients by going to `Settings` -> `Help & About` -> `Advanced` -> `Access Token`.
+Be aware that an `access_token` allows a client to perform actions as a user and should be kept **secret**.
+
+The user must be an administrator in the `account_accounts` table in order to use these endpoints.
+
+Existing user accounts can be set to administrative accounts by changing `account_type` to `3` in `account_accounts`:
+
+```
+UPDATE account_accounts SET account_type = 3 WHERE localpart = '$localpart';
+```
+
+Where `$localpart` is the username only (e.g. `alice`).
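The same call pattern can be driven from Go rather than curl. The sketch below hits the `/_dendrite/admin/fulltext/reindex` endpoint documented further down; the base URL (a local client API on port 8008) and the access token are placeholders you would substitute, not values defined by this documentation.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

// A minimal sketch of calling a Dendrite admin endpoint with a Bearer token.
func main() {
	const baseURL = "http://localhost:8008" // assumed local client API address
	const accessToken = "<access_token>"    // keep this secret

	req, err := http.NewRequest(http.MethodGet, baseURL+"/_dendrite/admin/fulltext/reindex", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+accessToken)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body)) // expect 200 and an empty JSON body
}
```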
 ## GET `/_dendrite/admin/evacuateRoom/{roomID}`

 This endpoint will instruct Dendrite to part all local users from the given `roomID`

@@ -38,7 +57,44 @@ Request body format:
 Reset the password of a local user. The `localpart` is the username only, i.e. if
 the full user ID is `@alice:domain.com` then the local part is `alice`.

+## GET `/_dendrite/admin/fulltext/reindex`
+
+This endpoint instructs Dendrite to reindex all searchable events (`m.room.message`, `m.room.topic` and `m.room.name`). An empty JSON body will be returned immediately.
+Indexing is done in the background; the server logs every 1000 events (or fewer) as they are being indexed. Once reindexing is done, you'll see something along the lines of `Indexed 69586 events in 53.68223182s` in your debug logs.
+
+## POST `/_dendrite/admin/refreshDevices/{userID}`
+
+This endpoint instructs Dendrite to immediately query `/devices/{userID}` on a federated server. An empty JSON body will be returned on success, updating all locally stored user devices/keys. This can be used to possibly resolve E2EE issues, where the remote user can't decrypt messages.
+
+## POST `/_synapse/admin/v1/send_server_notice`
+
+Request body format:
+```
+{
+    "user_id": "@target_user:server_name",
+    "content": {
+        "msgtype": "m.text",
+        "body": "This is my message"
+    }
+}
+```
+
+Send a server notice to a specific user. See the [Matrix Spec](https://spec.matrix.org/v1.3/client-server-api/#server-notices) for additional details on server notice behaviour.
+If successfully sent, the API will return the following response:
+
+```
+{
+    "event_id": "<event_id>"
+}
+```
+
 ## GET `/_synapse/admin/v1/register`

 Shared secret registration — please see the [user creation page](createusers) for
 guidance on configuring and using this endpoint.

+## GET `/_matrix/client/v3/admin/whois/{userId}`
+
+From the [Matrix Spec](https://spec.matrix.org/v1.3/client-server-api/#get_matrixclientv3adminwhoisuserid).
+Gets information about a particular user. `userId` is the full user ID (e.g. `@alice:domain.com`)

@@ -77,5 +77,12 @@ If there aren't, you will see log lines like this:
 level=warning msg="IMPORTANT: Process file descriptor limit is currently 65535, it is recommended to raise the limit for Dendrite to at least 65535 to avoid issues"
 ```

-Follow the [Optimisation](../installation/10_optimisation.md) instructions to correct the
+Follow the [Optimisation](../installation/11_optimisation.md) instructions to correct the
 available number of file descriptors.

+## 6. STUN/TURN Server tester
+
+If you are experiencing problems with VoIP or video calls, you should check that Dendrite
+is able to successfully connect to your TURN server using the
+[Matrix VoIP Tester](https://test.voip.librepush.net/). This can highlight any issues
+that the server may encounter so that you can begin the troubleshooting process.

@@ -1,66 +1,85 @@
-# Sample Caddyfile for using Caddy in front of Dendrite.
-#
-# Customize email address and domain names.
-# Optional settings commented out.
-#
-# BE SURE YOUR DOMAINS ARE POINTED AT YOUR SERVER FIRST.
-# Documentation: https://caddyserver.com/docs/
-#
-# Bonus tip: If your IP address changes, use Caddy's
-# dynamic DNS plugin to update your DNS records to
-# point to your new IP automatically:
-# https://github.com/mholt/caddy-dynamicdns
+# Sample Caddyfile for using Caddy in front of Dendrite
+#
+# Customize email address and domain names
+# Optional settings commented out
+#
+# BE SURE YOUR DOMAINS ARE POINTED AT YOUR SERVER FIRST
+# Documentation: <https://caddyserver.com/docs/>
+#
+# Bonus tip: If your IP address changes, use Caddy's
+# dynamic DNS plugin to update your DNS records to
+# point to your new IP automatically
+# <https://github.com/mholt/caddy-dynamicdns>
 #

 # Global options block

 {
 	# In case there is a problem with your certificates.
 	# email example@example.com

 	# Turn off the admin endpoint if you don't need graceful config
 	# changes and/or are running untrusted code on your machine.
 	# admin off

 	# Enable this if your clients don't send ServerName in TLS handshakes.
 	# default_sni example.com

 	# Enable debug mode for verbose logging.
 	# debug

 	# Use Let's Encrypt's staging endpoint for testing.
 	# acme_ca https://acme-staging-v02.api.letsencrypt.org/directory

 	# If you're port-forwarding HTTP/HTTPS ports from 80/443 to something
 	# else, enable these and put the alternate port numbers here.
 	# http_port 8080
 	# https_port 8443
 }

 # The server name of your matrix homeserver. This example shows
-# "well-known delegation" from the registered domain to a subdomain,
+# "well-known delegation" from the registered domain to a subdomain
 # which is only needed if your server_name doesn't match your Matrix
 # homeserver URL (i.e. you can show users a vanity domain that looks
 # nice and is easy to remember but still have your Matrix server on
-# its own subdomain or hosted service).
+# its own subdomain or hosted service)

 example.com {
 	header /.well-known/matrix/* Content-Type application/json
 	header /.well-known/matrix/* Access-Control-Allow-Origin *
 	respond /.well-known/matrix/server `{"m.server": "matrix.example.com:443"}`
 	respond /.well-known/matrix/client `{"m.homeserver": {"base_url": "https://matrix.example.com"}}`
 }

-# The actual domain name whereby your Matrix server is accessed.
+# The actual domain name whereby your Matrix server is accessed

 matrix.example.com {
 	# Change the end of each reverse_proxy line to the correct
 	# address for your various services.
 	@sync_api {
-		path_regexp /_matrix/client/.*?/(sync|user/.*?/filter/?.*|keys/changes|rooms/.*?/messages)$
+		path_regexp /_matrix/client/.*?/(sync|user/.*?/filter/?.*|keys/changes|rooms/.*?/(messages|context/.*?|relations/.*?|event/.*?))$
 	}
 	reverse_proxy @sync_api sync_api:8073

 	reverse_proxy /_matrix/client* client_api:8071
 	reverse_proxy /_matrix/federation* federation_api:8071
 	reverse_proxy /_matrix/key* federation_api:8071
 	reverse_proxy /_matrix/media* media_api:8071
 }

@@ -18,8 +18,13 @@ VirtualHost {
 	# /_matrix/client/.*/user/{userId}/filter/{filterID}
 	# /_matrix/client/.*/keys/changes
 	# /_matrix/client/.*/rooms/{roomId}/messages
+	# /_matrix/client/.*/rooms/{roomId}/context/{eventID}
+	# /_matrix/client/.*/rooms/{roomId}/event/{eventID}
+	# /_matrix/client/.*/rooms/{roomId}/relations/{eventID}
+	# /_matrix/client/.*/rooms/{roomId}/relations/{eventID}/{relType}
+	# /_matrix/client/.*/rooms/{roomId}/relations/{eventID}/{relType}/{eventType}
 	# to sync_api
-	ReverseProxy = /_matrix/client/.*?/(sync|user/.*?/filter/?.*|keys/changes|rooms/.*?/messages) http://localhost:8073 600
+	ReverseProxy = /_matrix/client/.*?/(sync|user/.*?/filter/?.*|keys/changes|rooms/.*?/(messages|context/.*?|relations/.*?|event/.*?))$ http://localhost:8073 600
 	ReverseProxy = /_matrix/client http://localhost:8071 600
 	ReverseProxy = /_matrix/federation http://localhost:8072 600
 	ReverseProxy = /_matrix/key http://localhost:8072 600

@@ -11,6 +11,41 @@ permalink: /installation/start/optimisation
 Now that you have Dendrite running, the following tweaks will improve the reliability
 and performance of your installation.

+## PostgreSQL connection limit
+
+A PostgreSQL database engine is configured to allow only a certain number of connections.
+This is typically controlled by the `max_connections` and `superuser_reserved_connections`
+configuration items in `postgresql.conf`. Once these limits are violated, **PostgreSQL will
+immediately stop accepting new connections** until some of the existing connections are closed.
+This is a common source of misconfiguration and requires particular care.
+
+If your PostgreSQL `max_connections` is set to `100` and `superuser_reserved_connections` is
+set to `3` then you have an effective connection limit of 97 database connections. It is
+therefore important to ensure that Dendrite doesn't violate that limit, otherwise database
+queries will unexpectedly fail and this will cause problems both within Dendrite and for users.
+
+If you are also running other software that uses the same PostgreSQL database engine, then you
+must also take into account that some connections will be already used by your other software
+and therefore will not be available to Dendrite. Check the configuration of any other software
+using the same database engine for their configured connection limits and adjust your calculations
+accordingly.
+
+Dendrite has a `max_open_conns` configuration item in each `database` block to control how many
+connections it will open to the database.
+
+**If you are using the `global` database pool** then you only need to configure the
+`max_open_conns` setting once in the `global` section.
+
+**If you are defining a `database` config per component** then you will need to ensure that
+the **sum total** of all configured `max_open_conns` to a given database server does not exceed
+the connection limit. If you configure a total that adds up to more connections than are available
+then this will cause database queries to fail.
+
+You may wish to raise the `max_connections` limit on your PostgreSQL server to accommodate
+additional connections, in which case you should also update the `max_open_conns` in your
+Dendrite configuration accordingly. However, be aware that this is only advisable on particularly
+powerful servers that can handle the concurrent load of additional queries running at one time.
+
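The budgeting described above is simple arithmetic and can be checked mechanically before editing the configuration. The sketch below is purely illustrative (Dendrite does not ship such a helper); it uses the worked figures from the section above: a limit of 100 minus 3 reserved connections, and eight component databases.

```go
package main

import "fmt"

// connectionBudgetOK reports whether per-component max_open_conns values fit
// within the effective PostgreSQL connection limit. Illustrative only.
func connectionBudgetOK(maxConnections, reserved, otherSoftware int, perComponent []int) (int, bool) {
	limit := maxConnections - reserved - otherSoftware
	total := 0
	for _, c := range perComponent {
		total += c
	}
	return total, total <= limit
}

func main() {
	// max_connections=100, superuser_reserved_connections=3, nothing else
	// using the server, eight component databases at 10 connections each.
	total, ok := connectionBudgetOK(100, 3, 0, []int{10, 10, 10, 10, 10, 10, 10, 10})
	fmt.Printf("Dendrite would open up to %d connections; within limit: %v\n", total, ok) // 80, true
}
```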
 ## File descriptor limit

 Most platforms have a limit on how many file descriptors a single process can open. All

@@ -87,6 +87,12 @@ and contain the following JSON document:
 For example, this can be done with the following Caddy config:

 ```
+handle /.well-known/matrix/server {
+    header Content-Type application/json
+    header Access-Control-Allow-Origin *
+    respond `"m.server": "matrix.example.com:8448"`
+}
+
 handle /.well-known/matrix/client {
     header Content-Type application/json
     header Access-Control-Allow-Origin *

@@ -10,30 +10,15 @@ permalink: /installation/database
 Dendrite uses SQL databases to store data. Depending on the database engine being used, you
 may need to perform some manual steps outlined below.

-## SQLite
-
-SQLite deployments do not require manual database creation. Simply configure the database
-filenames in the Dendrite configuration file and start Dendrite. The databases will be created
-and populated automatically.
-
-Note that Dendrite **cannot share a single SQLite database across multiple components**. Each
-component must be configured with its own SQLite database filename. You will have to remove
-the `global.database` section from your Dendrite config and add it to each individual section
-instead in order to use SQLite.
-
-### Connection strings
-
-Connection strings for SQLite databases take the following forms:
-
-* Current working directory path: `file:dendrite_component.db`
-* Full specified path: `file:///path/to/dendrite_component.db`
-
 ## PostgreSQL

 Dendrite can automatically populate the database with the relevant tables and indexes, but
 it is not capable of creating the databases themselves. You will need to create the databases
 manually.

+The databases **must** be created with UTF-8 encoding configured or you will likely run into problems
+with your Dendrite deployment.
+
 At this point, you can choose to either use a single database for all Dendrite components,
 or you can run each component with its own separate database:

@@ -83,7 +68,7 @@ sudo -u postgres createuser -P dendrite
 Create the database itself, using the `dendrite` role from above:

 ```bash
-sudo -u postgres createdb -O dendrite dendrite
+sudo -u postgres createdb -O dendrite -E UTF-8 dendrite
 ```

 ### Multiple database creation

@@ -103,6 +88,28 @@ The following eight components require a database. In this example they will be

 ```bash
 for i in appservice federationapi mediaapi mscs roomserver syncapi keyserver userapi; do
-    sudo -u postgres createdb -O dendrite dendrite_$i
+    sudo -u postgres createdb -O dendrite -E UTF-8 dendrite_$i
 done
 ```

+## SQLite
+
+**WARNING:** The Dendrite SQLite backend is slower, less reliable and not recommended for
+production usage. You should use PostgreSQL instead. We may not be able to provide support if
+you run into issues with your deployment while using the SQLite backend.
+
+SQLite deployments do not require manual database creation. Simply configure the database
+filenames in the Dendrite configuration file and start Dendrite. The databases will be created
+and populated automatically.
+
+Note that Dendrite **cannot share a single SQLite database across multiple components**. Each
+component must be configured with its own SQLite database filename. You will have to remove
+the `global.database` section from your Dendrite config and add it to each individual section
+instead in order to use SQLite.
+
+### Connection strings
+
+Connection strings for SQLite databases take the following forms:
+
+* Current working directory path: `file:dendrite_component.db`
+* Full specified path: `file:///path/to/dendrite_component.db`

@@ -138,6 +138,21 @@ room_server:
     conn_max_lifetime: -1
 ```

+## Full-text search
+
+Dendrite supports experimental full-text indexing using [Bleve](https://github.com/blevesearch/bleve). It is configured in the `sync_api` section as follows.
+
+Depending on the language most likely to be used on the server, it might make sense to change the `language` used when indexing, to ensure the returned results match the expectations. A full list of possible languages can be found [here](https://github.com/blevesearch/bleve/tree/master/analysis/lang).
+
+```yaml
+sync_api:
+  # ...
+  search:
+    enabled: false
+    index_path: "./searchindex"
+    language: "en"
+```
+
 ## Other sections

 There are other options which may be useful so review them all. In particular, if you are

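For reference, the same options surface in code as a `config.Fulltext` struct, as seen in the generate-config changes earlier in this commit. The sketch below simply mirrors that hunk; the field set shown is only what the diff exposes, and whether further fields exist is not covered here.

```go
package main

import (
	"fmt"
	"path/filepath"

	"github.com/matrix-org/dendrite/setup/config"
)

func main() {
	// Mirrors the YAML above using the fields populated by generate-config.
	dirPath := "./"
	fulltext := config.Fulltext{
		Enabled:   true,
		IndexPath: config.Path(filepath.Join(dirPath, "searchindex")),
		InMemory:  true, // keep the Bleve index in memory, as the CI defaults in this commit do
		Language:  "en",
	}
	fmt.Printf("%+v\n", fulltext)
}
```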
@@ -28,8 +28,13 @@ server {
     # /_matrix/client/.*/user/{userId}/filter/{filterID}
     # /_matrix/client/.*/keys/changes
     # /_matrix/client/.*/rooms/{roomId}/messages
+    # /_matrix/client/.*/rooms/{roomId}/context/{eventID}
+    # /_matrix/client/.*/rooms/{roomId}/event/{eventID}
+    # /_matrix/client/.*/rooms/{roomId}/relations/{eventID}
+    # /_matrix/client/.*/rooms/{roomId}/relations/{eventID}/{relType}
+    # /_matrix/client/.*/rooms/{roomId}/relations/{eventID}/{relType}/{eventType}
     # to sync_api
-    location ~ /_matrix/client/.*?/(sync|user/.*?/filter/?.*|keys/changes|rooms/.*?/messages)$ {
+    location ~ /_matrix/client/.*?/(sync|user/.*?/filter/?.*|keys/changes|rooms/.*?/(messages|context/.*?|relations/.*?|event/.*?))$ {
         proxy_pass http://sync_api:8073;
     }

@@ -5,9 +5,10 @@ import (
 	"fmt"
 	"time"

-	"github.com/matrix-org/dendrite/federationapi/types"
 	"github.com/matrix-org/gomatrix"
 	"github.com/matrix-org/gomatrixserverlib"
+
+	"github.com/matrix-org/dendrite/federationapi/types"
 )

 // FederationInternalAPI is used to query information from the federation sender.

@@ -108,6 +109,7 @@ type FederationClientError struct {
 	Err         string
 	RetryAfter  time.Duration
 	Blacklisted bool
+	Code        int // HTTP Status code from the remote server
 }

 func (e FederationClientError) Error() string {

@@ -157,6 +159,7 @@ type PerformJoinRequest struct {
 	// The sorted list of servers to try. Servers will be tried sequentially, after de-duplication.
 	ServerNames types.ServerNames      `json:"server_names"`
 	Content     map[string]interface{} `json:"content"`
+	Unsigned    map[string]interface{} `json:"unsigned"`
 }

 type PerformJoinResponse struct {

@@ -18,6 +18,11 @@ import (
 	"context"
 	"encoding/json"
 
+	"github.com/getsentry/sentry-go"
+	"github.com/matrix-org/gomatrixserverlib"
+	"github.com/nats-io/nats.go"
+	"github.com/sirupsen/logrus"
+
 	"github.com/matrix-org/dendrite/federationapi/queue"
 	"github.com/matrix-org/dendrite/federationapi/storage"
 	"github.com/matrix-org/dendrite/federationapi/types"
@@ -26,9 +31,6 @@ import (
 	"github.com/matrix-org/dendrite/setup/config"
 	"github.com/matrix-org/dendrite/setup/jetstream"
 	"github.com/matrix-org/dendrite/setup/process"
-	"github.com/matrix-org/gomatrixserverlib"
-	"github.com/nats-io/nats.go"
-	"github.com/sirupsen/logrus"
 )
 
 // KeyChangeConsumer consumes events that originate in key server.
@@ -67,16 +69,18 @@ func NewKeyChangeConsumer(
 // Start consuming from key servers
 func (t *KeyChangeConsumer) Start() error {
 	return jetstream.JetStreamConsumer(
-		t.ctx, t.jetstream, t.topic, t.durable, t.onMessage,
-		nats.DeliverAll(), nats.ManualAck(),
+		t.ctx, t.jetstream, t.topic, t.durable, 1,
+		t.onMessage, nats.DeliverAll(), nats.ManualAck(),
 	)
 }
 
 // onMessage is called in response to a message received on the
 // key change events topic from the key server.
-func (t *KeyChangeConsumer) onMessage(ctx context.Context, msg *nats.Msg) bool {
+func (t *KeyChangeConsumer) onMessage(ctx context.Context, msgs []*nats.Msg) bool {
+	msg := msgs[0] // Guaranteed to exist if onMessage is called
 	var m api.DeviceMessage
 	if err := json.Unmarshal(msg.Data, &m); err != nil {
+		sentry.CaptureException(err)
 		logrus.WithError(err).Errorf("failed to read device message from key change topic")
 		return true
 	}
@@ -104,6 +108,7 @@ func (t *KeyChangeConsumer) onDeviceKeyMessage(m api.DeviceMessage) bool {
 	// only send key change events which originated from us
 	_, originServerName, err := gomatrixserverlib.SplitID('@', m.UserID)
 	if err != nil {
+		sentry.CaptureException(err)
 		logger.WithError(err).Error("Failed to extract domain from key change event")
 		return true
 	}
@@ -117,6 +122,7 @@ func (t *KeyChangeConsumer) onDeviceKeyMessage(m api.DeviceMessage) bool {
 		WantMembership: "join",
 	}, &queryRes)
 	if err != nil {
+		sentry.CaptureException(err)
 		logger.WithError(err).Error("failed to calculate joined rooms for user")
 		return true
 	}
@@ -124,6 +130,7 @@ func (t *KeyChangeConsumer) onDeviceKeyMessage(m api.DeviceMessage) bool {
 	// send this key change to all servers who share rooms with this user.
 	destinations, err := t.db.GetJoinedHostsForRooms(t.ctx, queryRes.RoomIDs, true)
 	if err != nil {
+		sentry.CaptureException(err)
 		logger.WithError(err).Error("failed to calculate joined hosts for rooms user is in")
 		return true
 	}
@@ -146,6 +153,7 @@ func (t *KeyChangeConsumer) onDeviceKeyMessage(m api.DeviceMessage) bool {
 		Keys: m.KeyJSON,
 	}
 	if edu.Content, err = json.Marshal(event); err != nil {
+		sentry.CaptureException(err)
 		logger.WithError(err).Error("failed to marshal EDU JSON")
 		return true
 	}
@@ -159,6 +167,7 @@ func (t *KeyChangeConsumer) onCrossSigningMessage(m api.DeviceMessage) bool {
 	output := m.CrossSigningKeyUpdate
 	_, host, err := gomatrixserverlib.SplitID('@', output.UserID)
 	if err != nil {
+		sentry.CaptureException(err)
 		logrus.WithError(err).Errorf("fedsender key change consumer: user ID parse failure")
 		return true
 	}
@@ -175,12 +184,14 @@ func (t *KeyChangeConsumer) onCrossSigningMessage(m api.DeviceMessage) bool {
 		WantMembership: "join",
 	}, &queryRes)
 	if err != nil {
+		sentry.CaptureException(err)
 		logger.WithError(err).Error("fedsender key change consumer: failed to calculate joined rooms for user")
 		return true
 	}
 	// send this key change to all servers who share rooms with this user.
 	destinations, err := t.db.GetJoinedHostsForRooms(t.ctx, queryRes.RoomIDs, true)
 	if err != nil {
+		sentry.CaptureException(err)
 		logger.WithError(err).Error("fedsender key change consumer: failed to calculate joined hosts for rooms user is in")
 		return true
 	}
@@ -195,6 +206,7 @@ func (t *KeyChangeConsumer) onCrossSigningMessage(m api.DeviceMessage) bool {
 		Origin: string(t.serverName),
 	}
 	if edu.Content, err = json.Marshal(output); err != nil {
+		sentry.CaptureException(err)
 		logger.WithError(err).Error("fedsender key change consumer: failed to marshal output, dropping")
 		return true
 	}
@@ -69,14 +69,15 @@ func (t *OutputPresenceConsumer) Start() error {
 		return nil
 	}
 	return jetstream.JetStreamConsumer(
-		t.ctx, t.jetstream, t.topic, t.durable, t.onMessage,
+		t.ctx, t.jetstream, t.topic, t.durable, 1, t.onMessage,
 		nats.DeliverAll(), nats.ManualAck(), nats.HeadersOnly(),
 	)
 }
 
 // onMessage is called in response to a message received on the presence
 // events topic from the client api.
-func (t *OutputPresenceConsumer) onMessage(ctx context.Context, msg *nats.Msg) bool {
+func (t *OutputPresenceConsumer) onMessage(ctx context.Context, msgs []*nats.Msg) bool {
+	msg := msgs[0] // Guaranteed to exist if onMessage is called
 	// only send presence events which originated from us
 	userID := msg.Header.Get(jetstream.UserID)
 	_, serverName, err := gomatrixserverlib.SplitID('@', userID)
@@ -65,14 +65,15 @@ func NewOutputReceiptConsumer(
 // Start consuming from the clientapi
 func (t *OutputReceiptConsumer) Start() error {
 	return jetstream.JetStreamConsumer(
-		t.ctx, t.jetstream, t.topic, t.durable, t.onMessage,
+		t.ctx, t.jetstream, t.topic, t.durable, 1, t.onMessage,
 		nats.DeliverAll(), nats.ManualAck(), nats.HeadersOnly(),
 	)
 }
 
 // onMessage is called in response to a message received on the receipt
 // events topic from the client api.
-func (t *OutputReceiptConsumer) onMessage(ctx context.Context, msg *nats.Msg) bool {
+func (t *OutputReceiptConsumer) onMessage(ctx context.Context, msgs []*nats.Msg) bool {
+	msg := msgs[0] // Guaranteed to exist if onMessage is called
 	receipt := syncTypes.OutputReceiptEvent{
 		UserID: msg.Header.Get(jetstream.UserID),
 		RoomID: msg.Header.Get(jetstream.RoomID),
@@ -80,6 +81,14 @@ func (t *OutputReceiptConsumer) onMessage(ctx context.Context, msg *nats.Msg) bo
 		Type:   msg.Header.Get("type"),
 	}
 
+	switch receipt.Type {
+	case "m.read":
+		// These are allowed to be sent over federation
+	case "m.read.private", "m.fully_read":
+		// These must not be sent over federation
+		return true
+	}
+
 	// only send receipt events which originated from us
 	_, receiptServerName, err := gomatrixserverlib.SplitID('@', receipt.UserID)
 	if err != nil {
@@ -68,8 +68,8 @@ func NewOutputRoomEventConsumer(
 // Start consuming from room servers
 func (s *OutputRoomEventConsumer) Start() error {
 	return jetstream.JetStreamConsumer(
-		s.ctx, s.jetstream, s.topic, s.durable, s.onMessage,
-		nats.DeliverAll(), nats.ManualAck(),
+		s.ctx, s.jetstream, s.topic, s.durable, 1,
+		s.onMessage, nats.DeliverAll(), nats.ManualAck(),
 	)
 }
 
@@ -77,7 +77,15 @@ func (s *OutputRoomEventConsumer) Start() error {
 // It is unsafe to call this with messages for the same room in multiple gorountines
 // because updates it will likely fail with a types.EventIDMismatchError when it
 // realises that it cannot update the room state using the deltas.
-func (s *OutputRoomEventConsumer) onMessage(ctx context.Context, msg *nats.Msg) bool {
+func (s *OutputRoomEventConsumer) onMessage(ctx context.Context, msgs []*nats.Msg) bool {
+	msg := msgs[0] // Guaranteed to exist if onMessage is called
+	receivedType := api.OutputType(msg.Header.Get(jetstream.RoomEventType))
+
+	// Only handle events we care about
+	if receivedType != api.OutputTypeNewRoomEvent && receivedType != api.OutputTypeNewInboundPeek {
+		return true
+	}
+
 	// Parse out the event JSON
 	var output api.OutputEvent
 	if err := json.Unmarshal(msg.Data, &output); err != nil {
@@ -18,16 +18,18 @@ import (
 	"context"
 	"encoding/json"
 
+	"github.com/getsentry/sentry-go"
+	"github.com/matrix-org/gomatrixserverlib"
+	"github.com/matrix-org/util"
+	"github.com/nats-io/nats.go"
+	log "github.com/sirupsen/logrus"
+
 	"github.com/matrix-org/dendrite/federationapi/queue"
 	"github.com/matrix-org/dendrite/federationapi/storage"
 	"github.com/matrix-org/dendrite/setup/config"
 	"github.com/matrix-org/dendrite/setup/jetstream"
 	"github.com/matrix-org/dendrite/setup/process"
 	syncTypes "github.com/matrix-org/dendrite/syncapi/types"
-	"github.com/matrix-org/gomatrixserverlib"
-	"github.com/matrix-org/util"
-	"github.com/nats-io/nats.go"
-	log "github.com/sirupsen/logrus"
 )
 
 // OutputSendToDeviceConsumer consumes events that originate in the clientapi.
@@ -63,34 +65,37 @@ func NewOutputSendToDeviceConsumer(
 // Start consuming from the client api
 func (t *OutputSendToDeviceConsumer) Start() error {
 	return jetstream.JetStreamConsumer(
-		t.ctx, t.jetstream, t.topic, t.durable, t.onMessage,
-		nats.DeliverAll(), nats.ManualAck(),
+		t.ctx, t.jetstream, t.topic, t.durable, 1,
+		t.onMessage, nats.DeliverAll(), nats.ManualAck(),
 	)
 }
 
 // onMessage is called in response to a message received on the
 // send-to-device events topic from the client api.
-func (t *OutputSendToDeviceConsumer) onMessage(ctx context.Context, msg *nats.Msg) bool {
+func (t *OutputSendToDeviceConsumer) onMessage(ctx context.Context, msgs []*nats.Msg) bool {
+	msg := msgs[0] // Guaranteed to exist if onMessage is called
 	// only send send-to-device events which originated from us
 	sender := msg.Header.Get("sender")
 	_, originServerName, err := gomatrixserverlib.SplitID('@', sender)
 	if err != nil {
+		sentry.CaptureException(err)
 		log.WithError(err).WithField("user_id", sender).Error("Failed to extract domain from send-to-device sender")
 		return true
 	}
 	if originServerName != t.ServerName {
-		log.WithField("other_server", originServerName).Info("Suppressing send-to-device: originated elsewhere")
 		return true
 	}
 	// Extract the send-to-device event from msg.
 	var ote syncTypes.OutputSendToDeviceEvent
 	if err = json.Unmarshal(msg.Data, &ote); err != nil {
+		sentry.CaptureException(err)
 		log.WithError(err).Errorf("output log: message parse failed (expected send-to-device)")
 		return true
 	}
 
 	_, destServerName, err := gomatrixserverlib.SplitID('@', ote.UserID)
 	if err != nil {
+		sentry.CaptureException(err)
 		log.WithError(err).WithField("user_id", ote.UserID).Error("Failed to extract domain from send-to-device destination")
 		return true
 	}
@@ -116,6 +121,7 @@ func (t *OutputSendToDeviceConsumer) onMessage(ctx context.Context, msg *nats.Ms
 		},
 	}
 	if edu.Content, err = json.Marshal(tdm); err != nil {
+		sentry.CaptureException(err)
 		log.WithError(err).Error("failed to marshal EDU JSON")
 		return true
 	}
@@ -62,14 +62,15 @@ func NewOutputTypingConsumer(
 // Start consuming from the clientapi
 func (t *OutputTypingConsumer) Start() error {
 	return jetstream.JetStreamConsumer(
-		t.ctx, t.jetstream, t.topic, t.durable, t.onMessage,
+		t.ctx, t.jetstream, t.topic, t.durable, 1, t.onMessage,
 		nats.DeliverAll(), nats.ManualAck(), nats.HeadersOnly(),
 	)
 }
 
 // onMessage is called in response to a message received on the typing
 // events topic from the client api.
-func (t *OutputTypingConsumer) onMessage(ctx context.Context, msg *nats.Msg) bool {
+func (t *OutputTypingConsumer) onMessage(ctx context.Context, msgs []*nats.Msg) bool {
+	msg := msgs[0] // Guaranteed to exist if onMessage is called
 	// Extract the typing event from msg.
 	roomID := msg.Header.Get(jetstream.RoomID)
 	userID := msg.Header.Get(jetstream.UserID)
@@ -18,6 +18,8 @@ import (
 	"time"
 
 	"github.com/gorilla/mux"
+	"github.com/sirupsen/logrus"
+
 	"github.com/matrix-org/dendrite/federationapi/api"
 	federationAPI "github.com/matrix-org/dendrite/federationapi/api"
 	"github.com/matrix-org/dendrite/federationapi/consumers"
@@ -33,10 +35,10 @@ import (
 	"github.com/matrix-org/dendrite/setup/base"
 	"github.com/matrix-org/dendrite/setup/jetstream"
 	userapi "github.com/matrix-org/dendrite/userapi/api"
-	"github.com/sirupsen/logrus"
+
+	"github.com/matrix-org/gomatrixserverlib"
 
 	"github.com/matrix-org/dendrite/federationapi/routing"
-	"github.com/matrix-org/gomatrixserverlib"
 )
 
 // AddInternalRoutes registers HTTP handlers for the internal API. Invokes functions
@@ -66,6 +68,7 @@ func AddPublicRoutes(
 		TopicTypingEvent:      cfg.Matrix.JetStream.Prefixed(jetstream.OutputTypingEvent),
 		TopicPresenceEvent:    cfg.Matrix.JetStream.Prefixed(jetstream.OutputPresenceEvent),
 		TopicDeviceListUpdate: cfg.Matrix.JetStream.Prefixed(jetstream.InputDeviceListUpdate),
+		TopicSigningKeyUpdate: cfg.Matrix.JetStream.Prefixed(jetstream.InputSigningKeyUpdate),
 		ServerName:            cfg.Matrix.ServerName,
 		UserAPI:               userAPI,
 	}
@@ -113,17 +116,14 @@ func NewInternalAPI(
 		_ = federationDB.RemoveAllServersFromBlacklist()
 	}
 
-	stats := &statistics.Statistics{
-		DB:                     federationDB,
-		FailuresUntilBlacklist: cfg.FederationMaxRetries,
-	}
+	stats := statistics.NewStatistics(federationDB, cfg.FederationMaxRetries+1)
 
 	js, _ := base.NATS.Prepare(base.ProcessContext, &cfg.Matrix.JetStream)
 
 	queues := queue.NewOutgoingQueues(
 		federationDB, base.ProcessContext,
 		cfg.Matrix.DisableFederation,
-		cfg.Matrix.ServerName, federation, rsAPI, stats,
+		cfg.Matrix.ServerName, federation, rsAPI, &stats,
 		&queue.SigningInfo{
 			KeyID:      cfg.Matrix.KeyID,
 			PrivateKey: cfg.Matrix.PrivateKey,
@@ -180,5 +180,5 @@ func NewInternalAPI(
 	}
 	time.AfterFunc(time.Minute, cleanExpiredEDUs)
 
-	return internal.NewFederationInternalAPI(federationDB, cfg, rsAPI, federation, stats, caches, queues, keyRing)
+	return internal.NewFederationInternalAPI(federationDB, cfg, rsAPI, federation, &stats, caches, queues, keyRing)
 }
@@ -12,12 +12,13 @@ import (
 	"testing"
 	"time"
 
+	"github.com/matrix-org/gomatrixserverlib"
+
 	"github.com/matrix-org/dendrite/federationapi/api"
 	"github.com/matrix-org/dendrite/federationapi/routing"
 	"github.com/matrix-org/dendrite/internal/caching"
 	"github.com/matrix-org/dendrite/setup/base"
 	"github.com/matrix-org/dendrite/setup/config"
-	"github.com/matrix-org/gomatrixserverlib"
 )
 
 type server struct {
@@ -75,7 +76,10 @@ func TestMain(m *testing.M) {
 		// Draw up just enough Dendrite config for the server key
 		// API to work.
 		cfg := &config.Dendrite{}
-		cfg.Defaults(true)
+		cfg.Defaults(config.DefaultOpts{
+			Generate:   true,
+			Monolithic: true,
+		})
 		cfg.Global.ServerName = gomatrixserverlib.ServerName(s.name)
 		cfg.Global.PrivateKey = testPriv
 		cfg.Global.JetStream.InMemory = true
@@ -83,7 +87,12 @@ func TestMain(m *testing.M) {
 		cfg.Global.JetStream.StoragePath = config.Path(d)
 		cfg.Global.KeyID = serverKeyID
 		cfg.Global.KeyValidityPeriod = s.validity
-		cfg.FederationAPI.Database.ConnectionString = config.DataSource("file::memory:")
+		f, err := os.CreateTemp(d, "federation_keys_test*.db")
+		if err != nil {
+			return -1
+		}
+		defer f.Close()
+		cfg.FederationAPI.Database.ConnectionString = config.DataSource("file:" + f.Name())
 		s.config = &cfg.FederationAPI
 
 		// Create a transport which redirects federation requests to
@@ -10,6 +10,10 @@ import (
 	"testing"
 	"time"
 
+	"github.com/matrix-org/gomatrix"
+	"github.com/matrix-org/gomatrixserverlib"
+	"github.com/nats-io/nats.go"
+
 	"github.com/matrix-org/dendrite/federationapi"
 	"github.com/matrix-org/dendrite/federationapi/api"
 	"github.com/matrix-org/dendrite/federationapi/internal"
@@ -20,9 +24,6 @@ import (
 	"github.com/matrix-org/dendrite/setup/jetstream"
 	"github.com/matrix-org/dendrite/test"
 	"github.com/matrix-org/dendrite/test/testrig"
-	"github.com/matrix-org/gomatrix"
-	"github.com/matrix-org/gomatrixserverlib"
-	"github.com/nats-io/nats.go"
 )
 
 type fedRoomserverAPI struct {
@@ -263,12 +264,14 @@ func testFederationAPIJoinThenKeyUpdate(t *testing.T, dbType test.DBType) {
 func TestRoomsV3URLEscapeDoNot404(t *testing.T) {
 	_, privKey, _ := ed25519.GenerateKey(nil)
 	cfg := &config.Dendrite{}
-	cfg.Defaults(true)
+	cfg.Defaults(config.DefaultOpts{
+		Generate:   true,
+		Monolithic: true,
+	})
 	cfg.Global.KeyID = gomatrixserverlib.KeyID("ed25519:auto")
 	cfg.Global.ServerName = gomatrixserverlib.ServerName("localhost")
 	cfg.Global.PrivateKey = privKey
 	cfg.Global.JetStream.InMemory = true
-	cfg.FederationAPI.Database.ConnectionString = config.DataSource("file::memory:")
 	base := base.NewBaseDendrite(cfg, "Monolith")
 	keyRing := &test.NopJSONVerifier{}
 	// TODO: This is pretty fragile, as if anything calls anything on these nils this test will break.
@@ -7,14 +7,15 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/matrix-org/dendrite/federationapi/api"
-	"github.com/matrix-org/dendrite/federationapi/consumers"
-	roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
-	"github.com/matrix-org/dendrite/roomserver/version"
 	"github.com/matrix-org/gomatrix"
 	"github.com/matrix-org/gomatrixserverlib"
 	"github.com/matrix-org/util"
 	"github.com/sirupsen/logrus"
+
+	"github.com/matrix-org/dendrite/federationapi/api"
+	"github.com/matrix-org/dendrite/federationapi/consumers"
+	roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
+	"github.com/matrix-org/dendrite/roomserver/version"
 )
 
 // PerformLeaveRequest implements api.FederationInternalAPI
@@ -95,6 +96,7 @@ func (r *FederationInternalAPI) PerformJoin(
 			request.Content,
 			serverName,
 			supportedVersions,
+			request.Unsigned,
 		); err != nil {
 			logrus.WithError(err).WithFields(logrus.Fields{
 				"server_name": serverName,
@@ -139,6 +141,7 @@ func (r *FederationInternalAPI) performJoinUsingServer(
 	content map[string]interface{},
 	serverName gomatrixserverlib.ServerName,
 	supportedVersions []gomatrixserverlib.RoomVersion,
+	unsigned map[string]interface{},
 ) error {
 	// Try to perform a make_join using the information supplied in the
 	// request.
@@ -217,7 +220,7 @@ func (r *FederationInternalAPI) performJoinUsingServer(
 	var remoteEvent *gomatrixserverlib.Event
 	remoteEvent, err = respSendJoin.Event.UntrustedEvent(respMakeJoin.RoomVersion)
 	if err == nil && isWellFormedMembershipEvent(
-		remoteEvent, roomID, userID, r.cfg.Matrix.ServerName,
+		remoteEvent, roomID, userID,
 	) {
 		event = remoteEvent
 	}
@@ -259,7 +262,7 @@ func (r *FederationInternalAPI) performJoinUsingServer(
 	if err != nil {
 		return fmt.Errorf("JoinedHostsFromEvents: failed to get joined hosts: %s", err)
 	}
-	logrus.WithField("hosts", joinedHosts).WithField("room", roomID).Info("Joined federated room with hosts")
+	logrus.WithField("room", roomID).Infof("Joined federated room with %d hosts", len(joinedHosts))
 	if _, err = r.db.UpdateRoom(context.Background(), roomID, joinedHosts, nil, true); err != nil {
 		return fmt.Errorf("UpdatedRoom: failed to update room with joined hosts: %s", err)
 	}
@@ -267,6 +270,14 @@ func (r *FederationInternalAPI) performJoinUsingServer(
 	// If we successfully performed a send_join above then the other
 	// server now thinks we're a part of the room. Send the newly
 	// returned state to the roomserver to update our local view.
+	if unsigned != nil {
+		event, err = event.SetUnsigned(unsigned)
+		if err != nil {
+			// non-fatal, log and continue
+			logrus.WithError(err).Errorf("Failed to set unsigned content")
+		}
+	}
+
 	if err = roomserverAPI.SendEventWithState(
 		context.Background(),
 		r.rsAPI,
@@ -285,7 +296,7 @@ func (r *FederationInternalAPI) performJoinUsingServer(
 
 // isWellFormedMembershipEvent returns true if the event looks like a legitimate
 // membership event.
-func isWellFormedMembershipEvent(event *gomatrixserverlib.Event, roomID, userID string, origin gomatrixserverlib.ServerName) bool {
+func isWellFormedMembershipEvent(event *gomatrixserverlib.Event, roomID, userID string) bool {
 	if membership, err := event.Membership(); err != nil {
 		return false
 	} else if membership != gomatrixserverlib.Join {
@@ -294,9 +305,6 @@ func isWellFormedMembershipEvent(event *gomatrixserverlib.Event, roomID, userID
 	if event.RoomID() != roomID {
 		return false
 	}
-	if event.Origin() != origin {
-		return false
-	}
 	if !event.StateKeyEquals(userID) {
 		return false
 	}
@@ -4,12 +4,15 @@ import (
 	"context"
 	"encoding/json"
 	"net/http"
+	"net/url"
 
 	"github.com/gorilla/mux"
-	"github.com/matrix-org/dendrite/federationapi/api"
-	"github.com/matrix-org/dendrite/internal/httputil"
+	"github.com/matrix-org/gomatrix"
 	"github.com/matrix-org/gomatrixserverlib"
 	"github.com/matrix-org/util"
+
+	"github.com/matrix-org/dendrite/federationapi/api"
+	"github.com/matrix-org/dendrite/internal/httputil"
 )
 
 // AddRoutes adds the FederationInternalAPI handlers to the http.ServeMux.
@@ -229,9 +232,21 @@ func federationClientError(err error) error {
 		return &ferr
 	case *api.FederationClientError:
 		return ferr
-	default:
+	case gomatrix.HTTPError:
 		return &api.FederationClientError{
-			Err: err.Error(),
+			Code: ferr.Code,
+		}
+	case *url.Error: // e.g. certificate error, unable to connect
+		return &api.FederationClientError{
+			Err:  ferr.Error(),
+			Code: 400,
+		}
+	default:
+		// We don't know what exactly failed, but we probably don't
+		// want to retry the request immediately in the device list updater
+		return &api.FederationClientError{
+			Err:  err.Error(),
+			Code: 400,
 		}
 	}
 }
@@ -21,12 +21,13 @@ import (
 	"strconv"
 	"time"
 
-	"github.com/matrix-org/dendrite/setup/jetstream"
-	"github.com/matrix-org/dendrite/syncapi/types"
-	userapi "github.com/matrix-org/dendrite/userapi/api"
 	"github.com/matrix-org/gomatrixserverlib"
 	"github.com/nats-io/nats.go"
 	log "github.com/sirupsen/logrus"
+
+	"github.com/matrix-org/dendrite/setup/jetstream"
+	"github.com/matrix-org/dendrite/syncapi/types"
+	userapi "github.com/matrix-org/dendrite/userapi/api"
 )
 
 // SyncAPIProducer produces events for the sync API server to consume
@@ -36,6 +37,7 @@ type SyncAPIProducer struct {
 	TopicTypingEvent      string
 	TopicPresenceEvent    string
 	TopicDeviceListUpdate string
+	TopicSigningKeyUpdate string
 	JetStream             nats.JetStreamContext
 	ServerName            gomatrixserverlib.ServerName
 	UserAPI               userapi.UserInternalAPI
@@ -62,7 +64,7 @@ func (p *SyncAPIProducer) SendReceipt(
 
 func (p *SyncAPIProducer) SendToDevice(
 	ctx context.Context, sender, userID, deviceID, eventType string,
-	message interface{},
+	message json.RawMessage,
 ) error {
 	devices := []string{}
 	_, domain, err := gomatrixserverlib.SplitID('@', userID)
@@ -90,24 +92,19 @@ func (p *SyncAPIProducer) SendToDevice(
 		devices = append(devices, deviceID)
 	}
 
-	js, err := json.Marshal(message)
-	if err != nil {
-		return err
-	}
-
 	log.WithFields(log.Fields{
 		"user_id":     userID,
 		"num_devices": len(devices),
 		"type":        eventType,
 	}).Tracef("Producing to topic '%s'", p.TopicSendToDeviceEvent)
-	for _, device := range devices {
+	for i, device := range devices {
 		ote := &types.OutputSendToDeviceEvent{
 			UserID:   userID,
 			DeviceID: device,
 			SendToDeviceEvent: gomatrixserverlib.SendToDeviceEvent{
 				Sender:  sender,
 				Type:    eventType,
-				Content: js,
+				Content: message,
 			},
 		}
@@ -116,16 +113,17 @@ func (p *SyncAPIProducer) SendToDevice(
 			log.WithError(err).Error("sendToDevice failed json.Marshal")
 			return err
 		}
-		m := &nats.Msg{
-			Subject: p.TopicSendToDeviceEvent,
-			Data:    eventJSON,
-			Header:  nats.Header{},
-		}
+		m := nats.NewMsg(p.TopicSendToDeviceEvent)
+		m.Data = eventJSON
 		m.Header.Set("sender", sender)
 		m.Header.Set(jetstream.UserID, userID)
 
 		if _, err = p.JetStream.PublishMsg(m, nats.Context(ctx)); err != nil {
-			log.WithError(err).Error("sendToDevice failed t.Producer.SendMessage")
+			if i < len(devices)-1 {
+				log.WithError(err).Warn("sendToDevice failed to PublishMsg, trying further devices")
+				continue
+			}
+			log.WithError(err).Error("sendToDevice failed to PublishMsg for all devices")
 			return err
 		}
 	}
@@ -165,16 +163,24 @@ func (p *SyncAPIProducer) SendPresence(
 }
 
 func (p *SyncAPIProducer) SendDeviceListUpdate(
-	ctx context.Context, deviceListUpdate *gomatrixserverlib.DeviceListUpdateEvent,
+	ctx context.Context, deviceListUpdate gomatrixserverlib.RawJSON, origin gomatrixserverlib.ServerName,
 ) (err error) {
 	m := nats.NewMsg(p.TopicDeviceListUpdate)
-	m.Header.Set(jetstream.UserID, deviceListUpdate.UserID)
-	m.Data, err = json.Marshal(deviceListUpdate)
-	if err != nil {
-		return fmt.Errorf("json.Marshal: %w", err)
-	}
-
+	m.Header.Set("origin", string(origin))
+	m.Data = deviceListUpdate
 	log.Debugf("Sending device list update: %+v", m.Header)
 	_, err = p.JetStream.PublishMsg(m, nats.Context(ctx))
 	return err
 }
+
+func (p *SyncAPIProducer) SendSigningKeyUpdate(
+	ctx context.Context, data gomatrixserverlib.RawJSON, origin gomatrixserverlib.ServerName,
+) (err error) {
+	m := nats.NewMsg(p.TopicSigningKeyUpdate)
+	m.Header.Set("origin", string(origin))
+	m.Data = data
+
+	log.Debugf("Sending signing key update")
+	_, err = p.JetStream.PublishMsg(m, nats.Context(ctx))
+	return err
+}
@@ -21,21 +21,22 @@ import (
 	"sync"
 	"time"
 
+	"github.com/matrix-org/gomatrix"
+	"github.com/matrix-org/gomatrixserverlib"
+	"github.com/sirupsen/logrus"
+	"go.uber.org/atomic"
+
 	fedapi "github.com/matrix-org/dendrite/federationapi/api"
 	"github.com/matrix-org/dendrite/federationapi/statistics"
 	"github.com/matrix-org/dendrite/federationapi/storage"
 	"github.com/matrix-org/dendrite/federationapi/storage/shared"
 	"github.com/matrix-org/dendrite/roomserver/api"
 	"github.com/matrix-org/dendrite/setup/process"
-	"github.com/matrix-org/gomatrix"
-	"github.com/matrix-org/gomatrixserverlib"
-	"github.com/sirupsen/logrus"
-	"go.uber.org/atomic"
 )
 
 const (
 	maxPDUsPerTransaction = 50
-	maxEDUsPerTransaction = 50
+	maxEDUsPerTransaction = 100
 	maxPDUsInMemory       = 128
 	maxEDUsInMemory       = 128
 	queueIdleTimeout      = time.Second * 30
@@ -64,7 +65,6 @@ type destinationQueue struct {
 	pendingPDUs      []*queuedPDU // PDUs waiting to be sent
 	pendingEDUs      []*queuedEDU // EDUs waiting to be sent
 	pendingMutex     sync.RWMutex // protects pendingPDUs and pendingEDUs
-	interruptBackoff chan bool    // interrupts backoff
 }
 
 // Send event adds the event to the pending queue for the destination.
@@ -75,39 +75,22 @@ func (oq *destinationQueue) sendEvent(event *gomatrixserverlib.HeaderedEvent, re
 		logrus.Errorf("attempt to send nil PDU with destination %q", oq.destination)
 		return
 	}
-	// Create a database entry that associates the given PDU NID with
-	// this destination queue. We'll then be able to retrieve the PDU
-	// later.
-	if err := oq.db.AssociatePDUWithDestination(
-		oq.process.Context(),
-		"", // TODO: remove this, as we don't need to persist the transaction ID
-		oq.destination, // the destination server name
-		receipt, // NIDs from federationapi_queue_json table
-	); err != nil {
-		logrus.WithError(err).Errorf("failed to associate PDU %q with destination %q", event.EventID(), oq.destination)
-		return
-	}
-	// Check if the destination is blacklisted. If it isn't then wake
-	// up the queue.
-	if !oq.statistics.Blacklisted() {
-		// If there's room in memory to hold the event then add it to the
-		// list.
-		oq.pendingMutex.Lock()
-		if len(oq.pendingPDUs) < maxPDUsInMemory {
-			oq.pendingPDUs = append(oq.pendingPDUs, &queuedPDU{
-				pdu:     event,
-				receipt: receipt,
-			})
-		} else {
-			oq.overflowed.Store(true)
-		}
-		oq.pendingMutex.Unlock()
-		// Wake up the queue if it's asleep.
-		oq.wakeQueueIfNeeded()
-		select {
-		case oq.notify <- struct{}{}:
-		default:
-		}
+	// If there's room in memory to hold the event then add it to the
+	// list.
+	oq.pendingMutex.Lock()
+	if len(oq.pendingPDUs) < maxPDUsInMemory {
+		oq.pendingPDUs = append(oq.pendingPDUs, &queuedPDU{
+			pdu:     event,
+			receipt: receipt,
+		})
+	} else {
+		oq.overflowed.Store(true)
+	}
+	oq.pendingMutex.Unlock()
+
+	if !oq.backingOff.Load() {
+		oq.wakeQueueAndNotify()
 	}
 }
@@ -119,40 +102,47 @@ func (oq *destinationQueue) sendEDU(event *gomatrixserverlib.EDU, receipt *share
 		logrus.Errorf("attempt to send nil EDU with destination %q", oq.destination)
 		return
 	}
-	// Create a database entry that associates the given PDU NID with
-	// this destination queue. We'll then be able to retrieve the PDU
-	// later.
-	if err := oq.db.AssociateEDUWithDestination(
-		oq.process.Context(),
-		oq.destination, // the destination server name
-		receipt, // NIDs from federationapi_queue_json table
-		event.Type,
-		nil, // this will use the default expireEDUTypes map
-	); err != nil {
-		logrus.WithError(err).Errorf("failed to associate EDU with destination %q", oq.destination)
-		return
-	}
-	// Check if the destination is blacklisted. If it isn't then wake
-	// up the queue.
-	if !oq.statistics.Blacklisted() {
-		// If there's room in memory to hold the event then add it to the
-		// list.
-		oq.pendingMutex.Lock()
-		if len(oq.pendingEDUs) < maxEDUsInMemory {
-			oq.pendingEDUs = append(oq.pendingEDUs, &queuedEDU{
-				edu:     event,
-				receipt: receipt,
-			})
-		} else {
-			oq.overflowed.Store(true)
-		}
-		oq.pendingMutex.Unlock()
-		// Wake up the queue if it's asleep.
-		oq.wakeQueueIfNeeded()
-		select {
-		case oq.notify <- struct{}{}:
-		default:
-		}
+	// If there's room in memory to hold the event then add it to the
+	// list.
+	oq.pendingMutex.Lock()
+	if len(oq.pendingEDUs) < maxEDUsInMemory {
+		oq.pendingEDUs = append(oq.pendingEDUs, &queuedEDU{
+			edu:     event,
+			receipt: receipt,
+		})
+	} else {
+		oq.overflowed.Store(true)
+	}
+	oq.pendingMutex.Unlock()
+
+	if !oq.backingOff.Load() {
+		oq.wakeQueueAndNotify()
+	}
+}
+
+// handleBackoffNotifier is registered as the backoff notification
+// callback with Statistics. It will wakeup and notify the queue
+// if the queue is currently backing off.
+func (oq *destinationQueue) handleBackoffNotifier() {
+	// Only wake up the queue if it is backing off.
+	// Otherwise there is no pending work for the queue to handle
+	// so waking the queue would be a waste of resources.
+	if oq.backingOff.Load() {
+		oq.wakeQueueAndNotify()
+	}
+}
+
+// wakeQueueAndNotify ensures the destination queue is running and notifies it
+// that there is pending work.
+func (oq *destinationQueue) wakeQueueAndNotify() {
+	// Wake up the queue if it's asleep.
+	oq.wakeQueueIfNeeded()
+
+	// Notify the queue that there are events ready to send.
+	select {
+	case oq.notify <- struct{}{}:
+	default:
 	}
 }
@@ -161,10 +151,11 @@ func (oq *destinationQueue) sendEDU(event *gomatrixserverlib.EDU, receipt *share
 // then we will interrupt the backoff, causing any federation
 // requests to retry.
 func (oq *destinationQueue) wakeQueueIfNeeded() {
-	// If we are backing off then interrupt the backoff.
-	if oq.backingOff.CAS(true, false) {
-		oq.interruptBackoff <- true
+	// Clear the backingOff flag and update the backoff metrics if it was set.
+	if oq.backingOff.CompareAndSwap(true, false) {
+		destinationQueueBackingOff.Dec()
 	}
 
 	// If we aren't running then wake up the queue.
 	if !oq.running.Load() {
 		// Start the queue.
@@ -196,38 +187,54 @@ func (oq *destinationQueue) getPendingFromDatabase() {
 		gotEDUs[edu.receipt.String()] = struct{}{}
 	}
 
+	overflowed := false
 	if pduCapacity := maxPDUsInMemory - len(oq.pendingPDUs); pduCapacity > 0 {
 		// We have room in memory for some PDUs - let's request no more than that.
-		if pdus, err := oq.db.GetPendingPDUs(ctx, oq.destination, pduCapacity); err == nil {
+		if pdus, err := oq.db.GetPendingPDUs(ctx, oq.destination, maxPDUsInMemory); err == nil {
+			if len(pdus) == maxPDUsInMemory {
+				overflowed = true
+			}
 			for receipt, pdu := range pdus {
 				if _, ok := gotPDUs[receipt.String()]; ok {
 					continue
 				}
 				oq.pendingPDUs = append(oq.pendingPDUs, &queuedPDU{receipt, pdu})
 				retrieved = true
+				if len(oq.pendingPDUs) == maxPDUsInMemory {
+					break
+				}
 			}
 		} else {
 			logrus.WithError(err).Errorf("Failed to get pending PDUs for %q", oq.destination)
 		}
 	}
 
 	if eduCapacity := maxEDUsInMemory - len(oq.pendingEDUs); eduCapacity > 0 {
 		// We have room in memory for some EDUs - let's request no more than that.
-		if edus, err := oq.db.GetPendingEDUs(ctx, oq.destination, eduCapacity); err == nil {
+		if edus, err := oq.db.GetPendingEDUs(ctx, oq.destination, maxEDUsInMemory); err == nil {
+			if len(edus) == maxEDUsInMemory {
+				overflowed = true
+			}
 			for receipt, edu := range edus {
 				if _, ok := gotEDUs[receipt.String()]; ok {
 					continue
 				}
 				oq.pendingEDUs = append(oq.pendingEDUs, &queuedEDU{receipt, edu})
 				retrieved = true
+				if len(oq.pendingEDUs) == maxEDUsInMemory {
+					break
+				}
 			}
 		} else {
 			logrus.WithError(err).Errorf("Failed to get pending EDUs for %q", oq.destination)
 		}
 	}
 
 	// If we've retrieved all of the events from the database with room to spare
 	// in memory then we'll no longer consider this queue to be overflowed.
-	if len(oq.pendingPDUs) < maxPDUsInMemory && len(oq.pendingEDUs) < maxEDUsInMemory {
+	if !overflowed {
 		oq.overflowed.Store(false)
+	} else {
 	}
 	// If we've retrieved some events then notify the destination queue goroutine.
 	if retrieved {
@@ -238,17 +245,42 @@ func (oq *destinationQueue) getPendingFromDatabase() {
 	}
 }
 
+// checkNotificationsOnClose checks for any remaining notifications
+// and starts a new backgroundSend goroutine if any exist.
+func (oq *destinationQueue) checkNotificationsOnClose() {
+	// NOTE : If we are stopping the queue due to blacklist then it
+	// doesn't matter if we have been notified of new work since
+	// this queue instance will be deleted anyway.
+	if !oq.statistics.Blacklisted() {
+		select {
+		case <-oq.notify:
+			// We received a new notification in between the
+			// idle timeout firing and stopping the goroutine.
+			// Immediately restart the queue.
+			oq.wakeQueueAndNotify()
+		default:
+		}
+	}
+}
+
 // backgroundSend is the worker goroutine for sending events.
 func (oq *destinationQueue) backgroundSend() {
 	// Check if a worker is already running, and if it isn't, then
 	// mark it as started.
-	if !oq.running.CAS(false, true) {
+	if !oq.running.CompareAndSwap(false, true) {
 		return
 	}
+
+	// Register queue cleanup functions.
+	// NOTE : The ordering here is very intentional.
+	defer oq.checkNotificationsOnClose()
+	defer oq.running.Store(false)
+
 	destinationQueueRunning.Inc()
 	defer destinationQueueRunning.Dec()
-	defer oq.queues.clearQueue(oq)
-	defer oq.running.Store(false)
+	idleTimeout := time.NewTimer(queueIdleTimeout)
+	defer idleTimeout.Stop()
 
 	// Mark the queue as overflowed, so we will consult the database
 	// to see if there's anything new to send.
@ -261,59 +293,33 @@ func (oq *destinationQueue) backgroundSend() {
|
||||||
oq.getPendingFromDatabase()
|
oq.getPendingFromDatabase()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Reset the queue idle timeout.
|
||||||
|
if !idleTimeout.Stop() {
|
||||||
|
select {
|
||||||
|
case <-idleTimeout.C:
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
idleTimeout.Reset(queueIdleTimeout)
|
||||||
|
|
||||||
// If we have nothing to do then wait either for incoming events, or
|
// If we have nothing to do then wait either for incoming events, or
|
||||||
// until we hit an idle timeout.
|
// until we hit an idle timeout.
|
||||||
select {
|
select {
|
||||||
case <-oq.notify:
|
case <-oq.notify:
|
||||||
// There's work to do, either because getPendingFromDatabase
|
// There's work to do, either because getPendingFromDatabase
|
||||||
// told us there is, or because a new event has come in via
|
// told us there is, a new event has come in via sendEvent/sendEDU,
|
||||||
// sendEvent/sendEDU.
|
// or we are backing off and it is time to retry.
|
||||||
case <-time.After(queueIdleTimeout):
|
case <-idleTimeout.C:
|
||||||
// The worker is idle so stop the goroutine. It'll get
|
// The worker is idle so stop the goroutine. It'll get
|
||||||
// restarted automatically the next time we have an event to
|
// restarted automatically the next time we have an event to
|
||||||
// send.
|
// send.
|
||||||
return
|
return
|
||||||
case <-oq.process.Context().Done():
|
case <-oq.process.Context().Done():
|
||||||
// The parent process is shutting down, so stop.
|
// The parent process is shutting down, so stop.
|
||||||
|
oq.statistics.ClearBackoff()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// If we are backing off this server then wait for the
|
|
||||||
// backoff duration to complete first, or until explicitly
|
|
||||||
// told to retry.
|
|
||||||
until, blacklisted := oq.statistics.BackoffInfo()
|
|
||||||
if blacklisted {
|
|
||||||
// It's been suggested that we should give up because the backoff
|
|
||||||
// has exceeded a maximum allowable value. Clean up the in-memory
|
|
||||||
// buffers at this point. The PDU clean-up is already on a defer.
|
|
||||||
logrus.Warnf("Blacklisting %q due to exceeding backoff threshold", oq.destination)
|
|
||||||
oq.pendingMutex.Lock()
|
|
||||||
for i := range oq.pendingPDUs {
|
|
||||||
oq.pendingPDUs[i] = nil
|
|
||||||
}
|
|
||||||
for i := range oq.pendingEDUs {
|
|
||||||
oq.pendingEDUs[i] = nil
|
|
||||||
}
|
|
||||||
oq.pendingPDUs = nil
|
|
||||||
oq.pendingEDUs = nil
|
|
||||||
oq.pendingMutex.Unlock()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if until != nil && until.After(time.Now()) {
|
|
||||||
// We haven't backed off yet, so wait for the suggested amount of
|
|
||||||
// time.
|
|
||||||
duration := time.Until(*until)
|
|
||||||
logrus.Debugf("Backing off %q for %s", oq.destination, duration)
|
|
||||||
oq.backingOff.Store(true)
|
|
||||||
destinationQueueBackingOff.Inc()
|
|
||||||
select {
|
|
||||||
case <-time.After(duration):
|
|
||||||
case <-oq.interruptBackoff:
|
|
||||||
}
|
|
||||||
destinationQueueBackingOff.Dec()
|
|
||||||
oq.backingOff.Store(false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Work out which PDUs/EDUs to include in the next transaction.
|
// Work out which PDUs/EDUs to include in the next transaction.
|
||||||
oq.pendingMutex.RLock()
|
oq.pendingMutex.RLock()
|
||||||
pduCount := len(oq.pendingPDUs)
|
pduCount := len(oq.pendingPDUs)
|
||||||
|
|
@@ -328,99 +334,52 @@ func (oq *destinationQueue) backgroundSend() {
 		toSendEDUs := oq.pendingEDUs[:eduCount]
 		oq.pendingMutex.RUnlock()

+		// If we didn't get anything from the database and there are no
+		// pending EDUs then there's nothing to do - stop here.
+		if pduCount == 0 && eduCount == 0 {
+			continue
+		}
+
 		// If we have pending PDUs or EDUs then construct a transaction.
 		// Try sending the next transaction and see what happens.
-		transaction, pc, ec, terr := oq.nextTransaction(toSendPDUs, toSendEDUs)
+		terr := oq.nextTransaction(toSendPDUs, toSendEDUs)
 		if terr != nil {
 			// We failed to send the transaction. Mark it as a failure.
-			oq.statistics.Failure()
-		} else if transaction {
-			// If we successfully sent the transaction then clear out
-			// the pending events and EDUs, and wipe our transaction ID.
-			oq.statistics.Success()
-			oq.pendingMutex.Lock()
-			for i := range oq.pendingPDUs[:pc] {
-				oq.pendingPDUs[i] = nil
-			}
-			for i := range oq.pendingEDUs[:ec] {
-				oq.pendingEDUs[i] = nil
-			}
-			oq.pendingPDUs = oq.pendingPDUs[pc:]
-			oq.pendingEDUs = oq.pendingEDUs[ec:]
-			oq.pendingMutex.Unlock()
+			_, blacklisted := oq.statistics.Failure()
+			if !blacklisted {
+				// Register the backoff state and exit the goroutine.
+				// It'll get restarted automatically when the backoff
+				// completes.
+				oq.backingOff.Store(true)
+				destinationQueueBackingOff.Inc()
+				return
+			} else {
+				// Immediately trigger the blacklist logic.
+				oq.blacklistDestination()
+				return
+			}
+		} else {
+			oq.handleTransactionSuccess(pduCount, eduCount)
 		}
 	}
 }

 // nextTransaction creates a new transaction from the pending event
-// queue and sends it. Returns true if a transaction was sent or
-// false otherwise.
+// queue and sends it.
+// Returns an error if the transaction wasn't sent.
 func (oq *destinationQueue) nextTransaction(
 	pdus []*queuedPDU,
 	edus []*queuedEDU,
-) (bool, int, int, error) {
-	// If there's no projected transaction ID then generate one. If
-	// the transaction succeeds then we'll set it back to "" so that
-	// we generate a new one next time. If it fails, we'll preserve
-	// it so that we retry with the same transaction ID.
-	oq.transactionIDMutex.Lock()
-	if oq.transactionID == "" {
-		now := gomatrixserverlib.AsTimestamp(time.Now())
-		oq.transactionID = gomatrixserverlib.TransactionID(fmt.Sprintf("%d-%d", now, oq.statistics.SuccessCount()))
-	}
-	oq.transactionIDMutex.Unlock()
-
+) error {
 	// Create the transaction.
-	t := gomatrixserverlib.Transaction{
-		PDUs: []json.RawMessage{},
-		EDUs: []gomatrixserverlib.EDU{},
-	}
-	t.Origin = oq.origin
-	t.Destination = oq.destination
-	t.OriginServerTS = gomatrixserverlib.AsTimestamp(time.Now())
-	t.TransactionID = oq.transactionID
-
-	// If we didn't get anything from the database and there are no
-	// pending EDUs then there's nothing to do - stop here.
-	if len(pdus) == 0 && len(edus) == 0 {
-		return false, 0, 0, nil
-	}
-
-	var pduReceipts []*shared.Receipt
-	var eduReceipts []*shared.Receipt
-
-	// Go through PDUs that we retrieved from the database, if any,
-	// and add them into the transaction.
-	for _, pdu := range pdus {
-		if pdu == nil || pdu.pdu == nil {
-			continue
-		}
-		// Append the JSON of the event, since this is a json.RawMessage type in the
-		// gomatrixserverlib.Transaction struct
-		t.PDUs = append(t.PDUs, pdu.pdu.JSON())
-		pduReceipts = append(pduReceipts, pdu.receipt)
-	}
-
-	// Do the same for pending EDUS in the queue.
-	for _, edu := range edus {
-		if edu == nil || edu.edu == nil {
-			continue
-		}
-		t.EDUs = append(t.EDUs, *edu.edu)
-		eduReceipts = append(eduReceipts, edu.receipt)
-	}
-
+	t, pduReceipts, eduReceipts := oq.createTransaction(pdus, edus)
 	logrus.WithField("server_name", oq.destination).Debugf("Sending transaction %q containing %d PDUs, %d EDUs", t.TransactionID, len(t.PDUs), len(t.EDUs))

 	// Try to send the transaction to the destination server.
-	// TODO: we should check for 500-ish fails vs 400-ish here,
-	// since we shouldn't queue things indefinitely in response
-	// to a 400-ish error
 	ctx, cancel := context.WithTimeout(oq.process.Context(), time.Minute*5)
 	defer cancel()
 	_, err := oq.client.SendTransaction(ctx, t)
-	switch err.(type) {
+	switch errResponse := err.(type) {
 	case nil:
 		// Clean up the transaction in the database.
 		if pduReceipts != nil {
@@ -439,16 +398,129 @@ func (oq *destinationQueue) nextTransaction(
 		oq.transactionIDMutex.Lock()
 		oq.transactionID = ""
 		oq.transactionIDMutex.Unlock()
-		return true, len(t.PDUs), len(t.EDUs), nil
+		return nil
 	case gomatrix.HTTPError:
 		// Report that we failed to send the transaction and we
 		// will retry again, subject to backoff.
-		return false, 0, 0, err
+		// TODO: we should check for 500-ish fails vs 400-ish here,
+		// since we shouldn't queue things indefinitely in response
+		// to a 400-ish error
+		code := errResponse.Code
+		logrus.Debug("Transaction failed with HTTP", code)
+		return err
 	default:
 		logrus.WithFields(logrus.Fields{
 			"destination":   oq.destination,
 			logrus.ErrorKey: err,
 		}).Debugf("Failed to send transaction %q", t.TransactionID)
-		return false, 0, 0, err
+		return err
+	}
+}
+
+// createTransaction generates a gomatrixserverlib.Transaction from the provided pdus and edus.
+// It also returns the associated event receipts so they can be cleaned from the database in
+// the case of a successful transaction.
+func (oq *destinationQueue) createTransaction(
+	pdus []*queuedPDU,
+	edus []*queuedEDU,
+) (gomatrixserverlib.Transaction, []*shared.Receipt, []*shared.Receipt) {
+	// If there's no projected transaction ID then generate one. If
+	// the transaction succeeds then we'll set it back to "" so that
+	// we generate a new one next time. If it fails, we'll preserve
+	// it so that we retry with the same transaction ID.
+	oq.transactionIDMutex.Lock()
+	if oq.transactionID == "" {
+		now := gomatrixserverlib.AsTimestamp(time.Now())
+		oq.transactionID = gomatrixserverlib.TransactionID(fmt.Sprintf("%d-%d", now, oq.statistics.SuccessCount()))
+	}
+	oq.transactionIDMutex.Unlock()
+
+	t := gomatrixserverlib.Transaction{
+		PDUs: []json.RawMessage{},
+		EDUs: []gomatrixserverlib.EDU{},
+	}
+	t.Origin = oq.origin
+	t.Destination = oq.destination
+	t.OriginServerTS = gomatrixserverlib.AsTimestamp(time.Now())
+	t.TransactionID = oq.transactionID
+
+	var pduReceipts []*shared.Receipt
+	var eduReceipts []*shared.Receipt
+
+	// Go through PDUs that we retrieved from the database, if any,
+	// and add them into the transaction.
+	for _, pdu := range pdus {
+		// These should never be nil.
+		if pdu == nil || pdu.pdu == nil {
+			continue
+		}
+		// Append the JSON of the event, since this is a json.RawMessage type in the
+		// gomatrixserverlib.Transaction struct
+		t.PDUs = append(t.PDUs, pdu.pdu.JSON())
+		pduReceipts = append(pduReceipts, pdu.receipt)
+	}
+
+	// Do the same for pending EDUS in the queue.
+	for _, edu := range edus {
+		// These should never be nil.
+		if edu == nil || edu.edu == nil {
+			continue
+		}
+		t.EDUs = append(t.EDUs, *edu.edu)
+		eduReceipts = append(eduReceipts, edu.receipt)
+	}
+
+	return t, pduReceipts, eduReceipts
+}
+
+// blacklistDestination removes all pending PDUs and EDUs that have been cached
+// and deletes this queue.
+func (oq *destinationQueue) blacklistDestination() {
+	// It's been suggested that we should give up because the backoff
+	// has exceeded a maximum allowable value. Clean up the in-memory
+	// buffers at this point. The PDU clean-up is already on a defer.
+	logrus.Warnf("Blacklisting %q due to exceeding backoff threshold", oq.destination)
+
+	oq.pendingMutex.Lock()
+	for i := range oq.pendingPDUs {
+		oq.pendingPDUs[i] = nil
+	}
+	for i := range oq.pendingEDUs {
+		oq.pendingEDUs[i] = nil
+	}
+	oq.pendingPDUs = nil
+	oq.pendingEDUs = nil
+	oq.pendingMutex.Unlock()
+
+	// Delete this queue as no more messages will be sent to this
+	// destination until it is no longer blacklisted.
+	oq.statistics.AssignBackoffNotifier(nil)
+	oq.queues.clearQueue(oq)
+}
+
+// handleTransactionSuccess updates the cached event queues as well as the success and
+// backoff information for this server.
+func (oq *destinationQueue) handleTransactionSuccess(pduCount int, eduCount int) {
+	// If we successfully sent the transaction then clear out
+	// the pending events and EDUs, and wipe our transaction ID.
+	oq.statistics.Success()
+	oq.pendingMutex.Lock()
+	defer oq.pendingMutex.Unlock()
+
+	for i := range oq.pendingPDUs[:pduCount] {
+		oq.pendingPDUs[i] = nil
+	}
+	for i := range oq.pendingEDUs[:eduCount] {
+		oq.pendingEDUs[i] = nil
+	}
+	oq.pendingPDUs = oq.pendingPDUs[pduCount:]
+	oq.pendingEDUs = oq.pendingEDUs[eduCount:]
+
+	if len(oq.pendingPDUs) > 0 || len(oq.pendingEDUs) > 0 {
+		select {
+		case oq.notify <- struct{}{}:
+		default:
+		}
 	}
 }
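Aside (illustrative sketch, not part of this commit): the idle-timer handling added above uses the stop-and-drain idiom commonly needed before re-arming a time.Timer that may already have fired. A minimal, self-contained version of that idiom, using hypothetical names:

package main

import (
    "fmt"
    "time"
)

// resetTimer re-arms t for d. If the timer already fired, the pending value
// is drained first so a later receive from t.C cannot observe a stale expiry.
func resetTimer(t *time.Timer, d time.Duration) {
    if !t.Stop() {
        select {
        case <-t.C: // drain the expired tick, if any
        default:
        }
    }
    t.Reset(d)
}

func main() {
    t := time.NewTimer(10 * time.Millisecond)
    time.Sleep(20 * time.Millisecond) // let the timer fire
    resetTimer(t, 50*time.Millisecond)
    fmt.Println("timer re-armed, fired again at", <-t.C)
}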
@@ -21,8 +21,10 @@ import (
 	"sync"
 	"time"

+	"github.com/getsentry/sentry-go"
 	"github.com/matrix-org/gomatrixserverlib"
 	"github.com/prometheus/client_golang/prometheus"
+	"github.com/sirupsen/logrus"
 	log "github.com/sirupsen/logrus"
 	"github.com/tidwall/gjson"

@@ -161,23 +163,25 @@ func (oqs *OutgoingQueues) getQueue(destination gomatrixserverlib.ServerName) *d
 	if !ok || oq == nil {
 		destinationQueueTotal.Inc()
 		oq = &destinationQueue{
 			queues:      oqs,
 			db:          oqs.db,
 			process:     oqs.process,
 			rsAPI:       oqs.rsAPI,
 			origin:      oqs.origin,
 			destination: destination,
 			client:      oqs.client,
 			statistics:  oqs.statistics.ForServer(destination),
 			notify:      make(chan struct{}, 1),
-			interruptBackoff: make(chan bool),
-			signing:          oqs.signing,
+			signing:     oqs.signing,
 		}
+		oq.statistics.AssignBackoffNotifier(oq.handleBackoffNotifier)
 		oqs.queues[destination] = oq
 	}
 	return oq
 }

+// clearQueue removes the queue for the provided destination from the
+// set of destination queues.
 func (oqs *OutgoingQueues) clearQueue(oq *destinationQueue) {
 	oqs.queuesMutex.Lock()
 	defer oqs.queuesMutex.Unlock()

@@ -244,11 +248,25 @@ func (oqs *OutgoingQueues) SendEvent(
 	}

 	for destination := range destmap {
-		if queue := oqs.getQueue(destination); queue != nil {
+		if queue := oqs.getQueue(destination); queue != nil && !queue.statistics.Blacklisted() {
 			queue.sendEvent(ev, nid)
+		} else {
+			delete(destmap, destination)
 		}
 	}

+	// Create a database entry that associates the given PDU NID with
+	// this destinations queue. We'll then be able to retrieve the PDU
+	// later.
+	if err := oqs.db.AssociatePDUWithDestinations(
+		oqs.process.Context(),
+		destmap,
+		nid, // NIDs from federationapi_queue_json table
+	); err != nil {
+		logrus.WithError(err).Errorf("failed to associate PDUs %q with destinations", nid)
+		return err
+	}
+
 	return nil
 }

@@ -307,20 +325,38 @@ func (oqs *OutgoingQueues) SendEDU(

 	ephemeralJSON, err := json.Marshal(e)
 	if err != nil {
+		sentry.CaptureException(err)
 		return fmt.Errorf("json.Marshal: %w", err)
 	}

 	nid, err := oqs.db.StoreJSON(oqs.process.Context(), string(ephemeralJSON))
 	if err != nil {
+		sentry.CaptureException(err)
 		return fmt.Errorf("sendevent: oqs.db.StoreJSON: %w", err)
 	}

 	for destination := range destmap {
-		if queue := oqs.getQueue(destination); queue != nil {
+		if queue := oqs.getQueue(destination); queue != nil && !queue.statistics.Blacklisted() {
 			queue.sendEDU(e, nid)
+		} else {
+			delete(destmap, destination)
 		}
 	}

+	// Create a database entry that associates the given PDU NID with
+	// this destination queue. We'll then be able to retrieve the PDU
+	// later.
+	if err := oqs.db.AssociateEDUWithDestinations(
+		oqs.process.Context(),
+		destmap, // the destination server name
+		nid,     // NIDs from federationapi_queue_json table
+		e.Type,
+		nil, // this will use the default expireEDUTypes map
+	); err != nil {
+		logrus.WithError(err).Errorf("failed to associate EDU with destinations")
+		return err
+	}
+
 	return nil
 }

@@ -329,7 +365,9 @@ func (oqs *OutgoingQueues) RetryServer(srv gomatrixserverlib.ServerName) {
 	if oqs.disabled {
 		return
 	}
+	oqs.statistics.ForServer(srv).RemoveBlacklist()
 	if queue := oqs.getQueue(srv); queue != nil {
+		queue.statistics.ClearBackoff()
 		queue.wakeQueueIfNeeded()
 	}
 }
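Aside (illustrative sketch, not part of this commit): the destination queues created above use a notify channel with a buffer of one, and senders perform a non-blocking send so repeated wake-ups coalesce while the worker is busy. A minimal standalone version of that pattern, with hypothetical names:

package main

import "fmt"

// notify does a non-blocking send on a buffered channel of size one: if a
// wake-up is already pending, the extra notification is dropped rather than
// blocking the caller.
func notify(ch chan struct{}) {
    select {
    case ch <- struct{}{}:
    default:
    }
}

func main() {
    ch := make(chan struct{}, 1)
    notify(ch)
    notify(ch) // coalesced with the first notification
    <-ch
    fmt.Println("worker woken once for two notifications")
}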
federationapi/queue/queue_test.go: new file, 1060 lines. File diff suppressed because it is too large.
@@ -20,6 +20,7 @@ import (
 	"net/http"
 	"time"

+	"github.com/matrix-org/dendrite/clientapi/jsonerror"
 	"github.com/matrix-org/dendrite/roomserver/api"
 	"github.com/matrix-org/gomatrixserverlib"
 	"github.com/matrix-org/util"

@@ -95,7 +96,10 @@ func fetchEvent(ctx context.Context, rsAPI api.FederationRoomserverAPI, eventID
 	}

 	if len(eventsResponse.Events) == 0 {
-		return nil, &util.JSONResponse{Code: http.StatusNotFound, JSON: nil}
+		return nil, &util.JSONResponse{
+			Code: http.StatusNotFound,
+			JSON: jsonerror.NotFound("Event not found"),
+		}
 	}

 	return eventsResponse.Events[0].Event, nil

@@ -148,8 +148,15 @@ func processInvite(
 			JSON: jsonerror.BadJSON("The event JSON could not be redacted"),
 		}
 	}
+	_, serverName, err := gomatrixserverlib.SplitID('@', event.Sender())
+	if err != nil {
+		return util.JSONResponse{
+			Code: http.StatusBadRequest,
+			JSON: jsonerror.BadJSON("The event JSON contains an invalid sender"),
+		}
+	}
 	verifyRequests := []gomatrixserverlib.VerifyJSONRequest{{
-		ServerName:             event.Origin(),
+		ServerName:             serverName,
 		Message:                redacted,
 		AtTS:                   event.OriginServerTS(),
 		StrictValidityChecking: true,

@@ -203,14 +203,6 @@ func SendJoin(
 		}
 	}

-	// Check that the event is from the server sending the request.
-	if event.Origin() != request.Origin() {
-		return util.JSONResponse{
-			Code: http.StatusForbidden,
-			JSON: jsonerror.Forbidden("The join must be sent by the server it originated on"),
-		}
-	}
-
 	// Check that a state key is provided.
 	if event.StateKey() == nil || event.StateKeyEquals("") {
 		return util.JSONResponse{

@@ -228,16 +220,16 @@ func SendJoin(
 	// Check that the sender belongs to the server that is sending us
 	// the request. By this point we've already asserted that the sender
 	// and the state key are equal so we don't need to check both.
-	var domain gomatrixserverlib.ServerName
-	if _, domain, err = gomatrixserverlib.SplitID('@', event.Sender()); err != nil {
+	var serverName gomatrixserverlib.ServerName
+	if _, serverName, err = gomatrixserverlib.SplitID('@', event.Sender()); err != nil {
 		return util.JSONResponse{
 			Code: http.StatusForbidden,
 			JSON: jsonerror.Forbidden("The sender of the join is invalid"),
 		}
-	} else if domain != request.Origin() {
+	} else if serverName != request.Origin() {
 		return util.JSONResponse{
 			Code: http.StatusForbidden,
-			JSON: jsonerror.Forbidden("The sender of the join must belong to the origin server"),
+			JSON: jsonerror.Forbidden("The sender does not match the server that originated the request"),
 		}
 	}

@@ -292,7 +284,7 @@ func SendJoin(
 		}
 	}
 	verifyRequests := []gomatrixserverlib.VerifyJSONRequest{{
-		ServerName:             event.Origin(),
+		ServerName:             serverName,
 		Message:                redacted,
 		AtTS:                   event.OriginServerTS(),
 		StrictValidityChecking: true,

@@ -329,6 +321,12 @@ func SendJoin(
 			JSON: jsonerror.NotFound("Room does not exist"),
 		}
 	}
+	if !stateAndAuthChainResponse.StateKnown {
+		return util.JSONResponse{
+			Code: http.StatusForbidden,
+			JSON: jsonerror.Forbidden("State not known"),
+		}
+	}

 	// Check if the user is already in the room. If they're already in then
 	// there isn't much point in sending another join event into the room.
@@ -160,7 +160,7 @@ func localKeys(cfg *config.FederationAPI, validUntil time.Time) (*gomatrixserver
 	for _, oldVerifyKey := range cfg.Matrix.OldVerifyKeys {
 		keys.OldVerifyKeys[oldVerifyKey.KeyID] = gomatrixserverlib.OldVerifyKey{
 			VerifyKey: gomatrixserverlib.VerifyKey{
-				Key: gomatrixserverlib.Base64Bytes(oldVerifyKey.PrivateKey.Public().(ed25519.PublicKey)),
+				Key: oldVerifyKey.PublicKey,
 			},
 			ExpiredTS: oldVerifyKey.ExpiredAt,
 		}

@@ -118,6 +118,7 @@ func MakeLeave(
 }

 // SendLeave implements the /send_leave API
+// nolint:gocyclo
 func SendLeave(
 	httpReq *http.Request,
 	request *gomatrixserverlib.FederationRequest,

@@ -167,14 +168,6 @@ func SendLeave(
 		}
 	}

-	// Check that the event is from the server sending the request.
-	if event.Origin() != request.Origin() {
-		return util.JSONResponse{
-			Code: http.StatusForbidden,
-			JSON: jsonerror.Forbidden("The leave must be sent by the server it originated on"),
-		}
-	}
-
 	if event.StateKey() == nil || event.StateKeyEquals("") {
 		return util.JSONResponse{
 			Code: http.StatusBadRequest,

@@ -188,6 +181,22 @@ func SendLeave(
 		}
 	}

+	// Check that the sender belongs to the server that is sending us
+	// the request. By this point we've already asserted that the sender
+	// and the state key are equal so we don't need to check both.
+	var serverName gomatrixserverlib.ServerName
+	if _, serverName, err = gomatrixserverlib.SplitID('@', event.Sender()); err != nil {
+		return util.JSONResponse{
+			Code: http.StatusForbidden,
+			JSON: jsonerror.Forbidden("The sender of the join is invalid"),
+		}
+	} else if serverName != request.Origin() {
+		return util.JSONResponse{
+			Code: http.StatusForbidden,
+			JSON: jsonerror.Forbidden("The sender does not match the server that originated the request"),
+		}
+	}
+
 	// Check if the user has already left. If so, no-op!
 	queryReq := &api.QueryLatestEventsAndStateRequest{
 		RoomID: roomID,

@@ -240,7 +249,7 @@ func SendLeave(
 		}
 	}
 	verifyRequests := []gomatrixserverlib.VerifyJSONRequest{{
-		ServerName:             event.Origin(),
+		ServerName:             serverName,
 		Message:                redacted,
 		AtTS:                   event.OriginServerTS(),
 		StrictValidityChecking: true,
@@ -22,6 +22,12 @@ import (
 	"sync"
 	"time"

+	"github.com/getsentry/sentry-go"
+	"github.com/matrix-org/gomatrixserverlib"
+	"github.com/matrix-org/util"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/sirupsen/logrus"
+
 	"github.com/matrix-org/dendrite/clientapi/jsonerror"
 	federationAPI "github.com/matrix-org/dendrite/federationapi/api"
 	"github.com/matrix-org/dendrite/federationapi/producers"

@@ -31,10 +37,6 @@ import (
 	"github.com/matrix-org/dendrite/roomserver/api"
 	"github.com/matrix-org/dendrite/setup/config"
 	syncTypes "github.com/matrix-org/dendrite/syncapi/types"
-	"github.com/matrix-org/gomatrixserverlib"
-	"github.com/matrix-org/util"
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/sirupsen/logrus"
 )

 const (

@@ -349,6 +351,7 @@ func (t *txnReq) processEDUs(ctx context.Context) {
 			for deviceID, message := range byUser {
 				// TODO: check that the user and the device actually exist here
 				if err := t.producer.SendToDevice(ctx, directPayload.Sender, userID, deviceID, directPayload.Type, message); err != nil {
+					sentry.CaptureException(err)
 					util.GetLogger(ctx).WithError(err).WithFields(logrus.Fields{
 						"sender":  directPayload.Sender,
 						"user_id": userID,

@@ -358,7 +361,10 @@ func (t *txnReq) processEDUs(ctx context.Context) {
 				}
 			}
 		case gomatrixserverlib.MDeviceListUpdate:
-			t.processDeviceListUpdate(ctx, e)
+			if err := t.producer.SendDeviceListUpdate(ctx, e.Content, t.Origin); err != nil {
+				sentry.CaptureException(err)
+				util.GetLogger(ctx).WithError(err).Error("failed to InputDeviceListUpdate")
+			}
 		case gomatrixserverlib.MReceipt:
 			// https://matrix.org/docs/spec/server_server/r0.1.4#receipts
 			payload := map[string]types.FederationReceiptMRead{}

@@ -391,7 +397,8 @@ func (t *txnReq) processEDUs(ctx context.Context) {
 				}
 			}
 		case types.MSigningKeyUpdate:
-			if err := t.processSigningKeyUpdate(ctx, e); err != nil {
+			if err := t.producer.SendSigningKeyUpdate(ctx, e.Content, t.Origin); err != nil {
+				sentry.CaptureException(err)
 				logrus.WithError(err).Errorf("Failed to process signing key update")
 			}
 		case gomatrixserverlib.MPresence:

@@ -431,42 +438,6 @@ func (t *txnReq) processPresence(ctx context.Context, e gomatrixserverlib.EDU) e
 	return nil
 }

-func (t *txnReq) processSigningKeyUpdate(ctx context.Context, e gomatrixserverlib.EDU) error {
-	var updatePayload keyapi.CrossSigningKeyUpdate
-	if err := json.Unmarshal(e.Content, &updatePayload); err != nil {
-		util.GetLogger(ctx).WithError(err).WithFields(logrus.Fields{
-			"user_id": updatePayload.UserID,
-		}).Debug("Failed to unmarshal signing key update")
-		return err
-	}
-	if _, serverName, err := gomatrixserverlib.SplitID('@', updatePayload.UserID); err != nil {
-		return nil
-	} else if serverName == t.ourServerName {
-		return nil
-	} else if serverName != t.Origin {
-		return nil
-	}
-	keys := gomatrixserverlib.CrossSigningKeys{}
-	if updatePayload.MasterKey != nil {
-		keys.MasterKey = *updatePayload.MasterKey
-	}
-	if updatePayload.SelfSigningKey != nil {
-		keys.SelfSigningKey = *updatePayload.SelfSigningKey
-	}
-	uploadReq := &keyapi.PerformUploadDeviceKeysRequest{
-		CrossSigningKeys: keys,
-		UserID:           updatePayload.UserID,
-	}
-	uploadRes := &keyapi.PerformUploadDeviceKeysResponse{}
-	if err := t.keyAPI.PerformUploadDeviceKeys(ctx, uploadReq, uploadRes); err != nil {
-		return err
-	}
-	if uploadRes.Error != nil {
-		return uploadRes.Error
-	}
-	return nil
-}
-
 // processReceiptEvent sends receipt events to JetStream
 func (t *txnReq) processReceiptEvent(ctx context.Context,
 	userID, roomID, receiptType string,

@@ -489,21 +460,3 @@ func (t *txnReq) processReceiptEvent(ctx context.Context,

 	return nil
 }
-
-func (t *txnReq) processDeviceListUpdate(ctx context.Context, e gomatrixserverlib.EDU) {
-	var payload gomatrixserverlib.DeviceListUpdateEvent
-	if err := json.Unmarshal(e.Content, &payload); err != nil {
-		util.GetLogger(ctx).WithError(err).Error("Failed to unmarshal device list update event")
-		return
-	}
-	if _, serverName, err := gomatrixserverlib.SplitID('@', payload.UserID); err != nil {
-		return
-	} else if serverName == t.ourServerName {
-		return
-	} else if serverName != t.Origin {
-		return
-	}
-	if err := t.producer.SendDeviceListUpdate(ctx, &payload); err != nil {
-		util.GetLogger(ctx).WithError(err).WithField("user_id", payload.UserID).Error("failed to InputDeviceListUpdate")
-	}
-}
@@ -135,17 +135,24 @@ func getState(
 		return nil, nil, &resErr
 	}

-	if response.IsRejected {
+	switch {
+	case !response.RoomExists:
+		return nil, nil, &util.JSONResponse{
+			Code: http.StatusNotFound,
+			JSON: jsonerror.NotFound("Room not found"),
+		}
+	case !response.StateKnown:
+		return nil, nil, &util.JSONResponse{
+			Code: http.StatusNotFound,
+			JSON: jsonerror.NotFound("State not known"),
+		}
+	case response.IsRejected:
 		return nil, nil, &util.JSONResponse{
 			Code: http.StatusNotFound,
 			JSON: jsonerror.NotFound("Event not found"),
 		}
 	}

-	if !response.RoomExists {
-		return nil, nil, &util.JSONResponse{Code: http.StatusNotFound, JSON: nil}
-	}
-
 	return response.StateEvents, response.AuthChainEvents, nil
 }
@@ -2,13 +2,15 @@ package statistics

 import (
 	"math"
+	"math/rand"
 	"sync"
 	"time"

-	"github.com/matrix-org/dendrite/federationapi/storage"
 	"github.com/matrix-org/gomatrixserverlib"
 	"github.com/sirupsen/logrus"
 	"go.uber.org/atomic"
+
+	"github.com/matrix-org/dendrite/federationapi/storage"
 )

 // Statistics contains information about all of the remote federated

@@ -19,12 +21,23 @@ type Statistics struct {
 	servers map[gomatrixserverlib.ServerName]*ServerStatistics
 	mutex   sync.RWMutex

+	backoffTimers map[gomatrixserverlib.ServerName]*time.Timer
+	backoffMutex  sync.RWMutex
+
 	// How many times should we tolerate consecutive failures before we
 	// just blacklist the host altogether? The backoff is exponential,
 	// so the max time here to attempt is 2**failures seconds.
 	FailuresUntilBlacklist uint32
 }

+func NewStatistics(db storage.Database, failuresUntilBlacklist uint32) Statistics {
+	return Statistics{
+		DB:                     db,
+		FailuresUntilBlacklist: failuresUntilBlacklist,
+		backoffTimers:          make(map[gomatrixserverlib.ServerName]*time.Timer),
+	}
+}
+
 // ForServer returns server statistics for the given server name. If it
 // does not exist, it will create empty statistics and return those.
 func (s *Statistics) ForServer(serverName gomatrixserverlib.ServerName) *ServerStatistics {

@@ -44,7 +57,6 @@ func (s *Statistics) ForServer(serverName gomatrixserverlib.ServerName) *ServerS
 		server = &ServerStatistics{
 			statistics: s,
 			serverName: serverName,
-			interrupt:  make(chan struct{}),
 		}
 		s.servers[serverName] = server
 		s.mutex.Unlock()

@@ -63,29 +75,43 @@ func (s *Statistics) ForServer(serverName gomatrixserverlib.ServerName) *ServerS
 // many times we failed etc. It also manages the backoff time and black-
 // listing a remote host if it remains uncooperative.
 type ServerStatistics struct {
 	statistics     *Statistics                  //
 	serverName     gomatrixserverlib.ServerName //
 	blacklisted    atomic.Bool                  // is the node blacklisted
 	backoffStarted atomic.Bool                  // is the backoff started
 	backoffUntil   atomic.Value                 // time.Time until this backoff interval ends
 	backoffCount   atomic.Uint32                // number of times BackoffDuration has been called
-	interrupt      chan struct{}                // interrupts the backoff goroutine
 	successCounter atomic.Uint32                // how many times have we succeeded?
+	backoffNotifier func()                      // notifies destination queue when backoff completes
+	notifierMutex   sync.Mutex
 }

+const maxJitterMultiplier = 1.4
+const minJitterMultiplier = 0.8
+
 // duration returns how long the next backoff interval should be.
 func (s *ServerStatistics) duration(count uint32) time.Duration {
-	return time.Second * time.Duration(math.Exp2(float64(count)))
+	// Add some jitter to minimise the chance of having multiple backoffs
+	// ending at the same time.
+	jitter := rand.Float64()*(maxJitterMultiplier-minJitterMultiplier) + minJitterMultiplier
+	duration := time.Millisecond * time.Duration(math.Exp2(float64(count))*jitter*1000)
+	return duration
 }

 // cancel will interrupt the currently active backoff.
 func (s *ServerStatistics) cancel() {
 	s.blacklisted.Store(false)
 	s.backoffUntil.Store(time.Time{})
-	select {
-	case s.interrupt <- struct{}{}:
-	default:
-	}
+	s.ClearBackoff()
+}
+
+// AssignBackoffNotifier configures the channel to send to when
+// a backoff completes.
+func (s *ServerStatistics) AssignBackoffNotifier(notifier func()) {
+	s.notifierMutex.Lock()
+	defer s.notifierMutex.Unlock()
+	s.backoffNotifier = notifier
 }

 // Success updates the server statistics with a new successful

@@ -94,8 +120,8 @@ func (s *ServerStatistics) cancel() {
 // we will unblacklist it.
 func (s *ServerStatistics) Success() {
 	s.cancel()
-	s.successCounter.Inc()
 	s.backoffCount.Store(0)
+	s.successCounter.Inc()
 	if s.statistics.DB != nil {
 		if err := s.statistics.DB.RemoveServerFromBlacklist(s.serverName); err != nil {
 			logrus.WithError(err).Errorf("Failed to remove %q from blacklist", s.serverName)

@@ -104,16 +130,20 @@ func (s *ServerStatistics) Success() {
 }

 // Failure marks a failure and starts backing off if needed.
-// The next call to BackoffIfRequired will do the right thing
-// after this. It will return the time that the current failure
+// It will return the time that the current failure
 // will result in backoff waiting until, and a bool signalling
 // whether we have blacklisted and therefore to give up.
 func (s *ServerStatistics) Failure() (time.Time, bool) {
+	// Return immediately if we have blacklisted this node.
+	if s.blacklisted.Load() {
+		return time.Time{}, true
+	}
+
 	// If we aren't already backing off, this call will start
-	// a new backoff period. Increase the failure counter and
+	// a new backoff period, increase the failure counter and
 	// start a goroutine which will wait out the backoff and
 	// unset the backoffStarted flag when done.
-	if s.backoffStarted.CAS(false, true) {
+	if s.backoffStarted.CompareAndSwap(false, true) {
 		if s.backoffCount.Inc() >= s.statistics.FailuresUntilBlacklist {
 			s.blacklisted.Store(true)
 			if s.statistics.DB != nil {

@@ -121,40 +151,48 @@ func (s *ServerStatistics) Failure() (time.Time, bool) {
 				logrus.WithError(err).Errorf("Failed to add %q to blacklist", s.serverName)
 			}
 		}
+			s.ClearBackoff()
 			return time.Time{}, true
 		}

-		go func() {
-			until, ok := s.backoffUntil.Load().(time.Time)
-			if ok {
-				select {
-				case <-time.After(time.Until(until)):
-				case <-s.interrupt:
-				}
-			}
-			s.backoffStarted.Store(false)
-		}()
+		// We're starting a new back off so work out what the next interval
+		// will be.
+		count := s.backoffCount.Load()
+		until := time.Now().Add(s.duration(count))
+		s.backoffUntil.Store(until)
+
+		s.statistics.backoffMutex.Lock()
+		defer s.statistics.backoffMutex.Unlock()
+		s.statistics.backoffTimers[s.serverName] = time.AfterFunc(time.Until(until), s.backoffFinished)
 	}

-	// Check if we have blacklisted this node.
-	if s.blacklisted.Load() {
-		return time.Now(), true
-	}
-
-	// If we're already backing off and we haven't yet surpassed
-	// the deadline then return that. Repeated calls to Failure
-	// within a single backoff interval will have no side effects.
-	if until, ok := s.backoffUntil.Load().(time.Time); ok && !time.Now().After(until) {
-		return until, false
-	}
-
-	// We're either backing off and have passed the deadline, or
-	// we aren't backing off, so work out what the next interval
-	// will be.
-	count := s.backoffCount.Load()
-	until := time.Now().Add(s.duration(count))
-	s.backoffUntil.Store(until)
-	return until, false
+	return s.backoffUntil.Load().(time.Time), false
+}
+
+// ClearBackoff stops the backoff timer for this destination if it is running
+// and removes the timer from the backoffTimers map.
+func (s *ServerStatistics) ClearBackoff() {
+	// If the timer is still running then stop it so it's memory is cleaned up sooner.
+	s.statistics.backoffMutex.Lock()
+	defer s.statistics.backoffMutex.Unlock()
+	if timer, ok := s.statistics.backoffTimers[s.serverName]; ok {
+		timer.Stop()
+	}
+	delete(s.statistics.backoffTimers, s.serverName)
+
+	s.backoffStarted.Store(false)
+}
+
+// backoffFinished will clear the previous backoff and notify the destination queue.
+func (s *ServerStatistics) backoffFinished() {
+	s.ClearBackoff()
+
+	// Notify the destinationQueue if one is currently running.
+	s.notifierMutex.Lock()
+	defer s.notifierMutex.Unlock()
+	if s.backoffNotifier != nil {
+		s.backoffNotifier()
+	}
 }

 // BackoffInfo returns information about the current or previous backoff.

@@ -173,6 +211,12 @@ func (s *ServerStatistics) Blacklisted() bool {
 	return s.blacklisted.Load()
 }

+// RemoveBlacklist removes the blacklisted status from the server.
+func (s *ServerStatistics) RemoveBlacklist() {
+	s.cancel()
+	s.backoffCount.Store(0)
+}
+
 // SuccessCount returns the number of successful requests. This is
 // usually useful in constructing transaction IDs.
 func (s *ServerStatistics) SuccessCount() uint32 {
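Aside (illustrative sketch, not part of this commit): the duration change above replaces the fixed 2^count-second backoff with a jittered one, scaling by a random factor between minJitterMultiplier and maxJitterMultiplier so destinations that start failing together are unlikely to retry at exactly the same moment. A standalone approximation of the same calculation, with hypothetical names:

package main

import (
    "fmt"
    "math"
    "math/rand"
    "time"
)

const (
    minJitterMultiplier = 0.8
    maxJitterMultiplier = 1.4
)

// backoffDuration computes 2^count seconds scaled by a random factor in
// [minJitterMultiplier, maxJitterMultiplier), mirroring the jittered
// exponential backoff in the diff above.
func backoffDuration(count uint32) time.Duration {
    jitter := rand.Float64()*(maxJitterMultiplier-minJitterMultiplier) + minJitterMultiplier
    return time.Millisecond * time.Duration(math.Exp2(float64(count))*jitter*1000)
}

func main() {
    for count := uint32(1); count <= 5; count++ {
        fmt.Printf("failure %d: back off for %s\n", count, backoffDuration(count))
    }
}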
@@ -7,9 +7,7 @@ import (
 )

 func TestBackoff(t *testing.T) {
-	stats := Statistics{
-		FailuresUntilBlacklist: 7,
-	}
+	stats := NewStatistics(nil, 7)
 	server := ServerStatistics{
 		statistics: &stats,
 		serverName: "test.com",

@@ -36,7 +34,7 @@ func TestBackoff(t *testing.T) {

 		// Get the duration.
 		_, blacklist := server.BackoffInfo()
-		duration := time.Until(until).Round(time.Second)
+		duration := time.Until(until)

 		// Unset the backoff, or otherwise our next call will think that
 		// there's a backoff in progress and return the same result.

@@ -57,8 +55,17 @@ func TestBackoff(t *testing.T) {

 		// Check if the duration is what we expect.
 		t.Logf("Backoff %d is for %s", i, duration)
-		if wanted := time.Second * time.Duration(math.Exp2(float64(i))); !blacklist && duration != wanted {
-			t.Fatalf("Backoff %d should have been %s but was %s", i, wanted, duration)
+		roundingAllowance := 0.01
+		minDuration := time.Millisecond * time.Duration(math.Exp2(float64(i))*minJitterMultiplier*1000-roundingAllowance)
+		maxDuration := time.Millisecond * time.Duration(math.Exp2(float64(i))*maxJitterMultiplier*1000+roundingAllowance)
+		var inJitterRange bool
+		if duration >= minDuration && duration <= maxDuration {
+			inJitterRange = true
+		} else {
+			inJitterRange = false
+		}
+		if !blacklist && !inJitterRange {
+			t.Fatalf("Backoff %d should have been between %s and %s but was %s", i, minDuration, maxDuration, duration)
 		}
 	}
 }
@@ -18,9 +18,10 @@ import (
 	"context"
 	"time"

+	"github.com/matrix-org/gomatrixserverlib"
+
 	"github.com/matrix-org/dendrite/federationapi/storage/shared"
 	"github.com/matrix-org/dendrite/federationapi/types"
-	"github.com/matrix-org/gomatrixserverlib"
 )

 type Database interface {

@@ -38,8 +39,8 @@ type Database interface {
 	GetPendingPDUs(ctx context.Context, serverName gomatrixserverlib.ServerName, limit int) (pdus map[*shared.Receipt]*gomatrixserverlib.HeaderedEvent, err error)
 	GetPendingEDUs(ctx context.Context, serverName gomatrixserverlib.ServerName, limit int) (edus map[*shared.Receipt]*gomatrixserverlib.EDU, err error)

-	AssociatePDUWithDestination(ctx context.Context, transactionID gomatrixserverlib.TransactionID, serverName gomatrixserverlib.ServerName, receipt *shared.Receipt) error
-	AssociateEDUWithDestination(ctx context.Context, serverName gomatrixserverlib.ServerName, receipt *shared.Receipt, eduType string, expireEDUTypes map[string]time.Duration) error
+	AssociatePDUWithDestinations(ctx context.Context, destinations map[gomatrixserverlib.ServerName]struct{}, receipt *shared.Receipt) error
+	AssociateEDUWithDestinations(ctx context.Context, destinations map[gomatrixserverlib.ServerName]struct{}, receipt *shared.Receipt, eduType string, expireEDUTypes map[string]time.Duration) error

 	CleanPDUs(ctx context.Context, serverName gomatrixserverlib.ServerName, receipts []*shared.Receipt) error
 	CleanEDUs(ctx context.Context, serverName gomatrixserverlib.ServerName, receipts []*shared.Receipt) error
@ -52,6 +52,10 @@ type Receipt struct {
|
||||||
nid int64
|
nid int64
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func NewReceipt(nid int64) Receipt {
|
||||||
|
return Receipt{nid: nid}
|
||||||
|
}
|
||||||
|
|
||||||
func (r *Receipt) String() string {
|
func (r *Receipt) String() string {
|
||||||
return fmt.Sprintf("%d", r.nid)
|
return fmt.Sprintf("%d", r.nid)
|
||||||
}
|
}
|
||||||
@@ -70,27 +74,27 @@ func (d *Database) UpdateRoom(
 ) (joinedHosts []types.JoinedHost, err error) {
 	err = d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
 		if purgeRoomFirst {
-			// If the event is a create event then we'll delete all of the existing
-			// data for the room. The only reason that a create event would be replayed
-			// to us in this way is if we're about to receive the entire room state.
 			if err = d.FederationJoinedHosts.DeleteJoinedHostsForRoom(ctx, txn, roomID); err != nil {
 				return fmt.Errorf("d.FederationJoinedHosts.DeleteJoinedHosts: %w", err)
 			}
-		}
-
-		joinedHosts, err = d.FederationJoinedHosts.SelectJoinedHostsWithTx(ctx, txn, roomID)
-		if err != nil {
-			return err
-		}
-
-		for _, add := range addHosts {
-			err = d.FederationJoinedHosts.InsertJoinedHosts(ctx, txn, roomID, add.MemberEventID, add.ServerName)
-			if err != nil {
-				return err
+			for _, add := range addHosts {
+				if err = d.FederationJoinedHosts.InsertJoinedHosts(ctx, txn, roomID, add.MemberEventID, add.ServerName); err != nil {
+					return err
+				}
+				joinedHosts = append(joinedHosts, add)
 			}
-		}
-		if err = d.FederationJoinedHosts.DeleteJoinedHosts(ctx, txn, removeHosts); err != nil {
-			return err
+		} else {
+			if joinedHosts, err = d.FederationJoinedHosts.SelectJoinedHostsWithTx(ctx, txn, roomID); err != nil {
+				return err
+			}
+			for _, add := range addHosts {
+				if err = d.FederationJoinedHosts.InsertJoinedHosts(ctx, txn, roomID, add.MemberEventID, add.ServerName); err != nil {
+					return err
+				}
+			}
+			if err = d.FederationJoinedHosts.DeleteJoinedHosts(ctx, txn, removeHosts); err != nil {
+				return err
+			}
 		}
 		return nil
 	})
@@ -38,9 +38,9 @@ var defaultExpireEDUTypes = map[string]time.Duration{
 // AssociateEDUWithDestination creates an association that the
 // destination queues will use to determine which JSON blobs to send
 // to which servers.
-func (d *Database) AssociateEDUWithDestination(
+func (d *Database) AssociateEDUWithDestinations(
 	ctx context.Context,
-	serverName gomatrixserverlib.ServerName,
+	destinations map[gomatrixserverlib.ServerName]struct{},
 	receipt *Receipt,
 	eduType string,
 	expireEDUTypes map[string]time.Duration,
@@ -53,23 +53,24 @@ func (d *Database) AssociateEDUWithDestination(
 		// Keep EDUs for at least x minutes before deleting them
 		expiresAt = gomatrixserverlib.AsTimestamp(time.Now().Add(duration))
 	}
-	// We forcibly set m.direct_to_device events to 0, as we always want them
-	// to be delivered. (required for E2EE)
-	if eduType == gomatrixserverlib.MDirectToDevice {
+	// We forcibly set m.direct_to_device and m.device_list_update events
+	// to 0, as we always want them to be delivered. (required for E2EE)
+	if eduType == gomatrixserverlib.MDirectToDevice || eduType == gomatrixserverlib.MDeviceListUpdate {
 		expiresAt = 0
 	}
 	return d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
-		if err := d.FederationQueueEDUs.InsertQueueEDU(
-			ctx,         // context
-			txn,         // SQL transaction
-			eduType,     // EDU type for coalescing
-			serverName,  // destination server name
-			receipt.nid, // NID from the federationapi_queue_json table
-			expiresAt,   // The timestamp this EDU will expire
-		); err != nil {
-			return fmt.Errorf("InsertQueueEDU: %w", err)
+		var err error
+		for destination := range destinations {
+			err = d.FederationQueueEDUs.InsertQueueEDU(
+				ctx,         // context
+				txn,         // SQL transaction
+				eduType,     // EDU type for coalescing
+				destination, // destination server name
+				receipt.nid, // NID from the federationapi_queue_json table
+				expiresAt,   // The timestamp this EDU will expire
+			)
 		}
-		return nil
+		return err
 	})
 }
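The expiry handling above is the part worth spelling out: each queued EDU normally gets an expiry timestamp from expireEDUTypes (or a default window), but m.direct_to_device and m.device_list_update are stored with an expiry of 0 so they are never dropped, because losing either kind of EDU breaks end-to-end encryption. A small self-contained sketch of that decision follows, with the constants and the default window assumed for illustration rather than taken from gomatrixserverlib:

package main

import (
	"fmt"
	"time"
)

// Illustrative constants: the real values come from gomatrixserverlib.
const (
	MDirectToDevice   = "m.direct_to_device"
	MDeviceListUpdate = "m.device_list_update"
)

// eduExpiresAt returns a millisecond expiry timestamp for a queued EDU, or 0
// when the EDU must never expire. defaultExpiry is an assumed fallback window.
func eduExpiresAt(eduType string, expireEDUTypes map[string]time.Duration, defaultExpiry time.Duration) int64 {
	// Never expire the EDU types that end-to-end encryption depends on.
	if eduType == MDirectToDevice || eduType == MDeviceListUpdate {
		return 0
	}
	duration := defaultExpiry
	if d, ok := expireEDUTypes[eduType]; ok {
		// Keep this EDU type for at least its configured window before deleting it.
		duration = d
	}
	return time.Now().Add(duration).UnixMilli()
}

func main() {
	expire := map[string]time.Duration{"m.typing": time.Minute}
	fmt.Println(eduExpiresAt("m.typing", expire, 24*time.Hour) > 0)       // true: typing notifications may expire
	fmt.Println(eduExpiresAt("m.direct_to_device", expire, 24*time.Hour)) // 0: to-device messages never expire
}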
@@ -110,6 +111,7 @@ func (d *Database) GetPendingEDUs(
 			return fmt.Errorf("json.Unmarshal: %w", err)
 		}
 		edus[&Receipt{nid}] = &event
+		d.Cache.StoreFederationQueuedEDU(nid, &event)
 	}
 
 	return nil
@@ -177,20 +179,18 @@ func (d *Database) GetPendingEDUServerNames(
 	return d.FederationQueueEDUs.SelectQueueEDUServerNames(ctx, nil)
 }
 
-// DeleteExpiredEDUs deletes expired EDUs
+// DeleteExpiredEDUs deletes expired EDUs and evicts them from the cache.
 func (d *Database) DeleteExpiredEDUs(ctx context.Context) error {
-	return d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
+	var jsonNIDs []int64
+	err := d.Writer.Do(d.DB, nil, func(txn *sql.Tx) (err error) {
 		expiredBefore := gomatrixserverlib.AsTimestamp(time.Now())
-		jsonNIDs, err := d.FederationQueueEDUs.SelectExpiredEDUs(ctx, txn, expiredBefore)
+		jsonNIDs, err = d.FederationQueueEDUs.SelectExpiredEDUs(ctx, txn, expiredBefore)
 		if err != nil {
 			return err
 		}
 		if len(jsonNIDs) == 0 {
 			return nil
 		}
-		for i := range jsonNIDs {
-			d.Cache.EvictFederationQueuedEDU(jsonNIDs[i])
-		}
 
 		if err = d.FederationQueueJSON.DeleteQueueJSON(ctx, txn, jsonNIDs); err != nil {
 			return err
@@ -198,4 +198,14 @@ func (d *Database) DeleteExpiredEDUs(ctx context.Context) error {
 
 		return d.FederationQueueEDUs.DeleteExpiredEDUs(ctx, txn, expiredBefore)
 	})
+
+	if err != nil {
+		return err
+	}
+
+	for i := range jsonNIDs {
+		d.Cache.EvictFederationQueuedEDU(jsonNIDs[i])
+	}
+
+	return nil
 }
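With this change, DeleteExpiredEDUs collects the expired JSON NIDs inside the write transaction but only evicts them from the in-memory cache after the transaction has returned without error, so the cache is only touched once the rows are actually gone. A rough sketch of that collect-then-evict pattern, with the writer and cache stubbed out for illustration (the stub types below are not part of dendrite):

package main

import "fmt"

// Illustrative stubs: in dendrite these roles are played by the SQL writer and the federation cache.
type fakeStore struct {
	expired []int64
	cache   map[int64]string
}

// doWriteTx stands in for Writer.Do: it runs fn as if inside a transaction.
func (s *fakeStore) doWriteTx(fn func() error) error { return fn() }

// deleteExpiredEDUs mirrors the new shape: select and delete inside the
// transaction, cache eviction only after the transaction has succeeded.
func (s *fakeStore) deleteExpiredEDUs() error {
	var jsonNIDs []int64
	err := s.doWriteTx(func() error {
		jsonNIDs = append(jsonNIDs, s.expired...) // SelectExpiredEDUs
		s.expired = nil                           // DeleteQueueJSON + DeleteExpiredEDUs
		return nil
	})
	if err != nil {
		return err
	}
	for _, nid := range jsonNIDs {
		delete(s.cache, nid) // EvictFederationQueuedEDU
	}
	return nil
}

func main() {
	s := &fakeStore{expired: []int64{7, 9}, cache: map[int64]string{7: "edu", 9: "edu", 11: "edu"}}
	if err := s.deleteExpiredEDUs(); err != nil {
		panic(err)
	}
	fmt.Println(len(s.cache)) // 1: only the unexpired entry remains
}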