mirror of https://github.com/matrix-org/dendrite.git (synced 2026-01-09 15:13:12 -06:00)

Commit 9541a78dd7: sync latest main, and resolve merge conflicts
.github/workflows/dendrite.yml (41 lines changed, vendored)

@@ -97,7 +97,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        go: ["1.18"]
+        go: ["1.18", "1.19"]
     steps:
       - uses: actions/checkout@v3
       - name: Setup go
@@ -127,7 +127,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        go: ["1.18"]
+        go: ["1.18", "1.19"]
         goos: ["linux"]
         goarch: ["amd64", "386"]
     steps:
@@ -151,6 +151,7 @@ jobs:
           GOOS: ${{ matrix.goos }}
           GOARCH: ${{ matrix.goarch }}
           CGO_ENABLED: 1
+          CGO_CFLAGS: -fno-stack-protector
         run: go build -trimpath -v -o "bin/" ./cmd/...

   # build for Windows 64-bit
@@ -160,7 +161,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        go: ["1.18"]
+        go: ["1.18", "1.19"]
         goos: ["windows"]
         goarch: ["amd64"]
     steps:
@@ -223,6 +224,31 @@ jobs:
       - name: Test upgrade
         run: ./dendrite-upgrade-tests --head .

+  # run database upgrade tests, skipping over one version
+  upgrade_test_direct:
+    name: Upgrade tests from HEAD-2
+    timeout-minutes: 20
+    needs: initial-tests-done
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: Setup go
+        uses: actions/setup-go@v2
+        with:
+          go-version: "1.18"
+      - uses: actions/cache@v3
+        with:
+          path: |
+            ~/.cache/go-build
+            ~/go/pkg/mod
+          key: ${{ runner.os }}-go-upgrade-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-upgrade
+      - name: Build upgrade-tests
+        run: go build ./cmd/dendrite-upgrade-tests
+      - name: Test upgrade
+        run: ./dendrite-upgrade-tests -direct -from HEAD-2 --head .
+
   # run Sytest in different variations
   sytest:
     timeout-minutes: 20
@@ -359,7 +385,14 @@ jobs:

   integration-tests-done:
     name: Integration tests passed
-    needs: [initial-tests-done, upgrade_test, sytest, complement]
+    needs:
+      [
+        initial-tests-done,
+        upgrade_test,
+        upgrade_test_direct,
+        sytest,
+        complement,
+      ]
     runs-on: ubuntu-latest
     if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
     steps:
CHANGES.md (44 lines changed)

@@ -1,5 +1,49 @@
 # Changelog

+## Dendrite 0.9.1 (2022-08-03)
+
+### Fixes
+
+* Upgrades a dependency which caused issues building Dendrite with Go 1.19
+* The roomserver will no longer give up prematurely after failing to call `/state_ids`
+* Removes the faulty room info cache, which caused a number of race conditions and occasional bugs (including when creating and joining rooms)
+* The media endpoint now sets the `Cache-Control` header correctly to prevent web-based clients from hitting media endpoints excessively
+* The sync API will now advance the PDU stream position correctly in all cases (contributed by [sergekh2](https://github.com/sergekh2))
+* The sync API will now delete the correct range of send-to-device messages when advancing the stream position
+* The device list `changed` key in the `/sync` response should now return the correct users
+* A data race when looking up missing state has been fixed
+* The `/send_join` API is now applying stronger validation to the received membership event
+
+## Dendrite 0.9.0 (2022-08-01)
+
+### Features
+
+* Dendrite now uses Ristretto for managing in-memory caches
+  * Should improve cache utilisation considerably over time by more intelligently selecting and managing cache entries compared to the previous LRU-based cache
+  * Defaults to a 1GB cache size if not configured otherwise
+  * The estimated cache size in memory and maximum age can now be configured with new [configuration options](https://github.com/matrix-org/dendrite/blob/e94ef84aaba30e12baf7f524c4e7a36d2fdeb189/dendrite-sample.monolith.yaml#L44-L61) to prevent unbounded cache growth
+* Added support for serving the `/.well-known/matrix/client` hint directly from Dendrite
+  * Configurable with the new [configuration option](https://github.com/matrix-org/dendrite/blob/e94ef84aaba30e12baf7f524c4e7a36d2fdeb189/dendrite-sample.monolith.yaml#L67-L69)
+* Refactored membership updater, which should eliminate some bugs caused by the membership table getting out of sync with the room state
+* The User API is now responsible for sending account data updates to other components, which may fix some races and duplicate account data events
+* Optimised database query for checking whether a remote server is allowed to request an event over federation without using anywhere near as much CPU time (PostgreSQL only)
+* Database migrations have been refactored to eliminate some problems that were present with `goose` and upgrading from older Dendrite versions
+* Media fetching will now use the `/v3` endpoints for downloading media from remote homeservers
+* HTTP 404 and HTTP 405 errors from the client-facing APIs should now be returned with CORS headers so that web-based clients do not produce incorrect access control warnings for unknown endpoints
+* Some preparation work for full history visibility support
+
+### Fixes
+
+* Fixes a crash that could occur during event redaction
+* The `/members` endpoint will no longer incorrectly return HTTP 500 as a result of some invite events
+* Send-to-device messages should now be ordered more reliably and the last position in the stream updated correctly
+* Parsing of appservice configuration files is now less strict (contributed by [Kab1r](https://github.com/Kab1r))
+* The sync API should now identify shared users correctly when waking up for E2EE key changes
+* The federation `/state` endpoint will now return a HTTP 403 when the state before an event isn't known instead of a HTTP 500
+* Presence timestamps should now be calculated with the correct precision
+* A race condition in the roomserver's room info has been fixed
+* A race condition in the sync API has been fixed
+
 ## Dendrite 0.8.9 (2022-07-01)

 ### Features
README.md (59 lines changed)

@@ -21,8 +21,7 @@ As of October 2020 (current [progress below](#progress)), Dendrite has now entered
 This does not mean:

 - Dendrite is bug-free. It has not yet been battle-tested in the real world and so will be error prone initially.
-- All of the CS/Federation APIs are implemented. We are tracking progress via a script called 'Are We Synapse Yet?'. In particular,
-  presence and push notifications are entirely missing from Dendrite. See [CHANGES.md](CHANGES.md) for updates.
+- Dendrite is feature-complete. There may be client or federation APIs that are not implemented.
 - Dendrite is ready for massive homeserver deployments. You cannot shard each microservice, only run each one on a different machine.

 Currently, we expect Dendrite to function well for small (10s/100s of users) homeserver deployments as well as P2P Matrix nodes in-browser or on mobile devices.
@@ -36,6 +35,9 @@ If you have further questions, please take a look at [our FAQ](docs/FAQ.md) or j

 ## Requirements

+See the [Planning your Installation](https://matrix-org.github.io/dendrite/installation/planning) page for
+more information on requirements.
+
 To build Dendrite, you will need Go 1.18 or later.

 For a usable federating Dendrite deployment, you will also need:
@@ -83,11 +85,11 @@ $ ./bin/create-account --config dendrite.yaml -username alice

 Then point your favourite Matrix client at `http://localhost:8008` or `https://localhost:8448`.

-## <a id="progress"></a> Progress
+## Progress

 We use a script called Are We Synapse Yet which checks Sytest compliance rates. Sytest is a black-box homeserver
 test rig with around 900 tests. The script works out how many of these tests are passing on Dendrite and it
-updates with CI. As of April 2022 we're at around 83% CS API coverage and 95% Federation coverage, though check
+updates with CI. As of August 2022 we're at around 83% CS API coverage and 95% Federation coverage, though check
 CI for the latest numbers. In practice, this means you can communicate locally and via federation with Synapse
 servers such as matrix.org reasonably well, although there are still some missing features (like Search).

@@ -119,53 +121,8 @@ We would be grateful for any help on issues marked as
 all have related Sytests which need to pass in order for the issue to be closed. Once you've written your
 code, you can quickly run Sytest to ensure that the test names are now passing.

-For example, if the test `Local device key changes get to remote servers` was marked as failing, find the
-test file (e.g via `grep` or via the
-[CI log output](https://buildkite.com/matrix-dot-org/dendrite/builds/2826#39cff5de-e032-4ad0-ad26-f819e6919c42)
-it's `tests/50federation/40devicelists.pl` ) then to run Sytest:
-
-```
-docker run --rm --name sytest
--v "/Users/kegan/github/sytest:/sytest"
--v "/Users/kegan/github/dendrite:/src"
--v "/Users/kegan/logs:/logs"
--v "/Users/kegan/go/:/gopath"
--e "POSTGRES=1" -e "DENDRITE_TRACE_HTTP=1"
-matrixdotorg/sytest-dendrite:latest tests/50federation/40devicelists.pl
-```
-
-See [sytest.md](docs/sytest.md) for the full description of these flags.
-
-You can try running sytest outside of docker for faster runs, but the dependencies can be temperamental
-and we recommend using docker where possible.
-
-```
-cd sytest
-export PERL5LIB=$HOME/lib/perl5
-export PERL_MB_OPT=--install_base=$HOME
-export PERL_MM_OPT=INSTALL_BASE=$HOME
-./install-deps.pl
-
-./run-tests.pl -I Dendrite::Monolith -d $PATH_TO_DENDRITE_BINARIES
-```
-
-Sometimes Sytest is testing the wrong thing or is flakey, so it will need to be patched.
-Ask on `#dendrite-dev:matrix.org` if you think this is the case for you and we'll be happy to help.
-
-If you're new to the project, see [CONTRIBUTING.md](docs/CONTRIBUTING.md) to get up to speed then
+If you're new to the project, see our
+[Contributing page](https://matrix-org.github.io/dendrite/development/contributing) to get up to speed, then
 look for [Good First Issues](https://github.com/matrix-org/dendrite/labels/good%20first%20issue). If you're
 familiar with the project, look for [Help Wanted](https://github.com/matrix-org/dendrite/labels/help-wanted)
 issues.

-## Hardware requirements
-
-Dendrite in Monolith + SQLite works in a range of environments including iOS and in-browser via WASM.
-
-For small homeserver installations joined on ~10s rooms on matrix.org with ~100s of users in those rooms, including some
-encrypted rooms:
-
-- Memory: uses around 100MB of RAM, with peaks at around 200MB.
-- Disk space: After a few months of usage, the database grew to around 2GB (in Monolith mode).
-- CPU: Brief spikes when processing events, typically idles at 1% CPU.
-
-This means Dendrite should comfortably work on things like Raspberry Pis.
@@ -8,7 +8,6 @@ COPY . /build

 RUN mkdir -p bin
 RUN go build -trimpath -o bin/ ./cmd/dendrite-monolith-server
-RUN go build -trimpath -o bin/ ./cmd/goose
 RUN go build -trimpath -o bin/ ./cmd/create-account
 RUN go build -trimpath -o bin/ ./cmd/generate-keys

@@ -8,7 +8,6 @@ COPY . /build

 RUN mkdir -p bin
 RUN go build -trimpath -o bin/ ./cmd/dendrite-polylith-multi
-RUN go build -trimpath -o bin/ ./cmd/goose
 RUN go build -trimpath -o bin/ ./cmd/create-account
 RUN go build -trimpath -o bin/ ./cmd/generate-keys

@@ -13,4 +13,4 @@ go build ./cmd/...
 ./build/scripts/find-lint.sh

 echo "Testing..."
-go test -v ./...
+go test --race -v ./...
@@ -48,7 +48,6 @@ func AddPublicRoutes(

     syncProducer := &producers.SyncAPIProducer{
         JetStream:              js,
-        TopicClientData:        cfg.Matrix.JetStream.Prefixed(jetstream.OutputClientData),
         TopicReceiptEvent:      cfg.Matrix.JetStream.Prefixed(jetstream.OutputReceiptEvent),
         TopicSendToDeviceEvent: cfg.Matrix.JetStream.Prefixed(jetstream.OutputSendToDeviceEvent),
         TopicTypingEvent:       cfg.Matrix.JetStream.Prefixed(jetstream.OutputTypingEvent),
@@ -59,6 +58,7 @@ func AddPublicRoutes(

     routing.Setup(
         base.PublicClientAPIMux,
+        base.PublicWellKnownAPIMux,
         base.SynapseAdminMux,
         base.DendriteAdminMux,
         cfg, rsAPI, asAPI,
@@ -16,7 +16,7 @@ package httputil

 import (
     "encoding/json"
-    "io/ioutil"
+    "io"
     "net/http"
     "unicode/utf8"

@@ -29,9 +29,9 @@ import (
 func UnmarshalJSONRequest(req *http.Request, iface interface{}) *util.JSONResponse {
     // encoding/json allows invalid utf-8, matrix does not
     // https://matrix.org/docs/spec/client_server/r0.6.1#api-standards
-    body, err := ioutil.ReadAll(req.Body)
+    body, err := io.ReadAll(req.Body)
     if err != nil {
-        util.GetLogger(req.Context()).WithError(err).Error("ioutil.ReadAll failed")
+        util.GetLogger(req.Context()).WithError(err).Error("io.ReadAll failed")
         resp := jsonerror.InternalServerError()
         return &resp
     }
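The hunk above, and many of the hunks that follow, make the same mechanical change: the deprecated `io/ioutil` helpers are replaced by their Go 1.16+ equivalents in `io` and `os`. A minimal standalone sketch of the mapping used throughout this commit (the temporary file path is just for illustration):

```go
package main

import (
	"bytes"
	"io"
	"os"
)

func main() {
	// ioutil.ReadAll(r)     -> io.ReadAll(r)
	body, _ := io.ReadAll(bytes.NewBufferString("request body"))

	// ioutil.WriteFile(...) -> os.WriteFile(...)
	_ = os.WriteFile("/tmp/example.txt", body, 0600)

	// ioutil.ReadFile(...)  -> os.ReadFile(...)
	data, _ := os.ReadFile("/tmp/example.txt")

	// ioutil.NopCloser(r)   -> io.NopCloser(r)
	rc := io.NopCloser(bytes.NewReader(data))
	defer rc.Close()
}
```
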
@@ -21,7 +21,6 @@ import (
     "strconv"
     "time"

-    "github.com/matrix-org/dendrite/internal/eventutil"
     "github.com/matrix-org/dendrite/setup/jetstream"
     "github.com/matrix-org/dendrite/syncapi/types"
     userapi "github.com/matrix-org/dendrite/userapi/api"
@@ -32,7 +31,6 @@ import (

 // SyncAPIProducer produces events for the sync API server to consume
 type SyncAPIProducer struct {
-    TopicClientData        string
     TopicReceiptEvent      string
     TopicSendToDeviceEvent string
     TopicTypingEvent       string
@@ -42,36 +40,6 @@ type SyncAPIProducer struct {
     UserAPI userapi.ClientUserAPI
 }

-// SendData sends account data to the sync API server
-func (p *SyncAPIProducer) SendData(userID string, roomID string, dataType string, readMarker *eventutil.ReadMarkerJSON, ignoredUsers *types.IgnoredUsers) error {
-    m := &nats.Msg{
-        Subject: p.TopicClientData,
-        Header:  nats.Header{},
-    }
-    m.Header.Set(jetstream.UserID, userID)
-
-    data := eventutil.AccountData{
-        RoomID:       roomID,
-        Type:         dataType,
-        ReadMarker:   readMarker,
-        IgnoredUsers: ignoredUsers,
-    }
-    var err error
-    m.Data, err = json.Marshal(data)
-    if err != nil {
-        return err
-    }
-
-    log.WithFields(log.Fields{
-        "user_id":   userID,
-        "room_id":   roomID,
-        "data_type": dataType,
-    }).Tracef("Producing to topic '%s'", p.TopicClientData)
-
-    _, err = p.JetStream.PublishMsg(m)
-    return err
-}
-
 func (p *SyncAPIProducer) SendReceipt(
     ctx context.Context,
     userID, roomID, eventID, receiptType string, timestamp gomatrixserverlib.Timestamp,
@@ -17,7 +17,7 @@ package routing
 import (
     "encoding/json"
     "fmt"
-    "io/ioutil"
+    "io"
     "net/http"

     "github.com/matrix-org/dendrite/clientapi/httputil"
@@ -25,7 +25,6 @@ import (
     "github.com/matrix-org/dendrite/clientapi/producers"
     "github.com/matrix-org/dendrite/internal/eventutil"
     roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
-    "github.com/matrix-org/dendrite/syncapi/types"
     "github.com/matrix-org/dendrite/userapi/api"

     "github.com/matrix-org/util"
@@ -102,9 +101,9 @@ func SaveAccountData(
         }
     }

-    body, err := ioutil.ReadAll(req.Body)
+    body, err := io.ReadAll(req.Body)
     if err != nil {
-        util.GetLogger(req.Context()).WithError(err).Error("ioutil.ReadAll failed")
+        util.GetLogger(req.Context()).WithError(err).Error("io.ReadAll failed")
         return jsonerror.InternalServerError()
     }

@@ -127,18 +126,6 @@ func SaveAccountData(
         return util.ErrorResponse(err)
     }

-    var ignoredUsers *types.IgnoredUsers
-    if dataType == "m.ignored_user_list" {
-        ignoredUsers = &types.IgnoredUsers{}
-        _ = json.Unmarshal(body, ignoredUsers)
-    }
-
-    // TODO: user API should do this since it's account data
-    if err := syncProducer.SendData(userID, roomID, dataType, nil, ignoredUsers); err != nil {
-        util.GetLogger(req.Context()).WithError(err).Error("syncProducer.SendData failed")
-        return jsonerror.InternalServerError()
-    }
-
     return util.JSONResponse{
         Code: http.StatusOK,
         JSON: struct{}{},
@@ -191,11 +178,6 @@ func SaveReadMarker(
         return util.ErrorResponse(err)
     }

-    if err := syncProducer.SendData(device.UserID, roomID, "m.fully_read", &r, nil); err != nil {
-        util.GetLogger(req.Context()).WithError(err).Error("syncProducer.SendData failed")
-        return jsonerror.InternalServerError()
-    }
-
     // Handle the read receipt that may be included in the read marker
     if r.Read != "" {
         return SetReceipt(req, syncProducer, device, roomID, "m.read", r.Read)
@@ -1,7 +1,7 @@
 package routing

 import (
-    "io/ioutil"
+    "io"
     "net/http"

     "github.com/matrix-org/dendrite/clientapi/auth"
@@ -20,7 +20,7 @@ func Deactivate(
 ) util.JSONResponse {
     ctx := req.Context()
     defer req.Body.Close() // nolint:errcheck
-    bodyBytes, err := ioutil.ReadAll(req.Body)
+    bodyBytes, err := io.ReadAll(req.Body)
     if err != nil {
         return util.JSONResponse{
             Code: http.StatusBadRequest,
@@ -15,7 +15,7 @@
 package routing

 import (
-    "io/ioutil"
+    "io"
     "net"
     "net/http"

@@ -175,7 +175,7 @@ func DeleteDeviceById(
     }()
     ctx := req.Context()
     defer req.Body.Close() // nolint:errcheck
-    bodyBytes, err := ioutil.ReadAll(req.Body)
+    bodyBytes, err := io.ReadAll(req.Body)
     if err != nil {
         return util.JSONResponse{
             Code: http.StatusBadRequest,
@@ -23,13 +23,14 @@ import (
     "strings"
     "sync"

+    "github.com/matrix-org/gomatrixserverlib"
+    "github.com/matrix-org/util"
+
     "github.com/matrix-org/dendrite/clientapi/api"
     "github.com/matrix-org/dendrite/clientapi/httputil"
     "github.com/matrix-org/dendrite/clientapi/jsonerror"
     roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
     "github.com/matrix-org/dendrite/setup/config"
-    "github.com/matrix-org/gomatrixserverlib"
-    "github.com/matrix-org/util"
 )

 var (
@@ -196,14 +197,14 @@ func fillPublicRoomsReq(httpReq *http.Request, request *PublicRoomReq) *util.JSO

 // sliceInto returns a subslice of `slice` which honours the since/limit values given.
 //
 //    0  1  2  3  4  5  6   index
 //    [A, B, C, D, E, F, G] slice
 //
 //    limit=3          => A,B,C (prev='', next='3')
 //    limit=3&since=3  => D,E,F (prev='0', next='6')
 //    limit=3&since=6  => G     (prev='3', next='')
 //
 // A value of '-1' for prev/next indicates no position.
 func sliceInto(slice []gomatrixserverlib.PublicRoom, since int64, limit int16) (subset []gomatrixserverlib.PublicRoom, prev, next int) {
     prev = -1
     next = -1
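The `sliceInto` doc comment above describes offset-based pagination over the public rooms list. The sketch below is an illustrative reimplementation of those documented semantics over plain strings — it is not the unexported function from the package, just a way to check the prev/next behaviour the comment describes:

```go
package main

import "fmt"

// sliceInto mirrors the documented behaviour: return up to `limit` items
// starting at `since`, plus prev/next offsets (-1 means no position).
func sliceInto(slice []string, since int64, limit int16) (subset []string, prev, next int) {
	prev, next = -1, -1
	if since > 0 {
		prev = int(since) - int(limit)
	}
	nextIndex := int(since) + int(limit)
	if len(slice) > nextIndex {
		next = nextIndex
	}
	end := nextIndex
	if end > len(slice) {
		end = len(slice)
	}
	return slice[since:end], prev, next
}

func main() {
	rooms := []string{"A", "B", "C", "D", "E", "F", "G"}
	for _, since := range []int64{0, 3, 6} {
		subset, prev, next := sliceInto(rooms, since, 3)
		// Prints: 0 [A B C] -1 3, then 3 [D E F] 0 6, then 6 [G] 3 -1
		fmt.Println(since, subset, prev, next)
	}
}
```
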
@@ -19,7 +19,7 @@ import (
     "context"
     "encoding/json"
     "fmt"
-    "io/ioutil"
+    "io"
     "net/http"
     "net/url"
     "regexp"
@@ -388,7 +388,7 @@ func validateRecaptcha(

     // Grab the body of the response from the captcha server
     var r recaptchaResponse
-    body, err := ioutil.ReadAll(resp.Body)
+    body, err := io.ReadAll(resp.Body)
     if err != nil {
         return &util.JSONResponse{
             Code: http.StatusGatewayTimeout,
@@ -556,7 +556,7 @@ func Register(
     cfg *config.ClientAPI,
 ) util.JSONResponse {
     defer req.Body.Close() // nolint: errcheck
-    reqBody, err := ioutil.ReadAll(req.Body)
+    reqBody, err := io.ReadAll(req.Body)
     if err != nil {
         return util.JSONResponse{
             Code: http.StatusBadRequest,
@@ -2,7 +2,7 @@ package routing

 import (
     "bytes"
-    "io/ioutil"
+    "io"
     "testing"

     "github.com/patrickmn/go-cache"
@@ -13,7 +13,7 @@ func TestSharedSecretRegister(t *testing.T) {
     jsonStr := []byte(`{"admin":false,"mac":"f1ba8d37123866fd659b40de4bad9b0f8965c565","nonce":"759f047f312b99ff428b21d581256f8592b8976e58bc1b543972dc6147e529a79657605b52d7becd160ff5137f3de11975684319187e06901955f79e5a6c5a79","password":"wonderland","username":"alice"}`)
     sharedSecret := "dendritetest"

-    req, err := NewSharedSecretRegistrationRequest(ioutil.NopCloser(bytes.NewBuffer(jsonStr)))
+    req, err := NewSharedSecretRegistrationRequest(io.NopCloser(bytes.NewBuffer(jsonStr)))
     if err != nil {
         t.Fatalf("failed to read request: %s", err)
     }
@@ -18,8 +18,6 @@ import (
     "encoding/json"
     "net/http"

-    "github.com/sirupsen/logrus"
-
     "github.com/matrix-org/dendrite/clientapi/httputil"
     "github.com/matrix-org/dendrite/clientapi/jsonerror"
     "github.com/matrix-org/dendrite/clientapi/producers"
@@ -98,10 +96,6 @@ func PutTag(
         return jsonerror.InternalServerError()
     }

-    if err = syncProducer.SendData(userID, roomID, "m.tag", nil, nil); err != nil {
-        logrus.WithError(err).Error("Failed to send m.tag account data update to syncapi")
-    }
-
     return util.JSONResponse{
         Code: http.StatusOK,
         JSON: struct{}{},
@@ -150,11 +144,6 @@ func DeleteTag(
         return jsonerror.InternalServerError()
     }

-    // TODO: user API should do this since it's account data
-    if err := syncProducer.SendData(userID, roomID, "m.tag", nil, nil); err != nil {
-        logrus.WithError(err).Error("Failed to send m.tag account data update to syncapi")
-    }
-
     return util.JSONResponse{
         Code: http.StatusOK,
         JSON: struct{}{},
@@ -48,7 +48,7 @@ import (
 // applied:
 // nolint: gocyclo
 func Setup(
-    publicAPIMux, synapseAdminRouter, dendriteAdminRouter *mux.Router,
+    publicAPIMux, wkMux, synapseAdminRouter, dendriteAdminRouter *mux.Router,
     cfg *config.ClientAPI,
     rsAPI roomserverAPI.ClientRoomserverAPI,
     asAPI appserviceAPI.AppServiceInternalAPI,
@@ -74,6 +74,26 @@ func Setup(
         unstableFeatures["org.matrix."+msc] = true
     }

+    if cfg.Matrix.WellKnownClientName != "" {
+        logrus.Infof("Setting m.homeserver base_url as %s at /.well-known/matrix/client", cfg.Matrix.WellKnownClientName)
+        wkMux.Handle("/client", httputil.MakeExternalAPI("wellknown", func(r *http.Request) util.JSONResponse {
+            return util.JSONResponse{
+                Code: http.StatusOK,
+                JSON: struct {
+                    HomeserverName struct {
+                        BaseUrl string `json:"base_url"`
+                    } `json:"m.homeserver"`
+                }{
+                    HomeserverName: struct {
+                        BaseUrl string `json:"base_url"`
+                    }{
+                        BaseUrl: cfg.Matrix.WellKnownClientName,
+                    },
+                },
+            }
+        })).Methods(http.MethodGet, http.MethodOptions)
+    }
+
     publicAPIMux.Handle("/versions",
         httputil.MakeExternalAPI("versions", func(req *http.Request) util.JSONResponse {
             return util.JSONResponse{
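For reference, the anonymous struct returned by the new `/.well-known/matrix/client` handler above marshals to the standard Matrix client well-known document. A minimal sketch of the resulting JSON body (the base URL shown is a hypothetical `well_known_client_name` value):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Same shape as the struct returned by the handler added above.
	wellKnown := struct {
		HomeserverName struct {
			BaseUrl string `json:"base_url"`
		} `json:"m.homeserver"`
	}{}
	wellKnown.HomeserverName.BaseUrl = "https://matrix.example.com" // hypothetical value

	body, _ := json.Marshal(wellKnown)
	fmt.Println(string(body)) // {"m.homeserver":{"base_url":"https://matrix.example.com"}}
}
```
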
@@ -63,9 +63,10 @@ var sendEventDuration = prometheus.NewHistogramVec(
 )

 // SendEvent implements:
-//   /rooms/{roomID}/send/{eventType}
-//   /rooms/{roomID}/send/{eventType}/{txnID}
-//   /rooms/{roomID}/state/{eventType}/{stateKey}
+//
+//    /rooms/{roomID}/send/{eventType}
+//    /rooms/{roomID}/send/{eventType}/{txnID}
+//    /rooms/{roomID}/state/{eventType}/{stateKey}
 func SendEvent(
     req *http.Request,
     device *userapi.Device,
@@ -38,8 +38,9 @@ type threePIDsResponse struct {
 }

 // RequestEmailToken implements:
-//   POST /account/3pid/email/requestToken
-//   POST /register/email/requestToken
+//
+//    POST /account/3pid/email/requestToken
+//    POST /register/email/requestToken
 func RequestEmailToken(req *http.Request, threePIDAPI api.ClientUserAPI, cfg *config.ClientAPI) util.JSONResponse {
     var body threepid.EmailAssociationRequest
     if reqErr := httputil.UnmarshalJSONRequest(req, &body); reqErr != nil {
@@ -22,15 +22,17 @@ import (
     "net/http"
     "time"

+    "github.com/matrix-org/gomatrix"
+    "github.com/matrix-org/util"
+
     "github.com/matrix-org/dendrite/clientapi/jsonerror"
     "github.com/matrix-org/dendrite/setup/config"
     "github.com/matrix-org/dendrite/userapi/api"
-    "github.com/matrix-org/gomatrix"
-    "github.com/matrix-org/util"
 )

 // RequestTurnServer implements:
-//   GET /voip/turnServer
+//
+//    GET /voip/turnServer
 func RequestTurnServer(req *http.Request, device *api.Device, cfg *config.ClientAPI) util.JSONResponse {
     turnConfig := cfg.TURN

@@ -19,7 +19,6 @@ import (
     "flag"
     "fmt"
     "io"
-    "io/ioutil"
     "os"
     "regexp"
     "strings"
@@ -157,7 +156,7 @@ func main() {
 func getPassword(password, pwdFile string, pwdStdin bool, r io.Reader) (string, error) {
     // read password from file
     if pwdFile != "" {
-        pw, err := ioutil.ReadFile(pwdFile)
+        pw, err := os.ReadFile(pwdFile)
         if err != nil {
             return "", fmt.Errorf("Unable to read password from file: %v", err)
         }
@@ -166,7 +165,7 @@ func getPassword(password, pwdFile string, pwdStdin bool, r io.Reader) (string,

     // read password from stdin
     if pwdStdin {
-        data, err := ioutil.ReadAll(r)
+        data, err := io.ReadAll(r)
         if err != nil {
             return "", fmt.Errorf("Unable to read password from stdin: %v", err)
         }
@@ -21,7 +21,6 @@ import (
     "encoding/hex"
     "flag"
     "fmt"
-    "io/ioutil"
     "net"
     "net/http"
     "os"
@@ -76,11 +75,11 @@ func main() {
         if pk, sk, err = ed25519.GenerateKey(nil); err != nil {
             panic(err)
         }
-        if err = ioutil.WriteFile(keyfile, sk, 0644); err != nil {
+        if err = os.WriteFile(keyfile, sk, 0644); err != nil {
             panic(err)
         }
     } else if err == nil {
-        if sk, err = ioutil.ReadFile(keyfile); err != nil {
+        if sk, err = os.ReadFile(keyfile); err != nil {
             panic(err)
         }
         if len(sk) != ed25519.PrivateKeySize {
@@ -20,7 +20,6 @@ import (
     "encoding/hex"
     "encoding/json"
     "fmt"
-    "io/ioutil"
     "log"
     "net"
     "os"
@@ -69,7 +68,7 @@ func Setup(instanceName, storageDirectory, peerURI string) (*Node, error) {

     yggfile := fmt.Sprintf("%s/%s-yggdrasil.conf", storageDirectory, instanceName)
     if _, err := os.Stat(yggfile); !os.IsNotExist(err) {
-        yggconf, e := ioutil.ReadFile(yggfile)
+        yggconf, e := os.ReadFile(yggfile)
         if e != nil {
             panic(err)
         }
@@ -88,7 +87,7 @@ func Setup(instanceName, storageDirectory, peerURI string) (*Node, error) {
     if err != nil {
         panic(err)
     }
-    if e := ioutil.WriteFile(yggfile, j, 0600); e != nil {
+    if e := os.WriteFile(yggfile, j, 0600); e != nil {
         n.log.Printf("Couldn't write private key to file '%s': %s\n", yggfile, e)
     }

@@ -6,7 +6,7 @@ import (
     "encoding/json"
     "flag"
     "fmt"
-    "io/ioutil"
+    "io"
     "log"
     "net/http"
     "os"
@@ -37,6 +37,7 @@ var (
     flagBuildConcurrency = flag.Int("build-concurrency", runtime.NumCPU(), "The amount of build concurrency when building images")
     flagHead             = flag.String("head", "", "Location to a dendrite repository to treat as HEAD instead of Github")
     flagDockerHost       = flag.String("docker-host", "localhost", "The hostname of the docker client. 'localhost' if running locally, 'host.docker.internal' if running in Docker.")
+    flagDirect           = flag.Bool("direct", false, "If a direct upgrade from the defined FROM version to TO should be done")
     alphaNumerics        = regexp.MustCompile("[^a-zA-Z0-9]+")
 )

@@ -46,7 +47,7 @@ const HEAD = "HEAD"
 // We cannot use the dockerfile associated with the repo with each version sadly due to changes in
 // Docker versions. Specifically, earlier Dendrite versions are incompatible with newer Docker clients
 // due to the error:
 // When using COPY with more than one source file, the destination must be a directory and end with a /
 // We need to run a postgres anyway, so use the dockerfile associated with Complement instead.
 const Dockerfile = `FROM golang:1.18-stretch as build
 RUN apt-get update && apt-get install -y postgresql
@@ -94,7 +95,9 @@ CMD /build/run_dendrite.sh `
 const dendriteUpgradeTestLabel = "dendrite_upgrade_test"

 // downloadArchive downloads an arbitrary github archive of the form:
-//   https://github.com/matrix-org/dendrite/archive/v0.3.11.tar.gz
+//
+//    https://github.com/matrix-org/dendrite/archive/v0.3.11.tar.gz
+//
 // and re-tarballs it without the top-level directory which contains branch information. It inserts
 // the contents of `dockerfile` as a root file `Dockerfile` in the re-tarballed directory such that
 // you can directly feed the retarballed archive to `ImageBuild` to have it run said dockerfile.
@@ -125,7 +128,7 @@ func downloadArchive(cli *http.Client, tmpDir, archiveURL string, dockerfile []b
         return nil, err
     }
     // add top level Dockerfile
-    err = ioutil.WriteFile(path.Join(tmpDir, "Dockerfile"), dockerfile, os.ModePerm)
+    err = os.WriteFile(path.Join(tmpDir, "Dockerfile"), dockerfile, os.ModePerm)
     if err != nil {
         return nil, fmt.Errorf("failed to inject /Dockerfile: %w", err)
     }
@@ -147,7 +150,7 @@ func buildDendrite(httpClient *http.Client, dockerClient *client.Client, tmpDir,
     if branchOrTagName == HEAD && *flagHead != "" {
         log.Printf("%s: Using %s as HEAD", branchOrTagName, *flagHead)
         // add top level Dockerfile
-        err = ioutil.WriteFile(path.Join(*flagHead, "Dockerfile"), []byte(Dockerfile), os.ModePerm)
+        err = os.WriteFile(path.Join(*flagHead, "Dockerfile"), []byte(Dockerfile), os.ModePerm)
         if err != nil {
             return "", fmt.Errorf("custom HEAD: failed to inject /Dockerfile: %w", err)
         }
@@ -229,7 +232,7 @@ func getAndSortVersionsFromGithub(httpClient *http.Client) (semVers []*semver.Ve
     return semVers, nil
 }

-func calculateVersions(cli *http.Client, from, to string) []string {
+func calculateVersions(cli *http.Client, from, to string, direct bool) []string {
     semvers, err := getAndSortVersionsFromGithub(cli)
     if err != nil {
         log.Fatalf("failed to collect semvers from github: %s", err)
@@ -284,6 +287,9 @@ func calculateVersions(cli *http.Client, from, to string) []string {
     if to == HEAD {
         versions = append(versions, HEAD)
     }
+    if direct {
+        versions = []string{versions[0], versions[len(versions)-1]}
+    }
     return versions
 }

@@ -382,7 +388,7 @@ func runImage(dockerClient *client.Client, volumeName, version, imageID string)
     })
     // ignore errors when cannot get logs, it's just for debugging anyways
     if err == nil {
-        logbody, err := ioutil.ReadAll(logs)
+        logbody, err := io.ReadAll(logs)
         if err == nil {
             log.Printf("Container logs:\n\n%s\n\n", string(logbody))
         }
@@ -461,7 +467,7 @@ func main() {
         os.Exit(1)
     }
     cleanup(dockerClient)
-    versions := calculateVersions(httpClient, *flagFrom, *flagTo)
+    versions := calculateVersions(httpClient, *flagFrom, *flagTo, *flagDirect)
     log.Printf("Testing dendrite versions: %v\n", versions)

     branchToImageID := buildDendriteImages(httpClient, dockerClient, *flagTempDir, *flagBuildConcurrency, versions)
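The new `-direct` flag collapses the computed upgrade path to its endpoints, so the test upgrades straight from the oldest selected version to the newest rather than stepping through every release in between. A small sketch of that selection logic in isolation (the version strings are made up for illustration):

```go
package main

import "fmt"

// collapseForDirect mirrors the `if direct { ... }` branch added to
// calculateVersions: keep only the first and last entries of the path.
func collapseForDirect(versions []string, direct bool) []string {
	if direct {
		return []string{versions[0], versions[len(versions)-1]}
	}
	return versions
}

func main() {
	path := []string{"v0.8.9", "v0.9.0", "v0.9.1", "HEAD"} // illustrative
	fmt.Println(collapseForDirect(path, false))            // [v0.8.9 v0.9.0 v0.9.1 HEAD]
	fmt.Println(collapseForDirect(path, true))             // [v0.8.9 HEAD]
}
```
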
@@ -18,9 +18,9 @@ type user struct {
 }

 // runTests performs the following operations:
 //   - register alice and bob with branch name muxed into the localpart
 //   - create a DM room for the 2 users and exchange messages
 //   - create/join a public #global room and exchange messages
 func runTests(baseURL, branchName string) error {
     // register 2 users
     users := []user{
@@ -9,7 +9,6 @@ import (
     "encoding/pem"
     "flag"
     "fmt"
-    "io/ioutil"
     "net/url"
     "os"

@@ -30,7 +29,7 @@ func main() {
         os.Exit(1)
     }

-    data, err := ioutil.ReadFile(*requestKey)
+    data, err := os.ReadFile(*requestKey)
     if err != nil {
         panic(err)
     }
@@ -1,109 +0,0 @@
-## Database migrations
-
-We use [goose](https://github.com/pressly/goose) to handle database migrations. This allows us to execute
-both SQL deltas (e.g `ALTER TABLE ...`) as well as manipulate data in the database in Go using Go functions.
-
-To run a migration, the `goose` binary in this directory needs to be built:
-```
-$ go build ./cmd/goose
-```
-
-This binary allows Dendrite databases to be upgraded and downgraded. Sample usage for upgrading the roomserver database:
-
-```
-# for sqlite
-$ ./goose -dir roomserver/storage/sqlite3/deltas sqlite3 ./roomserver.db up
-
-# for postgres
-$ ./goose -dir roomserver/storage/postgres/deltas postgres "user=dendrite dbname=dendrite sslmode=disable" up
-```
-
-For a full list of options, including rollbacks, see https://github.com/pressly/goose or use `goose` with no args.
-
-
-### Rationale
-
-Dendrite creates tables on startup using `CREATE TABLE IF NOT EXISTS`, so you might think that we should also
-apply version upgrades on startup as well. This is convenient and doesn't involve an additional binary to run
-which complicates upgrades. However, combining the upgrade mechanism and the server binary makes it difficult
-to handle rollbacks. Firstly, how do you specify you wish to rollback? We would have to add additional flags
-to the main server binary to say "rollback to version X". Secondly, if you roll back the server binary from
-version 5 to version 4, the version 4 binary doesn't know how to rollback the database from version 5 to
-version 4! For these reasons, we prefer to have a separate "upgrade" binary which is run for database upgrades.
-Rather than roll-our-own migration tool, we decided to use [goose](https://github.com/pressly/goose) as it supports
-complex migrations in Go code in addition to just executing SQL deltas. Other alternatives like
-`github.com/golang-migrate/migrate` [do not support](https://github.com/golang-migrate/migrate/issues/15) these
-kinds of complex migrations.
-
-### Adding new deltas
-
-You can add `.sql` or `.go` files manually or you can use goose to create them for you.
-
-If you only want to add a SQL delta then run:
-
-```
-$ ./goose -dir serverkeyapi/storage/sqlite3/deltas sqlite3 ./foo.db create new_col sql
-2020/09/09 14:37:43 Created new file: serverkeyapi/storage/sqlite3/deltas/20200909143743_new_col.sql
-```
-
-In this case, the version number is `20200909143743`. The important thing is that it is always increasing.
-
-Then add up/downgrade SQL commands to the created file which looks like:
-```sql
--- +goose Up
--- +goose StatementBegin
-SELECT 'up SQL query';
--- +goose StatementEnd
-
--- +goose Down
--- +goose StatementBegin
-SELECT 'down SQL query';
--- +goose StatementEnd
-
-```
-You __must__ keep the `+goose` annotations. You'll need to repeat this process for Postgres.
-
-For complex Go migrations:
-
-```
-$ ./goose -dir serverkeyapi/storage/sqlite3/deltas sqlite3 ./foo.db create complex_update go
-2020/09/09 14:40:38 Created new file: serverkeyapi/storage/sqlite3/deltas/20200909144038_complex_update.go
-```
-
-Then modify the created `.go` file which looks like:
-
-```go
-package migrations
-
-import (
-    "database/sql"
-    "fmt"
-
-    "github.com/pressly/goose"
-)
-
-func init() {
-    goose.AddMigration(upComplexUpdate, downComplexUpdate)
-}
-
-func upComplexUpdate(tx *sql.Tx) error {
-    // This code is executed when the migration is applied.
-    return nil
-}
-
-func downComplexUpdate(tx *sql.Tx) error {
-    // This code is executed when the migration is rolled back.
-    return nil
-}
-
-```
-
-You __must__ import the package in `/cmd/goose/main.go` so `func init()` gets called.
-
-
-#### Database limitations
-
-- SQLite3 does NOT support `ALTER TABLE table_name DROP COLUMN` - you would have to rename the column or drop the table
-entirely and recreate it. ([example](https://github.com/matrix-org/dendrite/blob/master/userapi/storage/accounts/sqlite3/deltas/20200929203058_is_active.sql))
-
-More information: [sqlite.org](https://www.sqlite.org/lang_altertable.html)
@@ -1,154 +0,0 @@
-// This is custom goose binary
-
-package main
-
-import (
-    "flag"
-    "fmt"
-    "log"
-    "os"
-
-    "github.com/pressly/goose"
-
-    pgusers "github.com/matrix-org/dendrite/userapi/storage/postgres/deltas"
-    slusers "github.com/matrix-org/dendrite/userapi/storage/sqlite3/deltas"
-
-    _ "github.com/lib/pq"
-    _ "github.com/mattn/go-sqlite3"
-)
-
-const (
-    AppService       = "appservice"
-    FederationSender = "federationapi"
-    KeyServer        = "keyserver"
-    MediaAPI         = "mediaapi"
-    RoomServer       = "roomserver"
-    SigningKeyServer = "signingkeyserver"
-    SyncAPI          = "syncapi"
-    UserAPI          = "userapi"
-)
-
-var (
-    dir       = flags.String("dir", "", "directory with migration files")
-    flags     = flag.NewFlagSet("goose", flag.ExitOnError)
-    component = flags.String("component", "", "dendrite component name")
-    knownDBs  = []string{
-        AppService, FederationSender, KeyServer, MediaAPI, RoomServer, SigningKeyServer, SyncAPI, UserAPI,
-    }
-)
-
-// nolint: gocyclo
-func main() {
-    err := flags.Parse(os.Args[1:])
-    if err != nil {
-        panic(err.Error())
-    }
-    args := flags.Args()
-
-    if len(args) < 3 {
-        fmt.Println(
-            `Usage: goose [OPTIONS] DRIVER DBSTRING COMMAND
-
-Drivers:
-    postgres
-    sqlite3
-
-Examples:
-    goose -component roomserver sqlite3 ./roomserver.db status
-    goose -component roomserver sqlite3 ./roomserver.db up
-
-    goose -component roomserver postgres "user=dendrite dbname=dendrite sslmode=disable" status
-
-Options:
-    -component string
-        Dendrite component name e.g roomserver, signingkeyserver, clientapi, syncapi
-    -table string
-        migrations table name (default "goose_db_version")
-    -h  print help
-    -v  enable verbose mode
-    -dir string
-        directory with migration files, only relevant when creating new migrations.
-    -version
-        print version
-
-Commands:
-    up                   Migrate the DB to the most recent version available
-    up-by-one            Migrate the DB up by 1
-    up-to VERSION        Migrate the DB to a specific VERSION
-    down                 Roll back the version by 1
-    down-to VERSION      Roll back to a specific VERSION
-    redo                 Re-run the latest migration
-    reset                Roll back all migrations
-    status               Dump the migration status for the current DB
-    version              Print the current version of the database
-    create NAME [sql|go] Creates new migration file with the current timestamp
-    fix                  Apply sequential ordering to migrations`,
-        )
-        return
-    }
-
-    engine := args[0]
-    if engine != "sqlite3" && engine != "postgres" {
-        fmt.Println("engine must be one of 'sqlite3' or 'postgres'")
-        return
-    }
-
-    knownComponent := false
-    for _, c := range knownDBs {
-        if c == *component {
-            knownComponent = true
-            break
-        }
-    }
-    if !knownComponent {
-        fmt.Printf("component must be one of %v\n", knownDBs)
-        return
-    }
-
-    if engine == "sqlite3" {
-        loadSQLiteDeltas(*component)
-    } else {
-        loadPostgresDeltas(*component)
-    }
-
-    dbstring, command := args[1], args[2]
-
-    db, err := goose.OpenDBWithDriver(engine, dbstring)
-    if err != nil {
-        log.Fatalf("goose: failed to open DB: %v\n", err)
-    }
-
-    defer func() {
-        if err := db.Close(); err != nil {
-            log.Fatalf("goose: failed to close DB: %v\n", err)
-        }
-    }()
-
-    arguments := []string{}
-    if len(args) > 3 {
-        arguments = append(arguments, args[3:]...)
-    }
-
-    // goose demands a directory even though we don't use it for upgrades
-    d := *dir
-    if d == "" {
-        d = os.TempDir()
-    }
-    if err := goose.Run(command, db, d, arguments...); err != nil {
-        log.Fatalf("goose %v: %v", command, err)
-    }
-}
-
-func loadSQLiteDeltas(component string) {
-    switch component {
-    case UserAPI:
-        slusers.LoadFromGoose()
-    }
-}
-
-func loadPostgresDeltas(component string) {
-    switch component {
-    case UserAPI:
-        pgusers.LoadFromGoose()
-    }
-}
@@ -64,6 +64,10 @@ global:
  # e.g. localhost:443
  well_known_server_name: ""

  # The server name to delegate client-server communications to, with optional port
  # e.g. localhost:443
  well_known_client_name: ""

  # Lists of domains that the server will trust as identity servers to verify third
  # party identifiers such as phone numbers and email addresses.
  trusted_third_party_id_servers:
@@ -109,6 +113,11 @@ global:
    addresses:
      # - localhost:4222

    # Disable the validation of TLS certificates of NATS. This is
    # not recommended in production since it may allow NATS traffic
    # to be sent to an insecure endpoint.
    disable_tls_validation: false

    # Persistent directory to store JetStream streams in. This directory should be
    # preserved across Dendrite restarts.
    storage_path: ./
@@ -179,13 +188,16 @@ client_api:

  # TURN server information that this homeserver should send to clients.
  turn:
    turn_user_lifetime: ""
    turn_user_lifetime: "5m"
    turn_uris:
      # - turn:turn.server.org?transport=udp
      # - turn:turn.server.org?transport=tcp
    turn_shared_secret: ""
    turn_username: ""
    turn_password: ""
    # If your TURN server requires static credentials, then you will need to enter
    # them here instead of supplying a shared secret. Note that these credentials
    # will be visible to clients!
    # turn_username: ""
    # turn_password: ""

  # Settings for rate-limited endpoints. Rate limiting kicks in after the threshold
  # number of "slots" have been taken by requests from a specific host. Each "slot"
@@ -193,7 +205,7 @@ client_api:
  # and appservice users are exempt from rate limiting by default.
  rate_limiting:
    enabled: true
    threshold: 5
    threshold: 20
    cooloff_ms: 500
    exempt_user_ids:
      # - "@user:domain.com"
@@ -54,6 +54,10 @@ global:
  # e.g. localhost:443
  well_known_server_name: ""

  # The server name to delegate client-server communications to, with optional port
  # e.g. localhost:443
  well_known_client_name: ""

  # Lists of domains that the server will trust as identity servers to verify third
  # party identifiers such as phone numbers and email addresses.
  trusted_third_party_id_servers:
@@ -99,6 +103,11 @@ global:
    addresses:
      - hostname:4222

    # Disable the validation of TLS certificates of NATS. This is
    # not recommended in production since it may allow NATS traffic
    # to be sent to an insecure endpoint.
    disable_tls_validation: false

    # The prefix to use for stream names for this homeserver - really only useful
    # if you are running more than one Dendrite server on the same NATS deployment.
    topic_prefix: Dendrite
@@ -125,7 +134,7 @@ app_service_api:

  # Database configuration for this component.
  database:
    connection_string: postgresql://username@password:hostname/dendrite_appservice?sslmode=disable
    connection_string: postgresql://username:password@hostname/dendrite_appservice?sslmode=disable
    max_open_conns: 10
    max_idle_conns: 2
    conn_max_lifetime: -1
@@ -182,13 +191,16 @@ client_api:

  # TURN server information that this homeserver should send to clients.
  turn:
    turn_user_lifetime: ""
    turn_user_lifetime: "5m"
    turn_uris:
      # - turn:turn.server.org?transport=udp
      # - turn:turn.server.org?transport=tcp
    turn_shared_secret: ""
    turn_username: ""
    turn_password: ""
    # If your TURN server requires static credentials, then you will need to enter
    # them here instead of supplying a shared secret. Note that these credentials
    # will be visible to clients!
    # turn_username: ""
    # turn_password: ""

  # Settings for rate-limited endpoints. Rate limiting kicks in after the threshold
  # number of "slots" have been taken by requests from a specific host. Each "slot"
@@ -196,7 +208,7 @@ client_api:
  # and appservice users are exempt from rate limiting by default.
  rate_limiting:
    enabled: true
    threshold: 5
    threshold: 20
    cooloff_ms: 500
    exempt_user_ids:
      # - "@user:domain.com"
@@ -209,7 +221,7 @@ federation_api:
  external_api:
    listen: http://[::]:8072
  database:
    connection_string: postgresql://username@password:hostname/dendrite_federationapi?sslmode=disable
    connection_string: postgresql://username:password@hostname/dendrite_federationapi?sslmode=disable
    max_open_conns: 10
    max_idle_conns: 2
    conn_max_lifetime: -1
@@ -246,7 +258,7 @@ key_server:
    listen: http://[::]:7779 # The listen address for incoming API requests
    connect: http://key_server:7779 # The connect address for other components to use
  database:
    connection_string: postgresql://username@password:hostname/dendrite_keyserver?sslmode=disable
    connection_string: postgresql://username:password@hostname/dendrite_keyserver?sslmode=disable
    max_open_conns: 10
    max_idle_conns: 2
    conn_max_lifetime: -1
@@ -259,7 +271,7 @@ media_api:
  external_api:
    listen: http://[::]:8074
  database:
    connection_string: postgresql://username@password:hostname/dendrite_mediaapi?sslmode=disable
    connection_string: postgresql://username:password@hostname/dendrite_mediaapi?sslmode=disable
    max_open_conns: 5
    max_idle_conns: 2
    conn_max_lifetime: -1
@@ -296,7 +308,7 @@ mscs:
    # - msc2836  # (Threading, see https://github.com/matrix-org/matrix-doc/pull/2836)
    # - msc2946  # (Spaces Summary, see https://github.com/matrix-org/matrix-doc/pull/2946)
  database:
    connection_string: postgresql://username@password:hostname/dendrite_mscs?sslmode=disable
    connection_string: postgresql://username:password@hostname/dendrite_mscs?sslmode=disable
    max_open_conns: 5
    max_idle_conns: 2
    conn_max_lifetime: -1
@@ -307,7 +319,7 @@ room_server:
    listen: http://[::]:7770 # The listen address for incoming API requests
    connect: http://room_server:7770 # The connect address for other components to use
  database:
    connection_string: postgresql://username@password:hostname/dendrite_roomserver?sslmode=disable
    connection_string: postgresql://username:password@hostname/dendrite_roomserver?sslmode=disable
    max_open_conns: 10
    max_idle_conns: 2
    conn_max_lifetime: -1
@@ -320,7 +332,7 @@ sync_api:
  external_api:
    listen: http://[::]:8073
  database:
    connection_string: postgresql://username@password:hostname/dendrite_syncapi?sslmode=disable
    connection_string: postgresql://username:password@hostname/dendrite_syncapi?sslmode=disable
    max_open_conns: 10
    max_idle_conns: 2
    conn_max_lifetime: -1
@@ -336,7 +348,7 @@ user_api:
    listen: http://[::]:7781 # The listen address for incoming API requests
    connect: http://user_api:7781 # The connect address for other components to use
  account_database:
    connection_string: postgresql://username@password:hostname/dendrite_userapi?sslmode=disable
    connection_string: postgresql://username:password@hostname/dendrite_userapi?sslmode=disable
    max_open_conns: 10
    max_idle_conns: 2
    conn_max_lifetime: -1
@@ -24,7 +24,7 @@ Unfortunately we can't accept contributions without it.

## Getting up and running

See the [Installation](INSTALL.md) section for information on how to build an
See the [Installation](installation) section for information on how to build an
instance of Dendrite. You will likely need this in order to test your changes.

## Code style
@@ -64,7 +64,7 @@ comment. Please avoid doing this if you can.
We also have unit tests which we run via:

```bash
go test ./...
go test --race ./...
```

In general, we like submissions that come with tests. Anything that proves that the
@@ -86,9 +86,12 @@ would be a huge help too, as that will help us to understand where the memory us

You may need to revisit the connection limit of your PostgreSQL server and/or make changes to the `max_connections` lines in your Dendrite configuration. Be aware that each Dendrite component opens its own database connections and has its own connection limit, even in monolith mode!

## What is being reported when enabling anonymous stats?
## What is being reported when enabling phone-home statistics?

If anonymous stats reporting is enabled, the following data is send to the defined endpoint.
Phone-home statistics contain your server's domain name, some configuration information about
your deployment and aggregated information about active users on your deployment. They are sent
to the endpoint URL configured in your Dendrite configuration file only. The following is an
example of the data that is sent:

```json
{
@@ -106,7 +109,7 @@ If anonymous stats reporting is enabled, the following data is send to the defin
  "go_arch": "amd64",
  "go_os": "linux",
  "go_version": "go1.16.13",
  "homeserver": "localhost:8800",
  "homeserver": "my.domain.com",
  "log_level": "trace",
  "memory_rss": 93452,
  "monolith": true,
@@ -233,6 +233,8 @@ GEM
    multipart-post (2.1.1)
    nokogiri (1.13.6-arm64-darwin)
      racc (~> 1.4)
    nokogiri (1.13.6-x86_64-linux)
      racc (~> 1.4)
    octokit (4.22.0)
      faraday (>= 0.9)
      sawyer (~> 0.8.0, >= 0.5.3)
@@ -263,7 +265,7 @@ GEM
    thread_safe (0.3.6)
    typhoeus (1.4.0)
      ethon (>= 0.9.0)
    tzinfo (1.2.9)
    tzinfo (1.2.10)
      thread_safe (~> 0.1)
    unf (0.1.4)
      unf_ext
@@ -273,11 +275,11 @@ GEM

PLATFORMS
  arm64-darwin-21
  x86_64-linux

DEPENDENCIES
  github-pages (~> 226)
  jekyll-feed (~> 0.15.1)
  minima (~> 2.5.1)

BUNDLED WITH
   2.3.7
@@ -32,6 +32,15 @@ To create a new **admin account**, add the `-admin` flag:
./bin/create-account -config /path/to/dendrite.yaml -username USERNAME -admin
```

An example of using `create-account` when running in **Docker**, having found the `CONTAINERNAME` from `docker ps`:

```bash
docker exec -it CONTAINERNAME /usr/bin/create-account -config /path/to/dendrite.yaml -username USERNAME
```
```bash
docker exec -it CONTAINERNAME /usr/bin/create-account -config /path/to/dendrite.yaml -username USERNAME -admin
```

## Using shared secret registration

Dendrite supports the Synapse-compatible shared secret registration endpoint.
@@ -1,68 +0,0 @@
{
    # debug
    admin off
    email example@example.com
    default_sni example.com
    # Debug endpoint
    # acme_ca https://acme-staging-v02.api.letsencrypt.org/directory
}

#######################################################################
# Snippets
#______________________________________________________________________

(handle_errors_maintenance) {
    handle_errors {
        @maintenance expression {http.error.status_code} == 502
        rewrite @maintenance maintenance.html
        root * "/path/to/service/pages"
        file_server
    }
}

(matrix-well-known-header) {
    # Headers
    header Access-Control-Allow-Origin "*"
    header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS"
    header Access-Control-Allow-Headers "Origin, X-Requested-With, Content-Type, Accept, Authorization"
    header Content-Type "application/json"
}

#######################################################################

example.com {

    # ...

    handle /.well-known/matrix/server {
        import matrix-well-known-header
        respond `{ "m.server": "matrix.example.com:443" }` 200
    }

    handle /.well-known/matrix/client {
        import matrix-well-known-header
        respond `{ "m.homeserver": { "base_url": "https://matrix.example.com" } }` 200
    }

    import handle_errors_maintenance
}

example.com:8448 {
    # server<->server HTTPS traffic
    reverse_proxy http://dendrite-host:8008
}

matrix.example.com {

    handle /_matrix/* {
        # client<->server HTTPS traffic
        reverse_proxy http://dendrite-host:8008
    }

    handle_path /* {
        # Client webapp (Element SPA or ...)
        file_server {
            root /path/to/www/example.com/matrix-web-client/
        }
    }
}
57
docs/caddy/monolith/Caddyfile
Normal file
@@ -0,0 +1,57 @@
# Sample Caddyfile for using Caddy in front of Dendrite.
#
# Customize email address and domain names.
# Optional settings commented out.
#
# BE SURE YOUR DOMAINS ARE POINTED AT YOUR SERVER FIRST.
# Documentation: https://caddyserver.com/docs/
#
# Bonus tip: If your IP address changes, use Caddy's
# dynamic DNS plugin to update your DNS records to
# point to your new IP automatically:
# https://github.com/mholt/caddy-dynamicdns
#


# Global options block
{
    # In case there is a problem with your certificates.
    # email example@example.com

    # Turn off the admin endpoint if you don't need graceful config
    # changes and/or are running untrusted code on your machine.
    # admin off

    # Enable this if your clients don't send ServerName in TLS handshakes.
    # default_sni example.com

    # Enable debug mode for verbose logging.
    # debug

    # Use Let's Encrypt's staging endpoint for testing.
    # acme_ca https://acme-staging-v02.api.letsencrypt.org/directory

    # If you're port-forwarding HTTP/HTTPS ports from 80/443 to something
    # else, enable these and put the alternate port numbers here.
    # http_port 8080
    # https_port 8443
}

# The server name of your matrix homeserver. This example shows
# "well-known delegation" from the registered domain to a subdomain,
# which is only needed if your server_name doesn't match your Matrix
# homeserver URL (i.e. you can show users a vanity domain that looks
# nice and is easy to remember but still have your Matrix server on
# its own subdomain or hosted service).
example.com {
    header /.well-known/matrix/* Content-Type application/json
    header /.well-known/matrix/* Access-Control-Allow-Origin *
    respond /.well-known/matrix/server `{"m.server": "matrix.example.com:443"}`
    respond /.well-known/matrix/client `{"m.homeserver": {"base_url": "https://matrix.example.com"}}`
}

# The actual domain name whereby your Matrix server is accessed.
matrix.example.com {
    # Set localhost:8008 to the address of your Dendrite server, if different
    reverse_proxy /_matrix/* localhost:8008
}
66
docs/caddy/polylith/Caddyfile
Normal file
@@ -0,0 +1,66 @@
# Sample Caddyfile for using Caddy in front of Dendrite.
#
# Customize email address and domain names.
# Optional settings commented out.
#
# BE SURE YOUR DOMAINS ARE POINTED AT YOUR SERVER FIRST.
# Documentation: https://caddyserver.com/docs/
#
# Bonus tip: If your IP address changes, use Caddy's
# dynamic DNS plugin to update your DNS records to
# point to your new IP automatically:
# https://github.com/mholt/caddy-dynamicdns
#


# Global options block
{
    # In case there is a problem with your certificates.
    # email example@example.com

    # Turn off the admin endpoint if you don't need graceful config
    # changes and/or are running untrusted code on your machine.
    # admin off

    # Enable this if your clients don't send ServerName in TLS handshakes.
    # default_sni example.com

    # Enable debug mode for verbose logging.
    # debug

    # Use Let's Encrypt's staging endpoint for testing.
    # acme_ca https://acme-staging-v02.api.letsencrypt.org/directory

    # If you're port-forwarding HTTP/HTTPS ports from 80/443 to something
    # else, enable these and put the alternate port numbers here.
    # http_port 8080
    # https_port 8443
}

# The server name of your matrix homeserver. This example shows
# "well-known delegation" from the registered domain to a subdomain,
# which is only needed if your server_name doesn't match your Matrix
# homeserver URL (i.e. you can show users a vanity domain that looks
# nice and is easy to remember but still have your Matrix server on
# its own subdomain or hosted service).
example.com {
    header /.well-known/matrix/* Content-Type application/json
    header /.well-known/matrix/* Access-Control-Allow-Origin *
    respond /.well-known/matrix/server `{"m.server": "matrix.example.com:443"}`
    respond /.well-known/matrix/client `{"m.homeserver": {"base_url": "https://matrix.example.com"}}`
}

# The actual domain name whereby your Matrix server is accessed.
matrix.example.com {
    # Change the end of each reverse_proxy line to the correct
    # address for your various services.
    @sync_api {
        path_regexp /_matrix/client/.*?/(sync|user/.*?/filter/?.*|keys/changes|rooms/.*?/messages)$
    }
    reverse_proxy @sync_api sync_api:8073

    reverse_proxy /_matrix/client* client_api:8071
    reverse_proxy /_matrix/federation* federation_api:8071
    reverse_proxy /_matrix/key* federation_api:8071
    reverse_proxy /_matrix/media* media_api:8071
}
@@ -2,7 +2,7 @@
title: Starting the polylith
parent: Installation
has_toc: true
nav_order: 9
nav_order: 10
permalink: /installation/start/polylith
---
@@ -2,7 +2,7 @@
title: Optimise your installation
parent: Installation
has_toc: true
nav_order: 10
nav_order: 11
permalink: /installation/start/optimisation
---
@@ -95,12 +95,13 @@ enabled.
To do so, follow the [NATS Server installation instructions](https://docs.nats.io/running-a-nats-service/introduction/installation) and then [start your NATS deployment](https://docs.nats.io/running-a-nats-service/introduction/running). JetStream must be enabled, either by passing the `-js` flag to `nats-server`,
or by specifying the `store_dir` option in the `jetstream` configuration.

### Reverse proxy (polylith deployments)
### Reverse proxy

Polylith deployments require a reverse proxy, such as [NGINX](https://www.nginx.com) or
[HAProxy](http://www.haproxy.org). Configuring those is not covered in this documentation,
although a [sample configuration for NGINX](https://github.com/matrix-org/dendrite/blob/main/docs/nginx/polylith-sample.conf)
is provided.
A reverse proxy such as [Caddy](https://caddyserver.com), [NGINX](https://www.nginx.com) or
[HAProxy](http://www.haproxy.org) is required for polylith deployments and is useful for monolith
deployments. Configuring those is not covered in this documentation, although sample configurations
for [Caddy](https://github.com/matrix-org/dendrite/blob/main/docs/caddy) and
[NGINX](https://github.com/matrix-org/dendrite/blob/main/docs/nginx) are provided.

### Windows
@@ -14,27 +14,38 @@ that take the format `@user:example.com`.
For federation to work, the server name must be resolvable by other homeservers on the internet
— that is, the domain must be registered and properly configured with the relevant DNS records.

Matrix servers discover each other when federating using the following methods:
Matrix servers usually discover each other when federating using the following methods:

1. If a well-known delegation exists on `example.com`, use the path server from the
1. If a well-known delegation exists on `example.com`, use the domain and port from the
   well-known file to connect to the remote homeserver;
2. If a DNS SRV delegation exists on `example.com`, use the hostname and port from the DNS SRV
2. If a DNS SRV delegation exists on `example.com`, use the IP address and port from the DNS SRV
   record to connect to the remote homeserver;
3. If neither well-known or DNS SRV delegation are configured, attempt to connect to the remote
   homeserver by connecting to `example.com` port TCP/8448 using HTTPS.

The exact details of how server name resolution works can be found in
[the spec](https://spec.matrix.org/v1.3/server-server-api/#resolving-server-names).

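To make step 1 above concrete, the following is a hedged, standalone Go sketch (not Dendrite code) of how a federating server might fetch and parse the well-known document for `example.com`; only the `m.server` field described in this document is modelled, and error handling is deliberately minimal:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// wellKnownServer models the /.well-known/matrix/server response, which
// carries the delegated "m.server" host and port.
type wellKnownServer struct {
	Server string `json:"m.server"`
}

func main() {
	// Step 1 of discovery: fetch the well-known file over HTTPS.
	resp, err := http.Get("https://example.com/.well-known/matrix/server")
	if err != nil {
		fmt.Println("no well-known delegation:", err)
		return
	}
	defer resp.Body.Close()

	var wk wellKnownServer
	if err := json.NewDecoder(resp.Body).Decode(&wk); err != nil {
		fmt.Println("invalid well-known document:", err)
		return
	}
	// Federation traffic would now be sent to this host and port.
	fmt.Println("delegated to:", wk.Server)
}
```
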
## TLS certificates

Matrix federation requires that valid TLS certificates are present on the domain. You must
obtain certificates from a publicly accepted Certificate Authority (CA). [LetsEncrypt](https://letsencrypt.org)
is an example of such a CA that can be used. Self-signed certificates are not suitable for
federation and will typically not be accepted by other homeservers.
obtain certificates from a publicly-trusted certificate authority (CA). [Let's Encrypt](https://letsencrypt.org)
is a popular choice of CA because the certificates are publicly-trusted, free, and automated
via the ACME protocol. (Self-signed certificates are not suitable for federation and will typically
not be accepted by other homeservers.)

A common practice to help ease the management of certificates is to install a reverse proxy in
front of Dendrite which manages the TLS certificates and HTTPS proxying itself. Software such as
[NGINX](https://www.nginx.com) and [HAProxy](http://www.haproxy.org) can be used for the task.
Although the finer details of configuring these are not described here, you must reverse proxy
all `/_matrix` paths to your Dendrite server.
Automating the renewal of TLS certificates is best practice. There are many tools for this,
but the simplest way to achieve TLS automation is to have your reverse proxy do it for you.
[Caddy](https://caddyserver.com) is recommended as a production-grade reverse proxy with
automatic TLS which is commonly used in front of Dendrite. It obtains and renews TLS certificates
automatically and by default as long as your domain name is pointed at your server first.
Although the finer details of [configuring Caddy](https://caddyserver.com/docs/) are not described
here, in general, you must reverse proxy all `/_matrix` paths to your Dendrite server. For example,
with Caddy:

```
reverse_proxy /_matrix/* localhost:8008
```

It is possible for the reverse proxy to listen on the standard HTTPS port TCP/443 so long as your
domain delegation is configured to point to port TCP/443.
@@ -51,17 +62,12 @@ you will be able to delegate from `example.com` to `matrix.example.com` so that

Delegation can be performed in one of two ways:

* **Well-known delegation**: A well-known text file is served over HTTPS on the domain name
  that you want to use, pointing to your server on `matrix.example.com` port 8448;
* **DNS SRV delegation**: A DNS SRV record is created on the domain name that you want to
  use, pointing to your server on `matrix.example.com` port TCP/8448.
* **Well-known delegation (preferred)**: A well-known text file is served over HTTPS on the domain
  name that you want to use, pointing to your server on `matrix.example.com` port 8448;
* **DNS SRV delegation (not recommended)**: See the SRV delegation section below for details.

If you are using a reverse proxy to forward `/_matrix` to Dendrite, your well-known or DNS SRV
delegation must refer to the hostname and port that the reverse proxy is listening on instead.
If you are using a reverse proxy to forward `/_matrix` to Dendrite, your well-known or delegation
must refer to the hostname and port that the reverse proxy is listening on instead.

Well-known delegation is typically easier to set up and usually preferred. However, you can use
either or both methods to delegate. If you configure both methods of delegation, it is important
that they both agree and refer to the same hostname and port.

## Well-known delegation
@@ -74,20 +80,46 @@ and contain the following JSON document:

```json
{
    "m.server": "https://matrix.example.com:8448"
    "m.server": "matrix.example.com:8448"
}
```

For example, this can be done with the following Caddy config:

```
handle /.well-known/matrix/client {
    header Content-Type application/json
    header Access-Control-Allow-Origin *
    respond `{"m.homeserver": {"base_url": "https://matrix.example.com:8448"}}`
}
```

You can also serve `.well-known` with Dendrite itself by setting the `well_known_server_name` config
option to the value you want for `m.server`. This is primarily useful if Dendrite is exposed on
`example.com:443` and you don't want to set up a separate webserver just for serving the `.well-known`
file.

```yaml
global:
  ...
  well_known_server_name: "example.com:443"
```

## DNS SRV delegation

Using DNS SRV delegation requires creating DNS SRV records on the `example.com` zone which
refer to your Dendrite installation.
This method is not recommended, as the behavior of SRV records in Matrix is rather unintuitive:
SRV records will only change the IP address and port that other servers connect to, they won't
affect the domain name. In technical terms, the `Host` header and TLS SNI of federation requests
will still be `example.com` even if the SRV record points at `matrix.example.com`.

Assuming that your Dendrite installation is listening for HTTPS connections at `matrix.example.com`
port 8448, the DNS SRV record must have the following fields:
In practice, this means that the server must be configured with valid TLS certificates for
`example.com`, rather than `matrix.example.com` as one might intuitively expect. If there's a
reverse proxy in between, the proxy configuration must be written as if it's `example.com`, as the
proxy will never see the name `matrix.example.com` in incoming requests.

* Name: `@` (or whichever term your DNS provider uses to signal the root)
* Service: `_matrix`
* Protocol: `_tcp`
* Port: `8448`
* Target: `matrix.example.com`
This behavior also means that if `example.com` and `matrix.example.com` point at the same IP
address, there is no reason to have a SRV record pointing at `matrix.example.com`. It can still
be used to change the port number, but it won't do anything else.

If you understand how SRV records work and still want to use them, the service name is `_matrix` and
the protocol is `_tcp`.
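
For completeness, this hedged Go sketch (not Dendrite code) shows the lookup implied by the `_matrix` service and `_tcp` protocol described above; as the text notes, the answer only changes the host and port that are dialled, not the server name:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Looks up _matrix._tcp.example.com SRV records.
	_, addrs, err := net.LookupSRV("matrix", "tcp", "example.com")
	if err != nil {
		fmt.Println("no SRV delegation:", err)
		return
	}
	for _, srv := range addrs {
		// Only the connection target changes; the server name (and thus the
		// expected TLS certificate and Host header) remains example.com.
		fmt.Printf("connect to %s:%d\n", srv.Target, srv.Port)
	}
}
```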
38
docs/installation/3_build.md
Normal file
@@ -0,0 +1,38 @@
---
title: Building Dendrite
parent: Installation
has_toc: true
nav_order: 3
permalink: /installation/build
---

# Build all Dendrite commands

Dendrite has numerous utility commands in addition to the actual server binaries.
Build them all from the root of the source repo with `build.sh` (Linux/Mac):

```sh
./build.sh
```

or `build.cmd` (Windows):

```powershell
build.cmd
```

The resulting binaries will be placed in the `bin` subfolder.

# Installing as a monolith

You can install the Dendrite monolith binary into `$GOPATH/bin` by using `go install`:

```sh
go install ./cmd/dendrite-monolith-server
```

Alternatively, you can specify a custom path for the binary to be written to using `go build`:

```sh
go build -o /usr/local/bin/ ./cmd/dendrite-monolith-server
```
@@ -17,7 +17,9 @@ filenames in the Dendrite configuration file and start Dendrite. The databases w
and populated automatically.

Note that Dendrite **cannot share a single SQLite database across multiple components**. Each
component must be configured with its own SQLite database filename.
component must be configured with its own SQLite database filename. You will have to remove
the `global.database` section from your Dendrite config and add it to each individual section
instead in order to use SQLite.

### Connection strings
@@ -29,5 +29,6 @@ Polylith deployments require a reverse proxy in order to ensure that requests ar
sent to the correct endpoint. You must ensure that a suitable reverse proxy is installed
and configured.

A [sample configuration file](https://github.com/matrix-org/dendrite/blob/main/docs/nginx/polylith-sample.conf)
is provided for [NGINX](https://www.nginx.com).
Sample configurations are provided
for [Caddy](https://github.com/matrix-org/dendrite/blob/main/docs/caddy/polylith/Caddyfile)
and [NGINX](https://github.com/matrix-org/dendrite/blob/main/docs/nginx/polylith-sample.conf).
@ -1,13 +1,13 @@
|
||||||
---
|
---
|
||||||
title: Populate the configuration
|
title: Configuring Dendrite
|
||||||
parent: Installation
|
parent: Installation
|
||||||
nav_order: 7
|
nav_order: 7
|
||||||
permalink: /installation/configuration
|
permalink: /installation/configuration
|
||||||
---
|
---
|
||||||
|
|
||||||
# Populate the configuration
|
# Configuring Dendrite
|
||||||
|
|
||||||
The configuration file is used to configure Dendrite. Sample configuration files are
|
A YAML configuration file is used to configure Dendrite. Sample configuration files are
|
||||||
present in the top level of the Dendrite repository:
|
present in the top level of the Dendrite repository:
|
||||||
|
|
||||||
* [`dendrite-sample.monolith.yaml`](https://github.com/matrix-org/dendrite/blob/main/dendrite-sample.monolith.yaml)
|
* [`dendrite-sample.monolith.yaml`](https://github.com/matrix-org/dendrite/blob/main/dendrite-sample.monolith.yaml)
|
||||||
|
|
|
||||||
|
@@ -1,7 +1,7 @@
---
title: Generating signing keys
parent: Installation
nav_order: 4
nav_order: 8
permalink: /installation/signingkeys
---
@@ -15,8 +15,9 @@ you can start your Dendrite monolith deployment by starting the `dendrite-monoli
./dendrite-monolith-server -config /path/to/dendrite.yaml
```

If you want to change the addresses or ports that Dendrite listens on, you
can use the `-http-bind-address` and `-https-bind-address` command line arguments:
By default, Dendrite will listen HTTP on port 8008. If you want to change the addresses
or ports that Dendrite listens on, you can use the `-http-bind-address` and
`-https-bind-address` command line arguments:

```bash
./dendrite-monolith-server -config /path/to/dendrite.yaml \
@@ -208,9 +208,11 @@ func (s *OutputRoomEventConsumer) processMessage(ore api.OutputNewRoomEvent, rew
// joinedHostsAtEvent works out a list of matrix servers that were joined to
// the room at the event (including peeking ones)
// It is important to use the state at the event for sending messages because:
// 1) We shouldn't send messages to servers that weren't in the room.
// 2) If a server is kicked from the rooms it should still be told about the
//    kick event,
//
// 1. We shouldn't send messages to servers that weren't in the room.
// 2. If a server is kicked from the rooms it should still be told about the
//    kick event.
//
// Usually the list can be calculated locally, but sometimes it will need fetch
// events from the room server.
// Returns an error if there was a problem talking to the room server.
@@ -6,7 +6,7 @@ import (
	"crypto/ed25519"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"io"
	"net/http"
	"os"
	"testing"
@@ -66,7 +66,7 @@ func TestMain(m *testing.M) {
	s.cache = caching.NewRistrettoCache(8*1024*1024, time.Hour, false)

	// Create a temporary directory for JetStream.
	d, err := ioutil.TempDir("./", "jetstream*")
	d, err := os.MkdirTemp("./", "jetstream*")
	if err != nil {
		panic(err)
	}
@@ -136,7 +136,7 @@ func (m *MockRoundTripper) RoundTrip(req *http.Request) (res *http.Response, err
	// And respond.
	res = &http.Response{
		StatusCode: 200,
		Body:       ioutil.NopCloser(bytes.NewReader(body)),
		Body:       io.NopCloser(bytes.NewReader(body)),
	}
	return
}
@@ -6,6 +6,7 @@ import (
	"encoding/json"
	"fmt"
	"strings"
	"sync"
	"testing"
	"time"
@@ -48,6 +49,7 @@ func (f *fedRoomserverAPI) QueryRoomsForUser(ctx context.Context, req *rsapi.Que

// TODO: This struct isn't generic, only works for TestFederationAPIJoinThenKeyUpdate
type fedClient struct {
	fedClientMutex sync.Mutex
	api.FederationClient
	allowJoins []*test.Room
	keys       map[gomatrixserverlib.ServerName]struct {
@@ -59,6 +61,8 @@ type fedClient struct {
}

func (f *fedClient) GetServerKeys(ctx context.Context, matrixServer gomatrixserverlib.ServerName) (gomatrixserverlib.ServerKeys, error) {
	f.fedClientMutex.Lock()
	defer f.fedClientMutex.Unlock()
	fmt.Println("GetServerKeys:", matrixServer)
	var keys gomatrixserverlib.ServerKeys
	var keyID gomatrixserverlib.KeyID
@@ -122,6 +126,8 @@ func (f *fedClient) MakeJoin(ctx context.Context, s gomatrixserverlib.ServerName
	return
}
func (f *fedClient) SendJoin(ctx context.Context, s gomatrixserverlib.ServerName, event *gomatrixserverlib.Event) (res gomatrixserverlib.RespSendJoin, err error) {
	f.fedClientMutex.Lock()
	defer f.fedClientMutex.Unlock()
	for _, r := range f.allowJoins {
		if r.ID == event.RoomID() {
			r.InsertEvent(f.t, event.Headered(r.Version))
@@ -134,6 +140,8 @@ func (f *fedClient) SendJoin(ctx context.Context, s gomatrixserverlib.ServerName
}

func (f *fedClient) SendTransaction(ctx context.Context, t gomatrixserverlib.Transaction) (res gomatrixserverlib.RespSend, err error) {
	f.fedClientMutex.Lock()
	defer f.fedClientMutex.Unlock()
	for _, edu := range t.EDUs {
		if edu.Type == gomatrixserverlib.MDeviceListUpdate {
			f.sentTxn = true
@@ -242,6 +250,8 @@ func testFederationAPIJoinThenKeyUpdate(t *testing.T, dbType test.DBType) {

	testrig.MustPublishMsgs(t, jsctx, msg)
	time.Sleep(500 * time.Millisecond)
	fc.fedClientMutex.Lock()
	defer fc.fedClientMutex.Unlock()
	if !fc.sentTxn {
		t.Fatalf("did not send device list update")
	}
@@ -158,7 +158,7 @@ func (oqs *OutgoingQueues) getQueue(destination gomatrixserverlib.ServerName) *d
	oqs.queuesMutex.Lock()
	defer oqs.queuesMutex.Unlock()
	oq, ok := oqs.queues[destination]
	if !ok || oq != nil {
	if !ok || oq == nil {
		destinationQueueTotal.Inc()
		oq = &destinationQueue{
			queues: oqs,
@@ -26,7 +26,6 @@ import (
	"github.com/matrix-org/dendrite/setup/config"
	"github.com/matrix-org/gomatrixserverlib"
	"github.com/matrix-org/util"
	"github.com/sirupsen/logrus"
)

// InviteV2 implements /_matrix/federation/v2/invite/{roomID}/{eventID}
@@ -144,7 +143,6 @@ func processInvite(
	// Check that the event is signed by the server sending the request.
	redacted, err := gomatrixserverlib.RedactEventJSON(event.JSON(), event.Version())
	if err != nil {
		logrus.WithError(err).Errorf("XXX: invite.go")
		return util.JSONResponse{
			Code: http.StatusBadRequest,
			JSON: jsonerror.BadJSON("The event JSON could not be redacted"),
@@ -21,13 +21,14 @@ import (
	"sort"
	"time"

	"github.com/matrix-org/gomatrixserverlib"
	"github.com/matrix-org/util"
	"github.com/sirupsen/logrus"

	"github.com/matrix-org/dendrite/clientapi/jsonerror"
	"github.com/matrix-org/dendrite/internal/eventutil"
	"github.com/matrix-org/dendrite/roomserver/api"
	"github.com/matrix-org/dendrite/setup/config"
	"github.com/matrix-org/gomatrixserverlib"
	"github.com/matrix-org/util"
	"github.com/sirupsen/logrus"
)

// MakeJoin implements the /make_join API
@@ -202,6 +203,14 @@ func SendJoin(
		}
	}

	// Check that the event is from the server sending the request.
	if event.Origin() != request.Origin() {
		return util.JSONResponse{
			Code: http.StatusForbidden,
			JSON: jsonerror.Forbidden("The join must be sent by the server it originated on"),
		}
	}

	// Check that a state key is provided.
	if event.StateKey() == nil || event.StateKeyEquals("") {
		return util.JSONResponse{
@@ -216,6 +225,22 @@ func SendJoin(
		}
	}

	// Check that the sender belongs to the server that is sending us
	// the request. By this point we've already asserted that the sender
	// and the state key are equal so we don't need to check both.
	var domain gomatrixserverlib.ServerName
	if _, domain, err = gomatrixserverlib.SplitID('@', event.Sender()); err != nil {
		return util.JSONResponse{
			Code: http.StatusForbidden,
			JSON: jsonerror.Forbidden("The sender of the join is invalid"),
		}
	} else if domain != request.Origin() {
		return util.JSONResponse{
			Code: http.StatusForbidden,
			JSON: jsonerror.Forbidden("The sender of the join must belong to the origin server"),
		}
	}

	// Check that the room ID is correct.
	if event.RoomID() != roomID {
		return util.JSONResponse{
@@ -242,14 +267,6 @@ func SendJoin(
		}
	}

	// Check that the event is from the server sending the request.
	if event.Origin() != request.Origin() {
		return util.JSONResponse{
			Code: http.StatusForbidden,
			JSON: jsonerror.Forbidden("The join must be sent by the server it originated on"),
		}
	}

	// Check that this is in fact a join event
	membership, err := event.Membership()
	if err != nil {
@@ -419,13 +436,13 @@ func SendJoin(
// a restricted room join. If the room version does not support restricted
// joins then this function returns with no side effects. This returns three
// values:
// * an optional JSON response body (i.e. M_UNABLE_TO_AUTHORISE_JOIN) which
//   - an optional JSON response body (i.e. M_UNABLE_TO_AUTHORISE_JOIN) which
//     should always be sent back to the client if one is specified
// * a user ID of an authorising user, typically a user that has power to
//   - a user ID of an authorising user, typically a user that has power to
//     issue invites in the room, if one has been found
// * an error if there was a problem finding out if this was allowable,
//   - an error if there was a problem finding out if this was allowable,
//     like if the room version isn't known or a problem happened talking to
//     the roomserver
func checkRestrictedJoin(
	httpReq *http.Request,
	rsAPI api.FederationRoomserverAPI,
@@ -15,23 +15,13 @@
package deltas

import (
	"context"
	"database/sql"
	"fmt"

	"github.com/matrix-org/dendrite/internal/sqlutil"
	"github.com/pressly/goose"
)

func LoadFromGoose() {
	goose.AddMigration(UpRemoveRoomsTable, DownRemoveRoomsTable)
}

func LoadRemoveRoomsTable(m *sqlutil.Migrations) {
	m.AddMigration(UpRemoveRoomsTable, DownRemoveRoomsTable)
}

func UpRemoveRoomsTable(tx *sql.Tx) error {
	_, err := tx.Exec(`
func UpRemoveRoomsTable(ctx context.Context, tx *sql.Tx) error {
	_, err := tx.ExecContext(ctx, `
		DROP TABLE IF EXISTS federationsender_rooms;
	`)
	if err != nil {
|
|
@@ -82,9 +82,13 @@ func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions,
 	if err != nil {
 		return nil, err
 	}
-	m := sqlutil.NewMigrations()
-	deltas.LoadRemoveRoomsTable(m)
-	if err = m.RunDeltas(d.db, dbProperties); err != nil {
+	m := sqlutil.NewMigrator(d.db)
+	m.AddMigrations(sqlutil.Migration{
+		Version: "federationsender: drop federationsender_rooms",
+		Up: deltas.UpRemoveRoomsTable,
+	})
+	err = m.Up(base.Context())
+	if err != nil {
 		return nil, err
 	}
 	d.Database = shared.Database{
@@ -15,23 +15,13 @@
 package deltas

 import (
+	"context"
 	"database/sql"
 	"fmt"
-
-	"github.com/matrix-org/dendrite/internal/sqlutil"
-	"github.com/pressly/goose"
 )

-func LoadFromGoose() {
-	goose.AddMigration(UpRemoveRoomsTable, DownRemoveRoomsTable)
-}
-
-func LoadRemoveRoomsTable(m *sqlutil.Migrations) {
-	m.AddMigration(UpRemoveRoomsTable, DownRemoveRoomsTable)
-}
-
-func UpRemoveRoomsTable(tx *sql.Tx) error {
-	_, err := tx.Exec(`
+func UpRemoveRoomsTable(ctx context.Context, tx *sql.Tx) error {
+	_, err := tx.ExecContext(ctx, `
 	DROP TABLE IF EXISTS federationsender_rooms;
 	`)
 	if err != nil {
@@ -81,9 +81,13 @@ func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions,
 	if err != nil {
 		return nil, err
 	}
-	m := sqlutil.NewMigrations()
-	deltas.LoadRemoveRoomsTable(m)
-	if err = m.RunDeltas(d.db, dbProperties); err != nil {
+	m := sqlutil.NewMigrator(d.db)
+	m.AddMigrations(sqlutil.Migration{
+		Version: "federationsender: drop federationsender_rooms",
+		Up: deltas.UpRemoveRoomsTable,
+	})
+	err = m.Up(base.Context())
+	if err != nil {
 		return nil, err
 	}
 	d.Database = shared.Database{
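Both the PostgreSQL and SQLite storage variants now register their deltas the same way: build a migrator for the open *sql.DB, add named migrations, and run them in one call. A minimal sketch of that pattern under stated assumptions — the table name, version string, and helper dropMyTable below are hypothetical, only the sqlutil API mirrors the change above:

package example

import (
	"context"
	"database/sql"

	"github.com/matrix-org/dendrite/internal/sqlutil"
)

// runMigrations mirrors the registration pattern used in the storage code
// above; "my_table" and the version string are illustrative only.
func runMigrations(ctx context.Context, db *sql.DB) error {
	m := sqlutil.NewMigrator(db)
	m.AddMigrations(sqlutil.Migration{
		Version: "example: drop my_table",
		Up: dropMyTable,
	})
	return m.Up(ctx)
}

// dropMyTable matches the Migration.Up signature defined by the new migrator.
func dropMyTable(ctx context.Context, tx *sql.Tx) error {
	_, err := tx.ExecContext(ctx, "DROP TABLE IF EXISTS my_table;")
	return err
}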
28 go.mod

@@ -1,9 +1,5 @@
 module github.com/matrix-org/dendrite

-replace github.com/nats-io/nats-server/v2 => github.com/neilalexander/nats-server/v2 v2.8.3-0.20220513095553-73a9a246d34f
-
-replace github.com/nats-io/nats.go => github.com/neilalexander/nats.go v1.13.1-0.20220621084451-ac518c356673
-
 require (
 	github.com/Arceliar/ironwood v0.0.0-20220306165321-319147a02d98
 	github.com/Arceliar/phony v0.0.0-20210209235338-dde1a8dca979
@@ -25,19 +21,18 @@ require (
 	github.com/matrix-org/dugong v0.0.0-20210921133753-66e6b1c67e2e
 	github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91
 	github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16
-	github.com/matrix-org/gomatrixserverlib v0.0.0-20220713083127-fc2ea1e62e46
-	github.com/matrix-org/pinecone v0.0.0-20220708135211-1ce778fcde6a
+	github.com/matrix-org/gomatrixserverlib v0.0.0-20220725104114-b6003e522771
+	github.com/matrix-org/pinecone v0.0.0-20220803093810-b7a830c08fb9
 	github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4
 	github.com/mattn/go-sqlite3 v1.14.13
-	github.com/nats-io/nats-server/v2 v2.7.4-0.20220309205833-773636c1c5bb
-	github.com/nats-io/nats.go v1.14.0
+	github.com/nats-io/nats-server/v2 v2.8.5-0.20220731184415-903a06a5b4ee
+	github.com/nats-io/nats.go v1.16.1-0.20220731182438-87bbea85922b
 	github.com/neilalexander/utp v0.1.1-0.20210727203401-54ae7b1cd5f9
 	github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646
 	github.com/ngrok/sqlmw v0.0.0-20220520173518-97c9c04efc79
 	github.com/opentracing/opentracing-go v1.2.0
 	github.com/patrickmn/go-cache v2.1.0+incompatible
 	github.com/pkg/errors v0.9.1
-	github.com/pressly/goose v2.7.0+incompatible
 	github.com/prometheus/client_golang v1.12.2
 	github.com/sirupsen/logrus v1.8.1
 	github.com/spruceid/siwe-go v0.2.0
@@ -52,7 +47,7 @@ require (
 	golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e
 	golang.org/x/image v0.0.0-20220413100746-70e8d0d3baa9
 	golang.org/x/mobile v0.0.0-20220518205345-8578da9835fd
-	golang.org/x/net v0.0.0-20220524220425-1d687d428aca
+	golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e
 	golang.org/x/term v0.0.0-20220526004731-065cf7ba2467
 	gopkg.in/h2non/bimg.v1 v1.1.9
 	gopkg.in/yaml.v2 v2.4.0
@@ -83,17 +78,18 @@ require (
 	github.com/h2non/filetype v1.1.3 // indirect
 	github.com/juju/errors v0.0.0-20220203013757-bd733f3c86b9 // indirect
 	github.com/juju/testing v0.0.0-20220203020004-a0ff61f03494 // indirect
-	github.com/klauspost/compress v1.14.4 // indirect
-	github.com/lucas-clemente/quic-go v0.26.0 // indirect
+	github.com/klauspost/compress v1.15.9 // indirect
+	github.com/lucas-clemente/quic-go v0.28.1 // indirect
 	github.com/marten-seemann/qtls-go1-16 v0.1.5 // indirect
-	github.com/marten-seemann/qtls-go1-17 v0.1.1 // indirect
-	github.com/marten-seemann/qtls-go1-18 v0.1.1 // indirect
+	github.com/marten-seemann/qtls-go1-17 v0.1.2 // indirect
+	github.com/marten-seemann/qtls-go1-18 v0.1.2 // indirect
+	github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
 	github.com/miekg/dns v1.1.49 // indirect
 	github.com/minio/highwayhash v1.0.2 // indirect
 	github.com/moby/term v0.0.0-20210610120745-9d4ed1856297 // indirect
 	github.com/morikuni/aec v1.0.0 // indirect
-	github.com/nats-io/jwt/v2 v2.2.1-0.20220330180145-442af02fd36a // indirect
+	github.com/nats-io/jwt/v2 v2.3.0 // indirect
 	github.com/nats-io/nkeys v0.3.0 // indirect
 	github.com/nats-io/nuid v1.0.1 // indirect
 	github.com/nxadm/tail v1.4.8 // indirect
@@ -111,7 +107,7 @@ require (
 	golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
 	golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
 	golang.org/x/text v0.3.8-0.20211004125949-5bd84dd9b33b // indirect
-	golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect
+	golang.org/x/time v0.0.0-20220411224347-583f2d630306 // indirect
 	golang.org/x/tools v0.1.10 // indirect
 	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
 	google.golang.org/protobuf v1.27.1 // indirect
51 go.sum

@@ -426,8 +426,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
 github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
 github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.14.4 h1:eijASRJcobkVtSt81Olfh7JX43osYLwy5krOJo6YEu4=
-github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY=
+github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
 github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
 github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
@@ -454,8 +454,8 @@ github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgx
 github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/lib/pq v1.10.5 h1:J+gdV2cUmX7ZqL2B0lFcW0m+egaHC2V3lpO8nWxyYiQ=
 github.com/lib/pq v1.10.5/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/lucas-clemente/quic-go v0.26.0 h1:ALBQXr9UJ8A1LyzvceX4jd9QFsHvlI0RR6BkV16o00A=
-github.com/lucas-clemente/quic-go v0.26.0/go.mod h1:AzgQoPda7N+3IqMMMkywBKggIFo2KT6pfnlrQ2QieeI=
+github.com/lucas-clemente/quic-go v0.28.1 h1:Uo0lvVxWg5la9gflIF9lwa39ONq85Xq2D91YNEIslzU=
+github.com/lucas-clemente/quic-go v0.28.1/go.mod h1:oGz5DKK41cJt5+773+BSO9BXDsREY4HLf7+0odGAPO0=
 github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
 github.com/lxn/walk v0.0.0-20210112085537-c389da54e794/go.mod h1:E23UucZGqpuUANJooIbHWCufXvOcT6E7Stq81gU+CSQ=
 github.com/lxn/win v0.0.0-20210218163916-a377121e959e/go.mod h1:KxxjdtRkfNoYDCUP5ryK7XJJNTnpC8atvtmTheChOtk=
@@ -465,10 +465,12 @@ github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN
 github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc=
 github.com/marten-seemann/qtls-go1-16 v0.1.5 h1:o9JrYPPco/Nukd/HpOHMHZoBDXQqoNtUCmny98/1uqQ=
 github.com/marten-seemann/qtls-go1-16 v0.1.5/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk=
-github.com/marten-seemann/qtls-go1-17 v0.1.1 h1:DQjHPq+aOzUeh9/lixAGunn6rIOQyWChPSI4+hgW7jc=
-github.com/marten-seemann/qtls-go1-17 v0.1.1/go.mod h1:C2ekUKcDdz9SDWxec1N/MvcXBpaX9l3Nx67XaR84L5s=
-github.com/marten-seemann/qtls-go1-18 v0.1.1 h1:qp7p7XXUFL7fpBvSS1sWD+uSqPvzNQK43DH+/qEkj0Y=
-github.com/marten-seemann/qtls-go1-18 v0.1.1/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4=
+github.com/marten-seemann/qtls-go1-17 v0.1.2 h1:JADBlm0LYiVbuSySCHeY863dNkcpMmDR7s0bLKJeYlQ=
+github.com/marten-seemann/qtls-go1-17 v0.1.2/go.mod h1:C2ekUKcDdz9SDWxec1N/MvcXBpaX9l3Nx67XaR84L5s=
+github.com/marten-seemann/qtls-go1-18 v0.1.2 h1:JH6jmzbduz0ITVQ7ShevK10Av5+jBEKAHMntXmIV7kM=
+github.com/marten-seemann/qtls-go1-18 v0.1.2/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4=
+github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1 h1:7m/WlWcSROrcK5NxuXaxYD32BZqe/LEEnBrWcH/cOqQ=
+github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI=
 github.com/matrix-org/dugong v0.0.0-20210921133753-66e6b1c67e2e h1:DP5RC0Z3XdyBEW5dKt8YPeN6vZbm6OzVaGVp7f1BQRM=
 github.com/matrix-org/dugong v0.0.0-20210921133753-66e6b1c67e2e/go.mod h1:NgPCr+UavRGH6n5jmdX8DuqFZ4JiCWIJoZiuhTRLSUg=
 github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91 h1:s7fexw2QV3YD/fRrzEDPNGgTlJlvXY0EHHnT87wF3OA=
@@ -476,10 +478,10 @@ github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91/go.mod h1
 github.com/matrix-org/gomatrix v0.0.0-20190528120928-7df988a63f26/go.mod h1:3fxX6gUjWyI/2Bt7J1OLhpCzOfO/bB3AiX0cJtEKud0=
 github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16 h1:ZtO5uywdd5dLDCud4r0r55eP4j9FuUNpl60Gmntcop4=
 github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16/go.mod h1:/gBX06Kw0exX1HrwmoBibFA98yBk/jxKpGVeyQbff+s=
-github.com/matrix-org/gomatrixserverlib v0.0.0-20220713083127-fc2ea1e62e46 h1:5X/kXY3nwqKOwwrE9tnMKrjbsi3PHigQYvrvDBSntO8=
-github.com/matrix-org/gomatrixserverlib v0.0.0-20220713083127-fc2ea1e62e46/go.mod h1:jX38yp3SSLJNftBg3PXU1ayd0PCLIiDHQ4xAc9DIixk=
-github.com/matrix-org/pinecone v0.0.0-20220708135211-1ce778fcde6a h1:DdG8vXMlZ65EAtc4V+3t7zHZ2Gqs24pSnyXS+4BRHUs=
-github.com/matrix-org/pinecone v0.0.0-20220708135211-1ce778fcde6a/go.mod h1:ulJzsVOTssIVp1j/m5eI//4VpAGDkMt5NrRuAVX7wpc=
+github.com/matrix-org/gomatrixserverlib v0.0.0-20220725104114-b6003e522771 h1:ZIPHFIPNDS9dmEbPEiJbNmyCGJtn9exfpLC7JOcn/bE=
+github.com/matrix-org/gomatrixserverlib v0.0.0-20220725104114-b6003e522771/go.mod h1:jX38yp3SSLJNftBg3PXU1ayd0PCLIiDHQ4xAc9DIixk=
+github.com/matrix-org/pinecone v0.0.0-20220803093810-b7a830c08fb9 h1:ed8yvWhTLk7+sNeK/eOZRTvESFTOHDRevoRoyeqPtvY=
+github.com/matrix-org/pinecone v0.0.0-20220803093810-b7a830c08fb9/go.mod h1:P4MqPf+u83OPulPJ+XTbSDbbWrdFYNY4LZ/B1PIduFE=
 github.com/matrix-org/util v0.0.0-20190711121626-527ce5ddefc7/go.mod h1:vVQlW/emklohkZnOPwD3LrZUBqdfsbiyO3p1lNV8F6U=
 github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4 h1:eCEHXWDv9Rm335MSuB49mFUK44bwZPFSDde3ORE3syk=
 github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4/go.mod h1:vVQlW/emklohkZnOPwD3LrZUBqdfsbiyO3p1lNV8F6U=
@@ -532,8 +534,12 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
 github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
-github.com/nats-io/jwt/v2 v2.2.1-0.20220330180145-442af02fd36a h1:lem6QCvxR0Y28gth9P+wV2K/zYUUAkJ+55U8cpS0p5I=
-github.com/nats-io/jwt/v2 v2.2.1-0.20220330180145-442af02fd36a/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k=
+github.com/nats-io/jwt/v2 v2.3.0 h1:z2mA1a7tIf5ShggOFlR1oBPgd6hGqcDYsISxZByUzdI=
+github.com/nats-io/jwt/v2 v2.3.0/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k=
+github.com/nats-io/nats-server/v2 v2.8.5-0.20220731184415-903a06a5b4ee h1:vAtoZ+LW6eIUjkCWWwO1DZ6o16UGrVOG+ot/AkwejO8=
+github.com/nats-io/nats-server/v2 v2.8.5-0.20220731184415-903a06a5b4ee/go.mod h1:3Yg3ApyQxPlAs1KKHKV5pobV5VtZk+TtOiUJx/iqkkg=
+github.com/nats-io/nats.go v1.16.1-0.20220731182438-87bbea85922b h1:CE9wSYLvwq8aC/0+6zH8lhhtZYvJ9p8PzwvZeYgdBc0=
+github.com/nats-io/nats.go v1.16.1-0.20220731182438-87bbea85922b/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
 github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8=
 github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
 github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
@@ -541,10 +547,6 @@ github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OS
 github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
 github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
 github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
-github.com/neilalexander/nats-server/v2 v2.8.3-0.20220513095553-73a9a246d34f h1:Fc+TjdV1mOy0oISSzfoxNWdTqjg7tN/Vdgf+B2cwvdo=
-github.com/neilalexander/nats-server/v2 v2.8.3-0.20220513095553-73a9a246d34f/go.mod h1:vIdpKz3OG+DCg4q/xVPdXHoztEyKDWRtykQ4N7hd7C4=
-github.com/neilalexander/nats.go v1.13.1-0.20220621084451-ac518c356673 h1:TcKfa3Tf0qwUotv63PQVu2d1bBoLi2iEA4RHVMGDh5M=
-github.com/neilalexander/nats.go v1.13.1-0.20220621084451-ac518c356673/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
 github.com/neilalexander/utp v0.1.1-0.20210727203401-54ae7b1cd5f9 h1:lrVQzBtkeQEGGYUHwSX1XPe1E5GL6U3KYCNe2G4bncQ=
 github.com/neilalexander/utp v0.1.1-0.20210727203401-54ae7b1cd5f9/go.mod h1:NPHGhPc0/wudcaCqL/H5AOddkRf8GPRhzOujuUKGQu8=
 github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ=
@@ -595,8 +597,6 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/pressly/goose v2.7.0+incompatible h1:PWejVEv07LCerQEzMMeAtjuyCKbyprZ/LBa6K5P0OCQ=
-github.com/pressly/goose v2.7.0+incompatible/go.mod h1:m+QHWCqxR3k8D9l7qfzuC/djtlfzxr34mozWDYEu1z8=
 github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
@@ -746,7 +746,6 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
 go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
@@ -866,8 +865,8 @@ golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qx
 golang.org/x/net v0.0.0-20210927181540-4e4d966f7476/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211011170408-caeb26a5c8c0/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211101193420-4a448f8816b3/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220524220425-1d687d428aca h1:xTaFYiPROfpPhqrfTIDXj0ri1SpfueYT951s4bAuDO8=
-golang.org/x/net v0.0.0-20220524220425-1d687d428aca/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e h1:TsQ7F31D3bUCLeqPT0u+yjp1guoArKaNKmCr22PYgTQ=
+golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -969,6 +968,7 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbuf
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM=
 golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -979,6 +979,7 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.3.8-0.20211004125949-5bd84dd9b33b h1:NXqSWXSRUSCaFuvitrWtU169I3876zRTalMRbfd6LL0=
 golang.org/x/text v0.3.8-0.20211004125949-5bd84dd9b33b/go.mod h1:EFNZuWvGYxIRUEX+K8UmCFwYmZjqcrnq15ZuVldZkZ0=
 golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -987,8 +988,8 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 h1:GZokNIeuVkl3aZHJchRrr13WCsols02MLUcz1U9is6M=
-golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20220411224347-583f2d630306 h1:+gHMid33q6pen7kv9xvT+JRinntgeXO2AeZVd0AWD3w=
+golang.org/x/time v0.0.0-20220411224347-583f2d630306/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
18 internal/caching/cache_eventstatekeys.go Normal file

@@ -0,0 +1,18 @@
+package caching
+
+import "github.com/matrix-org/dendrite/roomserver/types"
+
+// EventStateKeyCache contains the subset of functions needed for
+// a room event state key cache.
+type EventStateKeyCache interface {
+	GetEventStateKey(eventStateKeyNID types.EventStateKeyNID) (string, bool)
+	StoreEventStateKey(eventStateKeyNID types.EventStateKeyNID, eventStateKey string)
+}
+
+func (c Caches) GetEventStateKey(eventStateKeyNID types.EventStateKeyNID) (string, bool) {
+	return c.RoomServerStateKeys.Get(eventStateKeyNID)
+}
+
+func (c Caches) StoreEventStateKey(eventStateKeyNID types.EventStateKeyNID, eventStateKey string) {
+	c.RoomServerStateKeys.Set(eventStateKeyNID, eventStateKey)
+}
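The new cache follows the same Get/Store shape as the other roomserver caches. A brief usage sketch, assuming an existing caching.Caches value; the NID and user ID below are arbitrary illustration values:

package example

import (
	"fmt"

	"github.com/matrix-org/dendrite/internal/caching"
	"github.com/matrix-org/dendrite/roomserver/types"
)

// showStateKeyCache stores a state key for an NID and reads it back.
func showStateKeyCache(c caching.Caches) {
	nid := types.EventStateKeyNID(42)
	c.StoreEventStateKey(nid, "@alice:example.org")
	if key, ok := c.GetEventStateKey(nid); ok {
		fmt.Println("cached state key:", key)
	}
}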
@@ -14,6 +14,7 @@ type lazyLoadingCacheKey struct {
 type LazyLoadCache interface {
 	StoreLazyLoadedUser(device *userapi.Device, roomID, userID, eventID string)
 	IsLazyLoadedUserCached(device *userapi.Device, roomID, userID string) (string, bool)
+	InvalidateLazyLoadedUser(device *userapi.Device, roomID, userID string)
 }

 func (c Caches) StoreLazyLoadedUser(device *userapi.Device, roomID, userID, eventID string) {
@@ -33,3 +34,12 @@ func (c Caches) IsLazyLoadedUserCached(device *userapi.Device, roomID, userID st
 		TargetUserID: userID,
 	})
 }
+
+func (c Caches) InvalidateLazyLoadedUser(device *userapi.Device, roomID, userID string) {
+	c.LazyLoading.Unset(lazyLoadingCacheKey{
+		UserID:       device.UserID,
+		DeviceID:     device.ID,
+		RoomID:       roomID,
+		TargetUserID: userID,
+	})
+}
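The new Invalidate method removes a single lazy-loading entry keyed by device, room, and target user, which is useful when a cached membership event goes stale. A small sketch under stated assumptions — the room and user IDs are hypothetical, and the userapi import path is assumed to be the one used elsewhere in the repository:

package example

import (
	"github.com/matrix-org/dendrite/internal/caching"
	userapi "github.com/matrix-org/dendrite/userapi/api"
)

// dropStaleMembership forgets the cached membership event for one user in one
// room so that the next sync lazy-loads it again.
func dropStaleMembership(c caching.Caches, device *userapi.Device) {
	c.InvalidateLazyLoadedUser(device, "!room:example.org", "@bob:example.org")
}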
@@ -1,33 +0,0 @@
-package caching
-
-import (
-	"github.com/matrix-org/dendrite/roomserver/types"
-)
-
-// WARNING: This cache is mutable because it's entirely possible that
-// the IsStub or StateSnaphotNID fields can change, even though the
-// room version and room NID fields will not. This is only safe because
-// the RoomInfoCache is used ONLY within the roomserver and because it
-// will be kept up-to-date by the latest events updater. It MUST NOT be
-// used from other components as we currently have no way to invalidate
-// the cache in downstream components.
-
-// RoomInfosCache contains the subset of functions needed for
-// a room Info cache. It must only be used from the roomserver only
-// It is not safe for use from other components.
-type RoomInfoCache interface {
-	GetRoomInfo(roomID string) (roomInfo *types.RoomInfo, ok bool)
-	StoreRoomInfo(roomID string, roomInfo *types.RoomInfo)
-}
-
-// GetRoomInfo must only be called from the roomserver only. It is not
-// safe for use from other components.
-func (c Caches) GetRoomInfo(roomID string) (*types.RoomInfo, bool) {
-	return c.RoomInfos.Get(roomID)
-}
-
-// StoreRoomInfo must only be called from the roomserver only. It is not
-// safe for use from other components.
-func (c Caches) StoreRoomInfo(roomID string, roomInfo *types.RoomInfo) {
-	c.RoomInfos.Set(roomID, roomInfo)
-}
@@ -7,8 +7,8 @@ import (
 type RoomServerCaches interface {
 	RoomServerNIDsCache
 	RoomVersionCache
-	RoomInfoCache
 	RoomServerEventsCache
+	EventStateKeyCache
 }

 // RoomServerNIDsCache contains the subset of functions needed for
@@ -19,9 +19,9 @@ type RoomServerNIDsCache interface {
 }

 func (c Caches) GetRoomServerRoomID(roomNID types.RoomNID) (string, bool) {
-	return c.RoomServerRoomIDs.Get(int64(roomNID))
+	return c.RoomServerRoomIDs.Get(roomNID)
 }

 func (c Caches) StoreRoomServerRoomID(roomNID types.RoomNID, roomID string) {
-	c.RoomServerRoomIDs.Set(int64(roomNID), roomID)
+	c.RoomServerRoomIDs.Set(roomNID, roomID)
 }
@@ -23,16 +23,16 @@ import (
 // different implementations as long as they satisfy the Cache
 // interface.
 type Caches struct {
 	RoomVersions Cache[string, gomatrixserverlib.RoomVersion] // room ID -> room version
 	ServerKeys Cache[string, gomatrixserverlib.PublicKeyLookupResult] // server name -> server keys
 	RoomServerRoomNIDs Cache[string, types.RoomNID] // room ID -> room NID
-	RoomServerRoomIDs Cache[int64, string] // room NID -> room ID
+	RoomServerRoomIDs Cache[types.RoomNID, string] // room NID -> room ID
 	RoomServerEvents Cache[int64, *gomatrixserverlib.Event] // event NID -> event
-	RoomInfos Cache[string, *types.RoomInfo] // room ID -> room info
+	RoomServerStateKeys Cache[types.EventStateKeyNID, string] // event NID -> event state key
 	FederationPDUs Cache[int64, *gomatrixserverlib.HeaderedEvent] // queue NID -> PDU
 	FederationEDUs Cache[int64, *gomatrixserverlib.EDU] // queue NID -> EDU
 	SpaceSummaryRooms Cache[string, gomatrixserverlib.MSC2946SpacesResponse] // room ID -> space response
 	LazyLoading Cache[lazyLoadingCacheKey, string] // composite key -> event ID
 }

 // Cache is the interface that an implementation must satisfy.
@@ -44,7 +44,7 @@ type Cache[K keyable, T any] interface {

 type keyable interface {
 	// from https://github.com/dgraph-io/ristretto/blob/8e850b710d6df0383c375ec6a7beae4ce48fc8d5/z/z.go#L34
-	uint64 | string | []byte | byte | int | int32 | uint32 | int64 | lazyLoadingCacheKey
+	~uint64 | ~string | []byte | byte | ~int | ~int32 | ~uint32 | ~int64 | lazyLoadingCacheKey
 }

 type costable interface {
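The tilde added to the keyable constraint is what lets named numeric types such as types.RoomNID (defined over int64) be used directly as cache keys: `~int64` admits any type whose underlying type is int64, while a bare `int64` admits only int64 itself. A standalone sketch of the difference, using hypothetical types rather than the real Dendrite ones:

package main

import "fmt"

// RoomNID mimics a named NID type with underlying type int64.
type RoomNID int64

// exactKeys only accepts int64 itself.
type exactKeys interface{ int64 }

// approxKeys accepts any type whose underlying type is int64.
type approxKeys interface{ ~int64 }

func exact[K exactKeys](k K) string  { return fmt.Sprint(k) }
func approx[K approxKeys](k K) string { return fmt.Sprint(k) }

func main() {
	nid := RoomNID(7)
	// exact(nid) would not compile: RoomNID does not satisfy exactKeys.
	fmt.Println(exact(int64(7)), approx(nid))
}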
@@ -35,18 +35,18 @@ const (
 	roomNIDsCache
 	roomIDsCache
 	roomEventsCache
-	roomInfosCache
 	federationPDUsCache
 	federationEDUsCache
 	spaceSummaryRoomsCache
 	lazyLoadingCache
+	eventStateKeyCache
 )

 func NewRistrettoCache(maxCost config.DataUnit, maxAge time.Duration, enablePrometheus bool) *Caches {
 	cache, err := ristretto.NewCache(&ristretto.Config{
-		NumCounters: 1e5, // 10x number of expected cache items, affects bloom filter size, gives us room for 10,000 currently
+		NumCounters: int64((maxCost / 1024) * 10), // 10 counters per 1KB data, affects bloom filter size
 		BufferItems: 64, // recommended by the ristretto godocs as a sane buffer size value
-		MaxCost: int64(maxCost),
+		MaxCost: int64(maxCost), // max cost is in bytes, as per the Dendrite config
 		Metrics: true,
 		KeyToHash: func(key interface{}) (uint64, uint64) {
 			return z.KeyToHash(key)
@@ -88,7 +88,7 @@ func NewRistrettoCache(maxCost config.DataUnit, maxAge time.Duration, enableProm
 			Prefix: roomNIDsCache,
 			MaxAge: maxAge,
 		},
-		RoomServerRoomIDs: &RistrettoCachePartition[int64, string]{ // room NID -> room ID
+		RoomServerRoomIDs: &RistrettoCachePartition[types.RoomNID, string]{ // room NID -> room ID
 			cache: cache,
 			Prefix: roomIDsCache,
 			MaxAge: maxAge,
@@ -100,11 +100,10 @@ func NewRistrettoCache(maxCost config.DataUnit, maxAge time.Duration, enableProm
 				MaxAge: maxAge,
 			},
 		},
-		RoomInfos: &RistrettoCachePartition[string, *types.RoomInfo]{ // room ID -> room info
+		RoomServerStateKeys: &RistrettoCachePartition[types.EventStateKeyNID, string]{ // event NID -> event state key
 			cache: cache,
-			Prefix: roomInfosCache,
-			Mutable: true,
-			MaxAge: maxAge,
+			Prefix: eventStateKeyCache,
+			MaxAge: maxAge,
 		},
 		FederationPDUs: &RistrettoCostedCachePartition[int64, *gomatrixserverlib.HeaderedEvent]{ // queue NID -> PDU
 			&RistrettoCachePartition[int64, *gomatrixserverlib.HeaderedEvent]{
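The counter sizing above is now derived from the configured cache budget rather than a fixed 1e5: roughly ten ristretto counters per kilobyte of max cost. For instance, a 64 MiB budget gives (64*1024*1024/1024)*10 = 655,360 counters. A one-line check of that arithmetic; the 64 MiB figure is chosen purely for illustration:

package main

import "fmt"

func main() {
	maxCost := int64(64 * 1024 * 1024) // hypothetical 64 MiB cache budget
	fmt.Println((maxCost / 1024) * 10) // prints 655360
}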
@@ -27,9 +27,10 @@ import (

 	"github.com/matrix-org/util"

-	"github.com/matrix-org/dendrite/setup/config"
 	"github.com/matrix-org/dugong"
 	"github.com/sirupsen/logrus"
+
+	"github.com/matrix-org/dendrite/setup/config"
 )

 type utcFormatter struct {
@@ -145,7 +146,7 @@ func setupFileHook(hook config.LogrusHook, level logrus.Level, componentName str
 	})
 }

-//CloseAndLogIfError Closes io.Closer and logs the error if any
+// CloseAndLogIfError Closes io.Closer and logs the error if any
 func CloseAndLogIfError(ctx context.Context, closer io.Closer, message string) {
 	if closer == nil {
 		return
@@ -18,7 +18,7 @@
 package internal

 import (
-	"io/ioutil"
+	"io"
 	"log/syslog"

 	"github.com/MFAshby/stdemuxerhook"
@@ -63,7 +63,7 @@ func SetupHookLogging(hooks []config.LogrusHook, componentName string) {
 		setupStdLogHook(logrus.InfoLevel)
 	}
 	// Hooks are now configured for stdout/err, so throw away the default logger output
-	logrus.SetOutput(ioutil.Discard)
+	logrus.SetOutput(io.Discard)
 }

 func checkSyslogHookParams(params map[string]interface{}) {
@@ -1,130 +1,142 @@
+// Copyright 2022 The Matrix.org Foundation C.I.C.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package sqlutil

 import (
+	"context"
 	"database/sql"
 	"fmt"
-	"runtime"
-	"sort"
+	"sync"
+	"time"

-	"github.com/matrix-org/dendrite/setup/config"
-	"github.com/pressly/goose"
+	"github.com/matrix-org/dendrite/internal"
+	"github.com/sirupsen/logrus"
 )

-type Migrations struct {
-	registeredGoMigrations map[int64]*goose.Migration
+const createDBMigrationsSQL = "" +
+	"CREATE TABLE IF NOT EXISTS db_migrations (" +
+	" version TEXT PRIMARY KEY NOT NULL," +
+	" time TEXT NOT NULL," +
+	" dendrite_version TEXT NOT NULL" +
+	");"
+
+const insertVersionSQL = "" +
+	"INSERT INTO db_migrations (version, time, dendrite_version)" +
+	" VALUES ($1, $2, $3)"
+
+const selectDBMigrationsSQL = "SELECT version FROM db_migrations"
+
+// Migration defines a migration to be run.
+type Migration struct {
+	// Version is a simple description/name of this migration.
+	Version string
+	// Up defines the function to execute for an upgrade.
+	Up func(ctx context.Context, txn *sql.Tx) error
+	// Down defines the function to execute for a downgrade (not implemented yet).
+	Down func(ctx context.Context, txn *sql.Tx) error
 }

-func NewMigrations() *Migrations {
-	return &Migrations{
-		registeredGoMigrations: make(map[int64]*goose.Migration),
+// Migrator
+type Migrator struct {
+	db *sql.DB
+	migrations []Migration
+	knownMigrations map[string]struct{}
+	mutex *sync.Mutex
+}
+
+// NewMigrator creates a new DB migrator.
+func NewMigrator(db *sql.DB) *Migrator {
+	return &Migrator{
+		db: db,
+		migrations: []Migration{},
+		knownMigrations: make(map[string]struct{}),
+		mutex: &sync.Mutex{},
 	}
 }

-// Copy-pasted from goose directly to store migrations into a map we control
-// AddMigration adds a migration.
-func (m *Migrations) AddMigration(up func(*sql.Tx) error, down func(*sql.Tx) error) {
-	_, filename, _, _ := runtime.Caller(1)
-	m.AddNamedMigration(filename, up, down)
-}
-
-// AddNamedMigration : Add a named migration.
-func (m *Migrations) AddNamedMigration(filename string, up func(*sql.Tx) error, down func(*sql.Tx) error) {
-	v, _ := goose.NumericComponent(filename)
-	migration := &goose.Migration{Version: v, Next: -1, Previous: -1, Registered: true, UpFn: up, DownFn: down, Source: filename}
-
-	if existing, ok := m.registeredGoMigrations[v]; ok {
-		panic(fmt.Sprintf("failed to add migration %q: version conflicts with %q", filename, existing.Source))
+// AddMigrations appends migrations to the list of migrations. Migrations are executed
+// in the order they are added to the list. De-duplicates migrations using their Version field.
+func (m *Migrator) AddMigrations(migrations ...Migration) {
+	m.mutex.Lock()
+	defer m.mutex.Unlock()
+	for _, mig := range migrations {
+		if _, ok := m.knownMigrations[mig.Version]; !ok {
+			m.migrations = append(m.migrations, mig)
+			m.knownMigrations[mig.Version] = struct{}{}
+		}
 	}
-
-	m.registeredGoMigrations[v] = migration
 }

-// RunDeltas up to the latest version.
-func (m *Migrations) RunDeltas(db *sql.DB, props *config.DatabaseOptions) error {
-	maxVer := goose.MaxVersion
-	minVer := int64(0)
-	migrations, err := m.collect(minVer, maxVer)
+// Up executes all migrations in order they were added.
+func (m *Migrator) Up(ctx context.Context) error {
+	var (
+		err error
+		dendriteVersion = internal.VersionString()
+	)
+	// ensure there is a table for known migrations
+	executedMigrations, err := m.ExecutedMigrations(ctx)
 	if err != nil {
-		return fmt.Errorf("runDeltas: Failed to collect migrations: %w", err)
+		return fmt.Errorf("unable to create/get migrations: %w", err)
 	}
-	if props.ConnectionString.IsPostgres() {
-		if err = goose.SetDialect("postgres"); err != nil {
-			return err
-		}
-	} else if props.ConnectionString.IsSQLite() {
-		if err = goose.SetDialect("sqlite3"); err != nil {
-			return err
-		}
-	} else {
-		return fmt.Errorf("unknown connection string: %s", props.ConnectionString)
-	}
-	for {
-		current, err := goose.EnsureDBVersion(db)
-		if err != nil {
-			return fmt.Errorf("runDeltas: Failed to EnsureDBVersion: %w", err)
-		}

-		next, err := migrations.Next(current)
-		if err != nil {
-			if err == goose.ErrNoNextVersion {
-				return nil
+	return WithTransaction(m.db, func(txn *sql.Tx) error {
+		for i := range m.migrations {
+			now := time.Now().UTC().Format(time.RFC3339)
+			migration := m.migrations[i]
+			logrus.Debugf("Executing database migration '%s'", migration.Version)
+			// Skip migration if it was already executed
+			if _, ok := executedMigrations[migration.Version]; ok {
+				continue
+			}
+			err = migration.Up(ctx, txn)
+			if err != nil {
+				return fmt.Errorf("unable to execute migration '%s': %w", migration.Version, err)
+			}
+			_, err = txn.ExecContext(ctx, insertVersionSQL,
+				migration.Version,
+				now,
+				dendriteVersion,
+			)
+			if err != nil {
+				return fmt.Errorf("unable to insert executed migrations: %w", err)
 			}
-
-			return fmt.Errorf("runDeltas: Failed to load next migration to %+v : %w", next, err)
 		}
-		if err = next.Up(db); err != nil {
-			return fmt.Errorf("runDeltas: Failed run migration: %w", err)
-		}
-	}
+		return nil
+	})
 }

-func (m *Migrations) collect(current, target int64) (goose.Migrations, error) {
-	var migrations goose.Migrations
-
-	// Go migrations registered via goose.AddMigration().
-	for _, migration := range m.registeredGoMigrations {
-		v, err := goose.NumericComponent(migration.Source)
-		if err != nil {
-			return nil, err
-		}
-		if versionFilter(v, current, target) {
-			migrations = append(migrations, migration)
-		}
-	}
-
-	migrations = sortAndConnectMigrations(migrations)
-
-	return migrations, nil
-}
-
-func sortAndConnectMigrations(migrations goose.Migrations) goose.Migrations {
-	sort.Sort(migrations)
-
-	// now that we're sorted in the appropriate direction,
-	// populate next and previous for each migration
-	for i, m := range migrations {
-		prev := int64(-1)
-		if i > 0 {
-			prev = migrations[i-1].Version
-			migrations[i-1].Next = m.Version
-		}
-		migrations[i].Previous = prev
-	}
-
-	return migrations
-}
-
-func versionFilter(v, current, target int64) bool {
-
-	if target > current {
-		return v > current && v <= target
-	}
-
-	if target < current {
-		return v <= current && v > target
-	}
-
-	return false
+// ExecutedMigrations returns a map with already executed migrations in addition to creating the
+// migrations table, if it doesn't exist.
+func (m *Migrator) ExecutedMigrations(ctx context.Context) (map[string]struct{}, error) {
+	result := make(map[string]struct{})
+	_, err := m.db.ExecContext(ctx, createDBMigrationsSQL)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create db_migrations: %w", err)
+	}
+	rows, err := m.db.QueryContext(ctx, selectDBMigrationsSQL)
+	if err != nil {
+		return nil, fmt.Errorf("unable to query db_migrations: %w", err)
+	}
+	defer internal.CloseAndLogIfError(ctx, rows, "ExecutedMigrations: rows.close() failed")
+	var version string
+	for rows.Next() {
+		if err = rows.Scan(&version); err != nil {
+			return nil, fmt.Errorf("unable to scan version: %w", err)
+		}
+		result[version] = struct{}{}
+	}
+
+	return result, rows.Err()
 }
112 internal/sqlutil/migrate_test.go Normal file

@@ -0,0 +1,112 @@
+package sqlutil_test
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"reflect"
+	"testing"
+
+	"github.com/matrix-org/dendrite/internal/sqlutil"
+	"github.com/matrix-org/dendrite/test"
+	_ "github.com/mattn/go-sqlite3"
+)
+
+var dummyMigrations = []sqlutil.Migration{
+	{
+		Version: "init",
+		Up: func(ctx context.Context, txn *sql.Tx) error {
+			_, err := txn.ExecContext(ctx, "CREATE TABLE IF NOT EXISTS dummy ( test TEXT );")
+			return err
+		},
+	},
+	{
+		Version: "v2",
+		Up: func(ctx context.Context, txn *sql.Tx) error {
+			_, err := txn.ExecContext(ctx, "ALTER TABLE dummy ADD COLUMN test2 TEXT;")
+			return err
+		},
+	},
+	{
+		Version: "v2", // duplicate, this migration will be skipped
+		Up: func(ctx context.Context, txn *sql.Tx) error {
+			_, err := txn.ExecContext(ctx, "ALTER TABLE dummy ADD COLUMN test2 TEXT;")
+			return err
+		},
+	},
+	{
+		Version: "multiple execs",
+		Up: func(ctx context.Context, txn *sql.Tx) error {
+			_, err := txn.ExecContext(ctx, "ALTER TABLE dummy ADD COLUMN test3 TEXT;")
+			if err != nil {
+				return err
+			}
+			_, err = txn.ExecContext(ctx, "ALTER TABLE dummy ADD COLUMN test4 TEXT;")
+			return err
+		},
+	},
+}
+
+var failMigration = sqlutil.Migration{
+	Version: "iFail",
+	Up: func(ctx context.Context, txn *sql.Tx) error {
+		return fmt.Errorf("iFail")
+	},
+	Down: nil,
+}
+
+func Test_migrations_Up(t *testing.T) {
+	withFail := append(dummyMigrations, failMigration)
+
+	tests := []struct {
+		name string
+		migrations []sqlutil.Migration
+		wantResult map[string]struct{}
+		wantErr bool
+	}{
+		{
+			name: "dummy migration",
+			migrations: dummyMigrations,
+			wantResult: map[string]struct{}{
+				"init": {},
+				"v2": {},
+				"multiple execs": {},
+			},
+		},
+		{
+			name: "with fail",
+			migrations: withFail,
+			wantErr: true,
+		},
+	}
+
+	ctx := context.Background()
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			test.WithAllDatabases(t, func(t *testing.T, dbType test.DBType) {
+				conStr, close := test.PrepareDBConnectionString(t, dbType)
+				defer close()
+				driverName := "sqlite3"
+				if dbType == test.DBTypePostgres {
+					driverName = "postgres"
+				}
+				db, err := sql.Open(driverName, conStr)
+				if err != nil {
+					t.Errorf("unable to open database: %v", err)
+				}
+				m := sqlutil.NewMigrator(db)
+				m.AddMigrations(tt.migrations...)
+				if err = m.Up(ctx); (err != nil) != tt.wantErr {
+					t.Errorf("Up() error = %v, wantErr %v", err, tt.wantErr)
+				}
+				result, err := m.ExecutedMigrations(ctx)
+				if err != nil {
+					t.Errorf("unable to get executed migrations: %v", err)
+				}
+				if !tt.wantErr && !reflect.DeepEqual(result, tt.wantResult) {
+					t.Errorf("expected: %+v, got %v", tt.wantResult, result)
+				}
+			})
+		})
+	}
+}
@@ -16,8 +16,8 @@ var build string

 const (
 	VersionMajor = 0
-	VersionMinor = 8
-	VersionPatch = 9
+	VersionMinor = 9
+	VersionPatch = 1
 	VersionTag   = "" // example: "rc1"
 )
@@ -22,12 +22,13 @@ import (
 	"sync"
 	"time"

-	fedsenderapi "github.com/matrix-org/dendrite/federationapi/api"
-	"github.com/matrix-org/dendrite/keyserver/api"
 	"github.com/matrix-org/gomatrixserverlib"
 	"github.com/matrix-org/util"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/sirupsen/logrus"
+
+	fedsenderapi "github.com/matrix-org/dendrite/federationapi/api"
+	"github.com/matrix-org/dendrite/keyserver/api"
 )

 var (
@@ -66,12 +67,14 @@ func init() {
 // - We don't have unbounded growth in proportion to the number of servers (this is more important in a P2P world where
 //   we have many many servers)
 // - We can adjust concurrency (at the cost of memory usage) by tuning N, to accommodate mobile devices vs servers.
+//
 // The downsides are that:
 // - Query requests can get queued behind other servers if they hash to the same worker, even if there are other free
 //   workers elsewhere. Whilst suboptimal, provided we cap how long a single request can last (e.g using context timeouts)
 //   we guarantee we will get around to it. Also, more users on a given server does not increase the number of requests
 //   (as /keys/query allows multiple users to be specified) so being stuck behind matrix.org won't materially be any worse
 //   than being stuck behind foo.bar
+//
 // In the event that the query fails, a lock is acquired and the server name along with the time to wait before retrying is
 // set in a map. A restarter goroutine periodically probes this map and injects servers which are ready to be retried.
 type DeviceListUpdater struct {
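The comment block above describes sharding key-query work across N workers by hashing the server name, so all work for one server stays on one worker. A small self-contained sketch of that idea follows; it uses an FNV hash and is only an illustration, not Dendrite's actual implementation.

package main

import (
	"fmt"
	"hash/fnv"
)

// pickWorker returns the index of the worker that should handle updates for
// a given server name. Hashing keeps all work for one server on the same
// worker, which serialises requests per server.
func pickWorker(serverName string, numWorkers int) int {
	h := fnv.New32a()
	_, _ = h.Write([]byte(serverName))
	return int(h.Sum32()) % numWorkers
}

func main() {
	const n = 4 // number of workers; tune for memory vs concurrency
	for _, s := range []string{"matrix.org", "example.com", "foo.bar"} {
		fmt.Printf("%s -> worker %d\n", s, pickWorker(s, n))
	}
}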
@@ -18,7 +18,7 @@ import (
 	"context"
 	"crypto/ed25519"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/url"
 	"reflect"
@@ -27,8 +27,9 @@ import (
 	"testing"
 	"time"

-	"github.com/matrix-org/dendrite/keyserver/api"
 	"github.com/matrix-org/gomatrixserverlib"
+
+	"github.com/matrix-org/dendrite/keyserver/api"
 )

 var (
@@ -202,7 +203,7 @@ func TestUpdateNoPrevID(t *testing.T) {
 		}
 		return &http.Response{
 			StatusCode: 200,
-			Body: ioutil.NopCloser(strings.NewReader(`
+			Body: io.NopCloser(strings.NewReader(`
 		{
 			"user_id": "` + remoteUserID + `",
 			"stream_id": 5,
@@ -317,7 +318,7 @@ func TestDebounce(t *testing.T) {
 	// now send the response over federation
 	fedCh <- &http.Response{
 		StatusCode: 200,
-		Body: ioutil.NopCloser(strings.NewReader(`
+		Body: io.NopCloser(strings.NewReader(`
 		{
 			"user_id": "` + userID + `",
 			"stream_id": 5,
@@ -18,6 +18,7 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"sync"
 	"time"
@@ -314,6 +315,11 @@ func (a *KeyInternalAPI) QueryKeys(ctx context.Context, req *api.QueryKeysReques
 		for targetKeyID := range masterKey.Keys {
 			sigMap, err := a.DB.CrossSigningSigsForTarget(ctx, req.UserID, targetUserID, targetKeyID)
 			if err != nil {
+				// Stop executing the function if the context was canceled/the deadline was exceeded,
+				// as we can't continue without a valid context.
+				if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+					return
+				}
 				logrus.WithError(err).Errorf("a.DB.CrossSigningSigsForTarget failed")
 				continue
 			}
@@ -335,6 +341,11 @@ func (a *KeyInternalAPI) QueryKeys(ctx context.Context, req *api.QueryKeysReques
 		for targetKeyID, key := range forUserID {
 			sigMap, err := a.DB.CrossSigningSigsForTarget(ctx, req.UserID, targetUserID, gomatrixserverlib.KeyID(targetKeyID))
 			if err != nil {
+				// Stop executing the function if the context was canceled/the deadline was exceeded,
+				// as we can't continue without a valid context.
+				if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+					return
+				}
 				logrus.WithError(err).Errorf("a.DB.CrossSigningSigsForTarget failed")
 				continue
 			}
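The added guard distinguishes context cancellation from ordinary query failures: cancellation aborts the whole request, anything else is logged and skipped. A standalone sketch of the same errors.Is pattern, with a stand-in query function:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// queryOnce simulates a DB call that fails once its context is done.
func queryOnce(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-time.After(10 * time.Millisecond):
		return nil
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	if err := queryOnce(ctx); err != nil {
		// Abort outright on cancellation/deadline; only log-and-continue
		// for other errors, mirroring the change above.
		if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
			fmt.Println("context is gone, stop processing:", err)
			return
		}
		fmt.Println("non-fatal error, continue:", err)
	}
}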
@@ -21,6 +21,7 @@ import (

 	"github.com/matrix-org/dendrite/internal"
 	"github.com/matrix-org/dendrite/internal/sqlutil"
+	"github.com/matrix-org/dendrite/keyserver/storage/postgres/deltas"
 	"github.com/matrix-org/dendrite/keyserver/storage/tables"
 	"github.com/matrix-org/dendrite/keyserver/types"
 	"github.com/matrix-org/gomatrixserverlib"
@@ -66,6 +67,16 @@ func NewPostgresCrossSigningSigsTable(db *sql.DB) (tables.CrossSigningSigs, erro
 	if err != nil {
 		return nil, err
 	}
+
+	m := sqlutil.NewMigrator(db)
+	m.AddMigrations(sqlutil.Migration{
+		Version: "keyserver: cross signing signature indexes",
+		Up:      deltas.UpFixCrossSigningSignatureIndexes,
+	})
+	if err = m.Up(context.Background()); err != nil {
+		return nil, err
+	}
+
 	return s, sqlutil.StatementList{
 		{&s.selectCrossSigningSigsForTargetStmt, selectCrossSigningSigsForTargetSQL},
 		{&s.upsertCrossSigningSigsForTargetStmt, upsertCrossSigningSigsForTargetSQL},
@@ -15,37 +15,27 @@
 package deltas

 import (
+	"context"
 	"database/sql"
 	"fmt"
-
-	"github.com/matrix-org/dendrite/internal/sqlutil"
-	"github.com/pressly/goose"
 )

-func LoadFromGoose() {
-	goose.AddMigration(UpRefactorKeyChanges, DownRefactorKeyChanges)
-}
-
-func LoadRefactorKeyChanges(m *sqlutil.Migrations) {
-	m.AddMigration(UpRefactorKeyChanges, DownRefactorKeyChanges)
-}
-
-func UpRefactorKeyChanges(tx *sql.Tx) error {
+func UpRefactorKeyChanges(ctx context.Context, tx *sql.Tx) error {
 	// start counting from the last max offset, else 0. We need to do a count(*) first to see if there
 	// even are entries in this table to know if we can query for log_offset. Without the count then
 	// the query to SELECT the max log offset fails on new Dendrite instances as log_offset doesn't
 	// exist on that table. Even though we discard the error, the txn is tainted and gets aborted :/
 	var count int
-	_ = tx.QueryRow(`SELECT count(*) FROM keyserver_key_changes`).Scan(&count)
+	_ = tx.QueryRowContext(ctx, `SELECT count(*) FROM keyserver_key_changes`).Scan(&count)
 	if count > 0 {
 		var maxOffset int64
-		_ = tx.QueryRow(`SELECT coalesce(MAX(log_offset), 0) AS offset FROM keyserver_key_changes`).Scan(&maxOffset)
-		if _, err := tx.Exec(fmt.Sprintf(`CREATE SEQUENCE IF NOT EXISTS keyserver_key_changes_seq START %d`, maxOffset)); err != nil {
+		_ = tx.QueryRowContext(ctx, `SELECT coalesce(MAX(log_offset), 0) AS offset FROM keyserver_key_changes`).Scan(&maxOffset)
+		if _, err := tx.ExecContext(ctx, fmt.Sprintf(`CREATE SEQUENCE IF NOT EXISTS keyserver_key_changes_seq START %d`, maxOffset)); err != nil {
 			return fmt.Errorf("failed to CREATE SEQUENCE for key changes, starting at %d: %s", maxOffset, err)
 		}
 	}

-	_, err := tx.Exec(`
+	_, err := tx.ExecContext(ctx, `
 	-- make the new table
 	DROP TABLE IF EXISTS keyserver_key_changes;
 	CREATE TABLE IF NOT EXISTS keyserver_key_changes (
@@ -60,8 +50,8 @@ func UpRefactorKeyChanges(tx *sql.Tx) error {
 	return nil
 }

-func DownRefactorKeyChanges(tx *sql.Tx) error {
-	_, err := tx.Exec(`
+func DownRefactorKeyChanges(ctx context.Context, tx *sql.Tx) error {
+	_, err := tx.ExecContext(ctx, `
 	-- Drop all data and revert back, we can't keep the data as Kafka offsets determine the numbers
 	DROP SEQUENCE IF EXISTS keyserver_key_changes_seq;
 	DROP TABLE IF EXISTS keyserver_key_changes;
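The deltas above now take a context and run inside the caller's transaction instead of being registered with goose. For orientation, here is a hypothetical Up/Down pair in the same shape; the table and column names are invented and not part of this change.

package deltas

import (
	"context"
	"database/sql"
	"fmt"
)

// UpAddExampleColumn is a hypothetical delta using the same
// func(context.Context, *sql.Tx) error shape as the migrations above.
func UpAddExampleColumn(ctx context.Context, tx *sql.Tx) error {
	if _, err := tx.ExecContext(ctx, `ALTER TABLE example_table ADD COLUMN example_col TEXT;`); err != nil {
		return fmt.Errorf("failed to add example_col: %w", err)
	}
	return nil
}

// DownAddExampleColumn reverts the change.
func DownAddExampleColumn(ctx context.Context, tx *sql.Tx) error {
	if _, err := tx.ExecContext(ctx, `ALTER TABLE example_table DROP COLUMN example_col;`); err != nil {
		return fmt.Errorf("failed to drop example_col: %w", err)
	}
	return nil
}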
@@ -15,18 +15,13 @@
 package deltas

 import (
+	"context"
 	"database/sql"
 	"fmt"
-
-	"github.com/matrix-org/dendrite/internal/sqlutil"
 )

-func LoadFixCrossSigningSignatureIndexes(m *sqlutil.Migrations) {
-	m.AddMigration(UpFixCrossSigningSignatureIndexes, DownFixCrossSigningSignatureIndexes)
-}
-
-func UpFixCrossSigningSignatureIndexes(tx *sql.Tx) error {
-	_, err := tx.Exec(`
+func UpFixCrossSigningSignatureIndexes(ctx context.Context, tx *sql.Tx) error {
+	_, err := tx.ExecContext(ctx, `
 	ALTER TABLE keyserver_cross_signing_sigs DROP CONSTRAINT keyserver_cross_signing_sigs_pkey;
 	ALTER TABLE keyserver_cross_signing_sigs ADD PRIMARY KEY (origin_user_id, origin_key_id, target_user_id, target_key_id);

@@ -38,8 +33,8 @@ func UpFixCrossSigningSignatureIndexes(tx *sql.Tx) error {
 	return nil
 }

-func DownFixCrossSigningSignatureIndexes(tx *sql.Tx) error {
-	_, err := tx.Exec(`
+func DownFixCrossSigningSignatureIndexes(ctx context.Context, tx *sql.Tx) error {
+	_, err := tx.ExecContext(ctx, `
 	ALTER TABLE keyserver_cross_signing_sigs DROP CONSTRAINT keyserver_cross_signing_sigs_pkey;
 	ALTER TABLE keyserver_cross_signing_sigs ADD PRIMARY KEY (origin_user_id, target_user_id, target_key_id);
@@ -18,7 +18,11 @@ import (
 	"context"
 	"database/sql"

+	"github.com/lib/pq"
+
 	"github.com/matrix-org/dendrite/internal"
+	"github.com/matrix-org/dendrite/internal/sqlutil"
+	"github.com/matrix-org/dendrite/keyserver/storage/postgres/deltas"
 	"github.com/matrix-org/dendrite/keyserver/storage/tables"
 )
@@ -55,7 +59,34 @@ func NewPostgresKeyChangesTable(db *sql.DB) (tables.KeyChanges, error) {
 		db: db,
 	}
 	_, err := db.Exec(keyChangesSchema)
-	return s, err
+	if err != nil {
+		return s, err
+	}
+
+	// TODO: Remove when we are sure we are not having goose artefacts in the db
+	// This forces an error, which indicates the migration is already applied, since the
+	// column partition was removed from the table
+	var count int
+	err = db.QueryRow("SELECT partition FROM keyserver_key_changes LIMIT 1;").Scan(&count)
+	if err == nil {
+		m := sqlutil.NewMigrator(db)
+		m.AddMigrations(sqlutil.Migration{
+			Version: "keyserver: refactor key changes",
+			Up:      deltas.UpRefactorKeyChanges,
+		})
+		return s, m.Up(context.Background())
+	} else {
+		switch e := err.(type) {
+		case *pq.Error:
+			// ignore undefined_column (42703) errors, as this is expected at this point
+			if e.Code != "42703" {
+				return nil, err
+			}
+		default:
+			return nil, err
+		}
+	}
+	return s, nil
 }

 func (s *keyChangesStatements) Prepare() (err error) {
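The hunk above probes for the legacy partition column and treats the Postgres "undefined_column" error (SQLSTATE 42703) as "already migrated". A standalone sketch of that error-code check follows; the DSN and table name are placeholders.

package main

import (
	"database/sql"
	"errors"
	"fmt"

	"github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres", "postgres://localhost/example?sslmode=disable") // placeholder DSN
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	var dummy int
	err = db.QueryRow("SELECT partition FROM example_table LIMIT 1;").Scan(&dummy)
	var pqErr *pq.Error
	switch {
	case err == nil:
		fmt.Println("legacy column still present, run the migration")
	case errors.As(err, &pqErr) && pqErr.Code == "42703":
		fmt.Println("column already gone, nothing to do")
	default:
		fmt.Println("unexpected error:", err)
	}
}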
@@ -16,7 +16,6 @@ package postgres

 import (
 	"github.com/matrix-org/dendrite/internal/sqlutil"
-	"github.com/matrix-org/dendrite/keyserver/storage/postgres/deltas"
 	"github.com/matrix-org/dendrite/keyserver/storage/shared"
 	"github.com/matrix-org/dendrite/setup/base"
 	"github.com/matrix-org/dendrite/setup/config"
@@ -53,12 +52,6 @@ func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions)
 	if err != nil {
 		return nil, err
 	}
-	m := sqlutil.NewMigrations()
-	deltas.LoadRefactorKeyChanges(m)
-	deltas.LoadFixCrossSigningSignatureIndexes(m)
-	if err = m.RunDeltas(db, dbProperties); err != nil {
-		return nil, err
-	}
 	if err = kc.Prepare(); err != nil {
 		return nil, err
 	}
@@ -21,6 +21,7 @@ import (

 	"github.com/matrix-org/dendrite/internal"
 	"github.com/matrix-org/dendrite/internal/sqlutil"
+	"github.com/matrix-org/dendrite/keyserver/storage/sqlite3/deltas"
 	"github.com/matrix-org/dendrite/keyserver/storage/tables"
 	"github.com/matrix-org/dendrite/keyserver/types"
 	"github.com/matrix-org/gomatrixserverlib"
@@ -65,6 +66,15 @@ func NewSqliteCrossSigningSigsTable(db *sql.DB) (tables.CrossSigningSigs, error)
 	if err != nil {
 		return nil, err
 	}
+	m := sqlutil.NewMigrator(db)
+	m.AddMigrations(sqlutil.Migration{
+		Version: "keyserver: cross signing signature indexes",
+		Up:      deltas.UpFixCrossSigningSignatureIndexes,
+	})
+	if err = m.Up(context.Background()); err != nil {
+		return nil, err
+	}
+
 	return s, sqlutil.StatementList{
 		{&s.selectCrossSigningSigsForTargetStmt, selectCrossSigningSigsForTargetSQL},
 		{&s.upsertCrossSigningSigsForTargetStmt, upsertCrossSigningSigsForTargetSQL},
@@ -15,28 +15,18 @@
 package deltas

 import (
+	"context"
 	"database/sql"
 	"fmt"
-
-	"github.com/matrix-org/dendrite/internal/sqlutil"
-	"github.com/pressly/goose"
 )

-func LoadFromGoose() {
-	goose.AddMigration(UpRefactorKeyChanges, DownRefactorKeyChanges)
-}
-
-func LoadRefactorKeyChanges(m *sqlutil.Migrations) {
-	m.AddMigration(UpRefactorKeyChanges, DownRefactorKeyChanges)
-}
-
-func UpRefactorKeyChanges(tx *sql.Tx) error {
+func UpRefactorKeyChanges(ctx context.Context, tx *sql.Tx) error {
 	// start counting from the last max offset, else 0.
 	var maxOffset int64
 	var userID string
-	_ = tx.QueryRow(`SELECT user_id, MAX(log_offset) FROM keyserver_key_changes GROUP BY user_id`).Scan(&userID, &maxOffset)
+	_ = tx.QueryRowContext(ctx, `SELECT user_id, MAX(log_offset) FROM keyserver_key_changes GROUP BY user_id`).Scan(&userID, &maxOffset)

-	_, err := tx.Exec(`
+	_, err := tx.ExecContext(ctx, `
 	-- make the new table
 	DROP TABLE IF EXISTS keyserver_key_changes;
 	CREATE TABLE IF NOT EXISTS keyserver_key_changes (
@@ -51,14 +41,14 @@ func UpRefactorKeyChanges(tx *sql.Tx) error {
 	}
 	// to start counting from maxOffset, insert a row with that value
 	if userID != "" {
-		_, err = tx.Exec(`INSERT INTO keyserver_key_changes(change_id, user_id) VALUES($1, $2)`, maxOffset, userID)
+		_, err = tx.ExecContext(ctx, `INSERT INTO keyserver_key_changes(change_id, user_id) VALUES($1, $2)`, maxOffset, userID)
 		return err
 	}
 	return nil
 }

-func DownRefactorKeyChanges(tx *sql.Tx) error {
-	_, err := tx.Exec(`
+func DownRefactorKeyChanges(ctx context.Context, tx *sql.Tx) error {
+	_, err := tx.ExecContext(ctx, `
 	-- Drop all data and revert back, we can't keep the data as Kafka offsets determine the numbers
 	DROP TABLE IF EXISTS keyserver_key_changes;
 	CREATE TABLE IF NOT EXISTS keyserver_key_changes (
@@ -15,18 +15,13 @@
 package deltas

 import (
+	"context"
 	"database/sql"
 	"fmt"
-
-	"github.com/matrix-org/dendrite/internal/sqlutil"
 )

-func LoadFixCrossSigningSignatureIndexes(m *sqlutil.Migrations) {
-	m.AddMigration(UpFixCrossSigningSignatureIndexes, DownFixCrossSigningSignatureIndexes)
-}
-
-func UpFixCrossSigningSignatureIndexes(tx *sql.Tx) error {
-	_, err := tx.Exec(`
+func UpFixCrossSigningSignatureIndexes(ctx context.Context, tx *sql.Tx) error {
+	_, err := tx.ExecContext(ctx, `
 	CREATE TABLE IF NOT EXISTS keyserver_cross_signing_sigs_tmp (
 		origin_user_id TEXT NOT NULL,
 		origin_key_id TEXT NOT NULL,
@@ -50,8 +45,8 @@ func UpFixCrossSigningSignatureIndexes(tx *sql.Tx) error {
 	return nil
 }

-func DownFixCrossSigningSignatureIndexes(tx *sql.Tx) error {
-	_, err := tx.Exec(`
+func DownFixCrossSigningSignatureIndexes(ctx context.Context, tx *sql.Tx) error {
+	_, err := tx.ExecContext(ctx, `
 	CREATE TABLE IF NOT EXISTS keyserver_cross_signing_sigs_tmp (
 		origin_user_id TEXT NOT NULL,
 		origin_key_id TEXT NOT NULL,
@@ -19,6 +19,8 @@ import (
 	"database/sql"

 	"github.com/matrix-org/dendrite/internal"
+	"github.com/matrix-org/dendrite/internal/sqlutil"
+	"github.com/matrix-org/dendrite/keyserver/storage/sqlite3/deltas"
 	"github.com/matrix-org/dendrite/keyserver/storage/tables"
 )
@@ -53,7 +55,24 @@ func NewSqliteKeyChangesTable(db *sql.DB) (tables.KeyChanges, error) {
 		db: db,
 	}
 	_, err := db.Exec(keyChangesSchema)
-	return s, err
+	if err != nil {
+		return s, err
+	}
+	// TODO: Remove when we are sure we are not having goose artefacts in the db
+	// This forces an error, which indicates the migration is already applied, since the
+	// column partition was removed from the table
+	var count int
+	err = db.QueryRow("SELECT partition FROM keyserver_key_changes LIMIT 1;").Scan(&count)
+	if err == nil {
+		m := sqlutil.NewMigrator(db)
+		m.AddMigrations(sqlutil.Migration{
+			Version: "keyserver: refactor key changes",
+			Up:      deltas.UpRefactorKeyChanges,
+		})
+		return s, m.Up(context.Background())
+	}
+
+	return s, nil
 }

 func (s *keyChangesStatements) Prepare() (err error) {
@@ -17,7 +17,6 @@ package sqlite3

 import (
 	"github.com/matrix-org/dendrite/internal/sqlutil"
 	"github.com/matrix-org/dendrite/keyserver/storage/shared"
-	"github.com/matrix-org/dendrite/keyserver/storage/sqlite3/deltas"
 	"github.com/matrix-org/dendrite/setup/base"
 	"github.com/matrix-org/dendrite/setup/config"
 )
@@ -52,12 +51,6 @@ func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions)
 		return nil, err
 	}

-	m := sqlutil.NewMigrations()
-	deltas.LoadRefactorKeyChanges(m)
-	deltas.LoadFixCrossSigningSignatureIndexes(m)
-	if err = m.RunDeltas(db, dbProperties); err != nil {
-		return nil, err
-	}
 	if err = kc.Prepare(); err != nil {
 		return nil, err
 	}
@@ -3,6 +3,7 @@ package storage_test
 import (
 	"context"
 	"reflect"
+	"sync"
 	"testing"

 	"github.com/matrix-org/dendrite/keyserver/api"
@@ -103,6 +104,9 @@ func TestKeyChangesUpperLimit(t *testing.T) {
 	})
 }

+var dbLock sync.Mutex
+var deviceArray = []string{"AAA", "another_device"}
+
 // The purpose of this test is to make sure that the storage layer is generating sequential stream IDs per user,
 // and that they are returned correctly when querying for device keys.
 func TestDeviceKeysStreamIDGeneration(t *testing.T) {
@@ -169,8 +173,11 @@ func TestDeviceKeysStreamIDGeneration(t *testing.T) {
 		t.Fatalf("Expected StoreLocalDeviceKeys to set StreamID=3 (new key same device) but got %d", msgs[0].StreamID)
 	}

+	dbLock.Lock()
+	defer dbLock.Unlock()
 	// Querying for device keys returns the latest stream IDs
-	msgs, err = db.DeviceKeysForUser(ctx, alice, []string{"AAA", "another_device"}, false)
+	msgs, err = db.DeviceKeysForUser(ctx, alice, deviceArray, false)
 	if err != nil {
 		t.Fatalf("DeviceKeysForUser returned error: %s", err)
 	}
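The test change serialises access to the shared database with a package-level mutex. A minimal, self-contained sketch of that lock/defer-unlock pattern against an invented shared variable:

package example_test

import (
	"sync"
	"testing"
)

var (
	sharedLock  sync.Mutex
	sharedState []string // stands in for a shared database handle
)

// TestSharedResource shows the lock/defer-unlock pattern so that tests
// touching the same shared state do not interleave.
func TestSharedResource(t *testing.T) {
	sharedLock.Lock()
	defer sharedLock.Unlock()

	sharedState = append(sharedState, "AAA", "another_device")
	if len(sharedState) == 0 {
		t.Fatal("expected shared state to be populated")
	}
}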
@@ -21,7 +21,6 @@ import (
 	"encoding/base64"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"strings"
@@ -180,7 +179,7 @@ func createTempDir(baseDirectory config.Path) (types.Path, error) {
 	if err := os.MkdirAll(baseTmpDir, 0770); err != nil {
 		return "", fmt.Errorf("failed to create base temp dir: %w", err)
 	}
-	tmpDir, err := ioutil.TempDir(baseTmpDir, "")
+	tmpDir, err := os.MkdirTemp(baseTmpDir, "")
 	if err != nil {
 		return "", fmt.Errorf("failed to create temp dir: %w", err)
 	}
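os.MkdirTemp replaces the deprecated ioutil.TempDir here. A small standalone example with illustrative paths:

package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

func main() {
	base := filepath.Join(os.TempDir(), "media-example")
	if err := os.MkdirAll(base, 0o770); err != nil {
		log.Fatalf("failed to create base temp dir: %v", err)
	}
	// os.MkdirTemp is the stdlib replacement for ioutil.TempDir.
	tmpDir, err := os.MkdirTemp(base, "")
	if err != nil {
		log.Fatalf("failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	fmt.Println("scratch directory:", tmpDir)
}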
@@ -19,7 +19,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"mime"
 	"net/http"
 	"net/url"
@@ -695,7 +694,7 @@ func (r *downloadRequest) GetContentLengthAndReader(contentLengthHeader string,

 		// We successfully parsed the Content-Length, so we'll return a limited
 		// reader that restricts us to reading only up to this size.
-		reader = ioutil.NopCloser(io.LimitReader(*body, parsedLength))
+		reader = io.NopCloser(io.LimitReader(*body, parsedLength))
 		contentLength = parsedLength
 	} else {
 		// Content-Length header is missing. If we have a maximum file size
@@ -704,7 +703,7 @@ func (r *downloadRequest) GetContentLengthAndReader(contentLengthHeader string,
 		// ultimately it will get rewritten later when the temp file is written
 		// to disk.
 		if maxFileSizeBytes > 0 {
-			reader = ioutil.NopCloser(io.LimitReader(*body, int64(maxFileSizeBytes)))
+			reader = io.NopCloser(io.LimitReader(*body, int64(maxFileSizeBytes)))
 		}
 		contentLength = 0
 	}
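Both hunks wrap the response body in io.NopCloser(io.LimitReader(...)) so no more than the allowed number of bytes is read. A self-contained sketch with an invented limit:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	const maxBytes = 16 // hypothetical cap on how much we are willing to read
	body := strings.NewReader("this response body is longer than the limit")

	// io.NopCloser gives us an io.ReadCloser without changing Close semantics,
	// and io.LimitReader stops the read after maxBytes.
	reader := io.NopCloser(io.LimitReader(body, maxBytes))
	defer reader.Close()

	data, err := io.ReadAll(reader)
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Printf("read %d bytes: %q\n", len(data), data)
}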
@@ -149,6 +149,9 @@ func makeDownloadAPI(
 		}
 	}

+	// Cache media for at least one day.
+	w.Header().Set("Cache-Control", "public,max-age=86400,s-maxage=86400")
+
 	Download(
 		w,
 		req,
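The new Cache-Control header has to be set before the body is written. A tiny standalone handler showing the same header value; the listen address is illustrative:

package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/media", func(w http.ResponseWriter, r *http.Request) {
		// Headers must be set before the first write to the body.
		w.Header().Set("Cache-Control", "public,max-age=86400,s-maxage=86400")
		fmt.Fprintln(w, "cached for at least one day")
	})
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil)) // illustrative address
}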
@@ -50,14 +50,14 @@ func CheckForSoftFail(
 	if err != nil {
 		return false, fmt.Errorf("db.RoomNID: %w", err)
 	}
-	if roomInfo == nil || roomInfo.IsStub {
+	if roomInfo == nil || roomInfo.IsStub() {
 		return false, nil
 	}

 	// Then get the state entries for the current state snapshot.
 	// We'll use this to check if the event is allowed right now.
 	roomState := state.NewStateResolution(db, roomInfo)
-	authStateEntries, err = roomState.LoadStateAtSnapshot(ctx, roomInfo.StateSnapshotNID)
+	authStateEntries, err = roomState.LoadStateAtSnapshot(ctx, roomInfo.StateSnapshotNID())
 	if err != nil {
 		return true, fmt.Errorf("roomState.LoadStateAtSnapshot: %w", err)
 	}
@@ -12,6 +12,7 @@ import (
 	"github.com/matrix-org/dendrite/roomserver/state"
 	"github.com/matrix-org/dendrite/roomserver/storage"
 	"github.com/matrix-org/dendrite/roomserver/storage/shared"
+	"github.com/matrix-org/dendrite/roomserver/storage/tables"
 	"github.com/matrix-org/dendrite/roomserver/types"
 	"github.com/matrix-org/gomatrixserverlib"
 	"github.com/matrix-org/util"
@@ -21,14 +22,14 @@
 // Move these to a more sensible place.

 func UpdateToInviteMembership(
-	mu *shared.MembershipUpdater, add *gomatrixserverlib.Event, updates []api.OutputEvent,
+	mu *shared.MembershipUpdater, add *types.Event, updates []api.OutputEvent,
 	roomVersion gomatrixserverlib.RoomVersion,
 ) ([]api.OutputEvent, error) {
 	// We may have already sent the invite to the user, either because we are
 	// reprocessing this event, or because the we received this invite from a
 	// remote server via the federation invite API. In those cases we don't need
 	// to send the event.
-	needsSending, err := mu.SetToInvite(add)
+	needsSending, retired, err := mu.Update(tables.MembershipStateInvite, add)
 	if err != nil {
 		return nil, err
 	}
@@ -38,13 +39,23 @@ func UpdateToInviteMembership(
 		// room event stream. This ensures that the consumers only have to
 		// consider a single stream of events when determining whether a user
 		// is invited, rather than having to combine multiple streams themselves.
-		onie := api.OutputNewInviteEvent{
-			Event:       add.Headered(roomVersion),
-			RoomVersion: roomVersion,
-		}
 		updates = append(updates, api.OutputEvent{
 			Type: api.OutputTypeNewInviteEvent,
-			NewInviteEvent: &onie,
+			NewInviteEvent: &api.OutputNewInviteEvent{
+				Event:       add.Headered(roomVersion),
+				RoomVersion: roomVersion,
+			},
+		})
+	}
+	for _, eventID := range retired {
+		updates = append(updates, api.OutputEvent{
+			Type: api.OutputTypeRetireInviteEvent,
+			RetireInviteEvent: &api.OutputRetireInviteEvent{
+				EventID:          eventID,
+				Membership:       gomatrixserverlib.Join,
+				RetiredByEventID: add.EventID(),
+				TargetUserID:     *add.StateKey(),
+			},
 		})
 	}
 	return updates, nil
@@ -225,13 +236,34 @@ func LoadStateEvents(
 func CheckServerAllowedToSeeEvent(
 	ctx context.Context, db storage.Database, info *types.RoomInfo, eventID string, serverName gomatrixserverlib.ServerName, isServerInRoom bool,
 ) (bool, error) {
+	stateAtEvent, err := db.GetHistoryVisibilityState(ctx, info, eventID, string(serverName))
+	switch err {
+	case nil:
+		// No error, so continue normally
+	case tables.OptimisationNotSupportedError:
+		// The database engine didn't support this optimisation, so fall back to using
+		// the old and slow method
+		stateAtEvent, err = slowGetHistoryVisibilityState(ctx, db, info, eventID, serverName)
+		if err != nil {
+			return false, err
+		}
+	default:
+		// Something else went wrong
+		return false, err
+	}
+	return auth.IsServerAllowed(serverName, isServerInRoom, stateAtEvent), nil
+}
+
+func slowGetHistoryVisibilityState(
+	ctx context.Context, db storage.Database, info *types.RoomInfo, eventID string, serverName gomatrixserverlib.ServerName,
+) ([]*gomatrixserverlib.Event, error) {
 	roomState := state.NewStateResolution(db, info)
 	stateEntries, err := roomState.LoadStateAtEvent(ctx, eventID)
 	if err != nil {
 		if errors.Is(err, sql.ErrNoRows) {
-			return false, nil
+			return nil, nil
 		}
-		return false, fmt.Errorf("roomState.LoadStateAtEvent: %w", err)
+		return nil, fmt.Errorf("roomState.LoadStateAtEvent: %w", err)
 	}

 	// Extract all of the event state key NIDs from the room state.
@@ -243,7 +275,7 @@ func CheckServerAllowedToSeeEvent(
 	// Then request those state key NIDs from the database.
 	stateKeys, err := db.EventStateKeys(ctx, stateKeyNIDs)
 	if err != nil {
-		return false, fmt.Errorf("db.EventStateKeys: %w", err)
+		return nil, fmt.Errorf("db.EventStateKeys: %w", err)
 	}

 	// If the event state key doesn't match the given servername
@@ -266,15 +298,10 @@ func CheckServerAllowedToSeeEvent(
 	}

 	if len(filteredEntries) == 0 {
-		return false, nil
+		return nil, nil
 	}

-	stateAtEvent, err := LoadStateEvents(ctx, db, filteredEntries)
-	if err != nil {
-		return false, err
-	}
-
-	return auth.IsServerAllowed(serverName, isServerInRoom, stateAtEvent), nil
+	return LoadStateEvents(ctx, db, filteredEntries)
 }

 // TODO: Remove this when we have tests to assert correctness of this function
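CheckServerAllowedToSeeEvent now tries a fast history-visibility query first and falls back to the slower path when the storage layer reports a "not supported" sentinel. A generic, self-contained sketch of that pattern follows; the lookup functions and sentinel error are invented for illustration.

package main

import (
	"errors"
	"fmt"
)

// errOptimisationNotSupported stands in for a sentinel such as
// tables.OptimisationNotSupportedError.
var errOptimisationNotSupported = errors.New("optimisation not supported")

func fastLookup(id string) ([]string, error) {
	// Pretend this backend cannot serve the optimised query.
	return nil, errOptimisationNotSupported
}

func slowLookup(id string) ([]string, error) {
	return []string{"state-event-for-" + id}, nil
}

func lookup(id string) ([]string, error) {
	state, err := fastLookup(id)
	switch {
	case err == nil:
		// Fast path worked.
	case errors.Is(err, errOptimisationNotSupported):
		// Fall back to the slower, generic implementation.
		if state, err = slowLookup(id); err != nil {
			return nil, err
		}
	default:
		return nil, err
	}
	return state, nil
}

func main() {
	state, err := lookup("$event:example.org")
	fmt.Println(state, err)
}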
@@ -382,7 +409,7 @@ func QueryLatestEventsAndState(
 	if err != nil {
 		return err
 	}
-	if roomInfo == nil || roomInfo.IsStub {
+	if roomInfo == nil || roomInfo.IsStub() {
 		response.RoomExists = false
 		return nil
 	}
@@ -25,6 +25,11 @@ import (

 	"github.com/Arceliar/phony"
 	"github.com/getsentry/sentry-go"
+	"github.com/matrix-org/gomatrixserverlib"
+	"github.com/nats-io/nats.go"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/sirupsen/logrus"
+
 	fedapi "github.com/matrix-org/dendrite/federationapi/api"
 	"github.com/matrix-org/dendrite/roomserver/acls"
 	"github.com/matrix-org/dendrite/roomserver/api"
@@ -35,10 +40,6 @@ import (
 	"github.com/matrix-org/dendrite/setup/config"
 	"github.com/matrix-org/dendrite/setup/jetstream"
 	"github.com/matrix-org/dendrite/setup/process"
-	"github.com/matrix-org/gomatrixserverlib"
-	"github.com/nats-io/nats.go"
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/sirupsen/logrus"
 )

 // Inputer is responsible for consuming from the roomserver input
@@ -60,9 +61,9 @@
 // per-room durable consumers will only progress through the stream
 // as events are processed.
 //
 //   A BC * -> positions of each consumer (* = ephemeral)
 //   ⌄ ⌄⌄ ⌄
 //   ABAABCAABCAA -> newest (letter = subject for each message)
 //
 // In this example, A is still processing an event but has two
 // pending events to process afterwards. Both B and C are caught
||||||
|
|
@ -20,32 +20,32 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/matrix-org/gomatrixserverlib"
|
||||||
|
"github.com/matrix-org/util"
|
||||||
|
"github.com/opentracing/opentracing-go"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
|
||||||
"github.com/matrix-org/dendrite/internal/sqlutil"
|
"github.com/matrix-org/dendrite/internal/sqlutil"
|
||||||
"github.com/matrix-org/dendrite/roomserver/api"
|
"github.com/matrix-org/dendrite/roomserver/api"
|
||||||
"github.com/matrix-org/dendrite/roomserver/state"
|
"github.com/matrix-org/dendrite/roomserver/state"
|
||||||
"github.com/matrix-org/dendrite/roomserver/storage/shared"
|
"github.com/matrix-org/dendrite/roomserver/storage/shared"
|
||||||
"github.com/matrix-org/dendrite/roomserver/types"
|
"github.com/matrix-org/dendrite/roomserver/types"
|
||||||
"github.com/matrix-org/gomatrixserverlib"
|
|
||||||
"github.com/matrix-org/util"
|
|
||||||
"github.com/opentracing/opentracing-go"
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// updateLatestEvents updates the list of latest events for this room in the database and writes the
|
// updateLatestEvents updates the list of latest events for this room in the database and writes the
|
||||||
// event to the output log.
|
// event to the output log.
|
||||||
// The latest events are the events that aren't referenced by another event in the database:
|
// The latest events are the events that aren't referenced by another event in the database:
|
||||||
//
|
//
|
||||||
// Time goes down the page. 1 is the m.room.create event (root).
|
// Time goes down the page. 1 is the m.room.create event (root).
|
||||||
//
|
// 1 After storing 1 the latest events are {1}
|
||||||
// 1 After storing 1 the latest events are {1}
|
// | After storing 2 the latest events are {2}
|
||||||
// | After storing 2 the latest events are {2}
|
// 2 After storing 3 the latest events are {3}
|
||||||
// 2 After storing 3 the latest events are {3}
|
// / \ After storing 4 the latest events are {3,4}
|
||||||
// / \ After storing 4 the latest events are {3,4}
|
// 3 4 After storing 5 the latest events are {5,4}
|
||||||
// 3 4 After storing 5 the latest events are {5,4}
|
// | | After storing 6 the latest events are {5,6}
|
||||||
// | | After storing 6 the latest events are {5,6}
|
// 5 6 <--- latest After storing 7 the latest events are {6,7}
|
||||||
// 5 6 <--- latest After storing 7 the latest events are {6,7}
|
// |
|
||||||
// |
|
// 7 <----- latest
|
||||||
// 7 <----- latest
|
|
||||||
//
|
//
|
||||||
// Can only be called once at a time
|
// Can only be called once at a time
|
||||||
func (r *Inputer) updateLatestEvents(
|
func (r *Inputer) updateLatestEvents(
|
||||||
|
|
|
||||||
|
|
@ -21,6 +21,7 @@ import (
|
||||||
"github.com/matrix-org/dendrite/roomserver/api"
|
"github.com/matrix-org/dendrite/roomserver/api"
|
||||||
"github.com/matrix-org/dendrite/roomserver/internal/helpers"
|
"github.com/matrix-org/dendrite/roomserver/internal/helpers"
|
||||||
"github.com/matrix-org/dendrite/roomserver/storage/shared"
|
"github.com/matrix-org/dendrite/roomserver/storage/shared"
|
||||||
|
"github.com/matrix-org/dendrite/roomserver/storage/tables"
|
||||||
"github.com/matrix-org/dendrite/roomserver/types"
|
"github.com/matrix-org/dendrite/roomserver/types"
|
||||||
"github.com/matrix-org/gomatrixserverlib"
|
"github.com/matrix-org/gomatrixserverlib"
|
||||||
"github.com/opentracing/opentracing-go"
|
"github.com/opentracing/opentracing-go"
|
||||||
|
|
@ -60,20 +61,14 @@ func (r *Inputer) updateMemberships(
|
||||||
var updates []api.OutputEvent
|
var updates []api.OutputEvent
|
||||||
|
|
||||||
for _, change := range changes {
|
for _, change := range changes {
|
||||||
var ae *gomatrixserverlib.Event
|
var ae *types.Event
|
||||||
var re *gomatrixserverlib.Event
|
var re *types.Event
|
||||||
targetUserNID := change.EventStateKeyNID
|
targetUserNID := change.EventStateKeyNID
|
||||||
if change.removedEventNID != 0 {
|
if change.removedEventNID != 0 {
|
||||||
ev, _ := helpers.EventMap(events).Lookup(change.removedEventNID)
|
re, _ = helpers.EventMap(events).Lookup(change.removedEventNID)
|
||||||
if ev != nil {
|
|
||||||
re = ev.Event
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if change.addedEventNID != 0 {
|
if change.addedEventNID != 0 {
|
||||||
ev, _ := helpers.EventMap(events).Lookup(change.addedEventNID)
|
ae, _ = helpers.EventMap(events).Lookup(change.addedEventNID)
|
||||||
if ev != nil {
|
|
||||||
ae = ev.Event
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if updates, err = r.updateMembership(updater, targetUserNID, re, ae, updates); err != nil {
|
if updates, err = r.updateMembership(updater, targetUserNID, re, ae, updates); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
|
@ -85,30 +80,27 @@ func (r *Inputer) updateMemberships(
|
||||||
func (r *Inputer) updateMembership(
|
func (r *Inputer) updateMembership(
|
||||||
updater *shared.RoomUpdater,
|
updater *shared.RoomUpdater,
|
||||||
targetUserNID types.EventStateKeyNID,
|
targetUserNID types.EventStateKeyNID,
|
||||||
remove, add *gomatrixserverlib.Event,
|
remove, add *types.Event,
|
||||||
updates []api.OutputEvent,
|
updates []api.OutputEvent,
|
||||||
) ([]api.OutputEvent, error) {
|
) ([]api.OutputEvent, error) {
|
||||||
var err error
|
var err error
|
||||||
// Default the membership to Leave if no event was added or removed.
|
// Default the membership to Leave if no event was added or removed.
|
||||||
oldMembership := gomatrixserverlib.Leave
|
|
||||||
newMembership := gomatrixserverlib.Leave
|
newMembership := gomatrixserverlib.Leave
|
||||||
|
|
||||||
if remove != nil {
|
|
||||||
oldMembership, err = remove.Membership()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if add != nil {
|
if add != nil {
|
||||||
newMembership, err = add.Membership()
|
newMembership, err = add.Membership()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if oldMembership == newMembership && newMembership != gomatrixserverlib.Join {
|
|
||||||
// If the membership is the same then nothing changed and we can return
|
var targetLocal bool
|
||||||
// immediately, unless it's a Join update (e.g. profile update).
|
if add != nil {
|
||||||
return updates, nil
|
targetLocal = r.isLocalTarget(add)
|
||||||
|
}
|
||||||
|
|
||||||
|
mu, err := updater.MembershipUpdater(targetUserNID, targetLocal)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// In an ideal world, we shouldn't ever have "add" be nil and "remove" be
|
// In an ideal world, we shouldn't ever have "add" be nil and "remove" be
|
||||||
|
|
@ -120,17 +112,10 @@ func (r *Inputer) updateMembership(
|
||||||
// after a state reset, often thinking that the user was still joined to
|
// after a state reset, often thinking that the user was still joined to
|
||||||
// the room even though the room state said otherwise, and this would prevent
|
// the room even though the room state said otherwise, and this would prevent
|
||||||
// the user from being able to attempt to rejoin the room without modifying
|
// the user from being able to attempt to rejoin the room without modifying
|
||||||
// the database. So instead what we'll do is we'll just update the membership
|
// the database. So instead we're going to remove the membership from the
|
||||||
// table to say that the user is "leave" and we'll use the old event to
|
// database altogether, so that it doesn't create future problems.
|
||||||
// avoid nil pointer exceptions on the code path that follows.
|
if add == nil && remove != nil {
|
||||||
if add == nil {
|
return nil, mu.Delete()
|
||||||
add = remove
|
|
||||||
newMembership = gomatrixserverlib.Leave
|
|
||||||
}
|
|
||||||
|
|
||||||
mu, err := updater.MembershipUpdater(targetUserNID, r.isLocalTarget(add))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
switch newMembership {
|
switch newMembership {
|
||||||
|
|
@ -149,7 +134,7 @@ func (r *Inputer) updateMembership(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *Inputer) isLocalTarget(event *gomatrixserverlib.Event) bool {
|
func (r *Inputer) isLocalTarget(event *types.Event) bool {
|
||||||
isTargetLocalUser := false
|
isTargetLocalUser := false
|
||||||
if statekey := event.StateKey(); statekey != nil {
|
if statekey := event.StateKey(); statekey != nil {
|
||||||
_, domain, _ := gomatrixserverlib.SplitID('@', *statekey)
|
_, domain, _ := gomatrixserverlib.SplitID('@', *statekey)
|
||||||
|
|
@ -159,81 +144,61 @@ func (r *Inputer) isLocalTarget(event *gomatrixserverlib.Event) bool {
|
||||||
}
|
}
|
||||||
|
|
||||||
func updateToJoinMembership(
|
func updateToJoinMembership(
|
||||||
mu *shared.MembershipUpdater, add *gomatrixserverlib.Event, updates []api.OutputEvent,
|
mu *shared.MembershipUpdater, add *types.Event, updates []api.OutputEvent,
|
||||||
) ([]api.OutputEvent, error) {
|
) ([]api.OutputEvent, error) {
|
||||||
// If the user is already marked as being joined, we call SetToJoin to update
|
|
||||||
// the event ID then we can return immediately. Retired is ignored as there
|
|
||||||
// is no invite event to retire.
|
|
||||||
if mu.IsJoin() {
|
|
||||||
_, err := mu.SetToJoin(add.Sender(), add.EventID(), true)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return updates, nil
|
|
||||||
}
|
|
||||||
// When we mark a user as being joined we will invalidate any invites that
|
// When we mark a user as being joined we will invalidate any invites that
|
||||||
// are active for that user. We notify the consumers that the invites have
|
// are active for that user. We notify the consumers that the invites have
|
||||||
// been retired using a special event, even though they could infer this
|
// been retired using a special event, even though they could infer this
|
||||||
// by studying the state changes in the room event stream.
|
// by studying the state changes in the room event stream.
|
||||||
retired, err := mu.SetToJoin(add.Sender(), add.EventID(), false)
|
_, retired, err := mu.Update(tables.MembershipStateJoin, add)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
for _, eventID := range retired {
|
for _, eventID := range retired {
|
||||||
orie := api.OutputRetireInviteEvent{
|
|
||||||
EventID: eventID,
|
|
||||||
Membership: gomatrixserverlib.Join,
|
|
||||||
RetiredByEventID: add.EventID(),
|
|
||||||
TargetUserID: *add.StateKey(),
|
|
||||||
}
|
|
||||||
updates = append(updates, api.OutputEvent{
|
updates = append(updates, api.OutputEvent{
|
||||||
Type: api.OutputTypeRetireInviteEvent,
|
Type: api.OutputTypeRetireInviteEvent,
|
||||||
RetireInviteEvent: &orie,
|
RetireInviteEvent: &api.OutputRetireInviteEvent{
|
||||||
|
EventID: eventID,
|
||||||
|
Membership: gomatrixserverlib.Join,
|
||||||
|
RetiredByEventID: add.EventID(),
|
||||||
|
TargetUserID: *add.StateKey(),
|
||||||
|
},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
return updates, nil
|
return updates, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func updateToLeaveMembership(
|
func updateToLeaveMembership(
|
||||||
mu *shared.MembershipUpdater, add *gomatrixserverlib.Event,
|
mu *shared.MembershipUpdater, add *types.Event,
|
||||||
newMembership string, updates []api.OutputEvent,
|
newMembership string, updates []api.OutputEvent,
|
||||||
) ([]api.OutputEvent, error) {
|
) ([]api.OutputEvent, error) {
|
||||||
// If the user is already neither joined, nor invited to the room then we
|
|
||||||
// can return immediately.
|
|
||||||
if mu.IsLeave() {
|
|
||||||
return updates, nil
|
|
||||||
}
|
|
||||||
// When we mark a user as having left we will invalidate any invites that
|
// When we mark a user as having left we will invalidate any invites that
|
||||||
// are active for that user. We notify the consumers that the invites have
|
// are active for that user. We notify the consumers that the invites have
|
||||||
// been retired using a special event, even though they could infer this
|
// been retired using a special event, even though they could infer this
|
||||||
// by studying the state changes in the room event stream.
|
// by studying the state changes in the room event stream.
|
||||||
retired, err := mu.SetToLeave(add.Sender(), add.EventID())
|
_, retired, err := mu.Update(tables.MembershipStateLeaveOrBan, add)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
for _, eventID := range retired {
|
for _, eventID := range retired {
|
||||||
orie := api.OutputRetireInviteEvent{
|
|
||||||
EventID: eventID,
|
|
||||||
Membership: newMembership,
|
|
||||||
RetiredByEventID: add.EventID(),
|
|
||||||
TargetUserID: *add.StateKey(),
|
|
||||||
}
|
|
||||||
updates = append(updates, api.OutputEvent{
|
updates = append(updates, api.OutputEvent{
|
||||||
Type: api.OutputTypeRetireInviteEvent,
|
Type: api.OutputTypeRetireInviteEvent,
|
||||||
RetireInviteEvent: &orie,
|
RetireInviteEvent: &api.OutputRetireInviteEvent{
|
||||||
|
EventID: eventID,
|
||||||
|
Membership: newMembership,
|
||||||
|
RetiredByEventID: add.EventID(),
|
||||||
|
TargetUserID: *add.StateKey(),
|
||||||
|
},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
return updates, nil
|
return updates, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func updateToKnockMembership(
|
func updateToKnockMembership(
|
||||||
mu *shared.MembershipUpdater, add *gomatrixserverlib.Event, updates []api.OutputEvent,
|
mu *shared.MembershipUpdater, add *types.Event, updates []api.OutputEvent,
|
||||||
) ([]api.OutputEvent, error) {
|
) ([]api.OutputEvent, error) {
|
||||||
if mu.IsLeave() {
|
if _, _, err := mu.Update(tables.MembershipStateKnock, add); err != nil {
|
||||||
_, err := mu.SetToKnock(add)
|
return nil, err
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return updates, nil
|
return updates, nil
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
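For context, a minimal sketch of the shape these helpers now share: a single membership-updater call reports which invite event IDs were retired, and the helper fans each one out as a retire-invite output event. The types below are simplified stand-ins for illustration only, not Dendrite's real api package.

```go
package main

import "fmt"

// Simplified stand-ins for the roomserver output types used in the hunk above.
type OutputRetireInviteEvent struct {
	EventID          string
	Membership       string
	RetiredByEventID string
	TargetUserID     string
}

type OutputEvent struct {
	Type              string
	RetireInviteEvent *OutputRetireInviteEvent
}

// retireInvites mirrors the loop in updateToJoinMembership after the change:
// the membership updater has already reported which invite event IDs were
// retired, and each one becomes a retire-invite output event for consumers.
func retireInvites(retired []string, byEventID, targetUserID, membership string, updates []OutputEvent) []OutputEvent {
	for _, eventID := range retired {
		updates = append(updates, OutputEvent{
			Type: "retire_invite_event",
			RetireInviteEvent: &OutputRetireInviteEvent{
				EventID:          eventID,
				Membership:       membership,
				RetiredByEventID: byEventID,
				TargetUserID:     targetUserID,
			},
		})
	}
	return updates
}

func main() {
	out := retireInvites([]string{"$invite1", "$invite2"}, "$join1", "@alice:example.org", "join", nil)
	fmt.Println(len(out), "retire-invite output events")
}
```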
@@ -326,8 +326,10 @@ func (t *missingStateReq) lookupStateAfterEvent(ctx context.Context, roomVersion
 		return respState, true, nil
 	}
 
+	logrus.WithContext(ctx).Warnf("State for event %s not available locally, falling back to federation (via %d servers)", eventID, len(t.servers))
 	respState, err := t.lookupStateBeforeEvent(ctx, roomVersion, roomID, eventID)
 	if err != nil {
+		logrus.WithContext(ctx).WithError(err).Errorf("Failed to look up state before event %s", eventID)
 		return nil, false, fmt.Errorf("t.lookupStateBeforeEvent: %w", err)
 	}
 
@@ -339,6 +341,7 @@ func (t *missingStateReq) lookupStateAfterEvent(ctx context.Context, roomVersion
 	case nil:
 		// do nothing
 	default:
+		logrus.WithContext(ctx).WithError(err).Errorf("Failed to look up event %s", eventID)
 		return nil, false, fmt.Errorf("t.lookupEvent: %w", err)
 	}
 	h = t.cacheAndReturn(h)
@@ -375,11 +378,7 @@ func (t *missingStateReq) lookupStateAfterEventLocally(ctx context.Context, room
 	defer span.Finish()
 
 	var res parsedRespState
-	roomInfo, err := t.db.RoomInfo(ctx, roomID)
-	if err != nil {
-		return nil
-	}
-	roomState := state.NewStateResolution(t.db, roomInfo)
+	roomState := state.NewStateResolution(t.db, t.roomInfo)
 	stateAtEvents, err := t.db.StateAtEventIDs(ctx, []string{eventID})
 	if err != nil {
 		util.GetLogger(ctx).WithField("room_id", roomID).WithError(err).Warnf("failed to get state after %s locally", eventID)
@@ -666,9 +665,22 @@ func (t *missingStateReq) lookupMissingStateViaStateIDs(ctx context.Context, roo
 
 	util.GetLogger(ctx).WithField("room_id", roomID).Infof("lookupMissingStateViaStateIDs %s", eventID)
 	// fetch the state event IDs at the time of the event
-	stateIDs, err := t.federation.LookupStateIDs(ctx, t.origin, roomID, eventID)
+	var stateIDs gomatrixserverlib.RespStateIDs
+	var err error
+	count := 0
+	totalctx, totalcancel := context.WithTimeout(ctx, time.Minute*5)
+	for _, serverName := range t.servers {
+		reqctx, reqcancel := context.WithTimeout(totalctx, time.Second*20)
+		stateIDs, err = t.federation.LookupStateIDs(reqctx, serverName, roomID, eventID)
+		reqcancel()
+		if err == nil {
+			break
+		}
+		count++
+	}
+	totalcancel()
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("t.federation.LookupStateIDs tried %d server(s), last error: %w", count, err)
 	}
 	// work out which auth/state IDs are missing
 	wantIDs := append(stateIDs.StateEventIDs, stateIDs.AuthEventIDs...)
@@ -754,9 +766,8 @@ func (t *missingStateReq) lookupMissingStateViaStateIDs(ctx context.Context, roo
 
 	// Define what we'll do in order to fetch the missing event ID.
 	fetch := func(missingEventID string) {
-		var h *gomatrixserverlib.Event
-		h, err = t.lookupEvent(ctx, roomVersion, roomID, missingEventID, false)
-		switch err.(type) {
+		h, herr := t.lookupEvent(ctx, roomVersion, roomID, missingEventID, false)
+		switch herr.(type) {
 		case verifySigError:
 			return
 		case nil:
@@ -765,7 +776,7 @@ func (t *missingStateReq) lookupMissingStateViaStateIDs(ctx context.Context, roo
 			util.GetLogger(ctx).WithFields(logrus.Fields{
 				"event_id": missingEventID,
 				"room_id":  roomID,
-			}).Warn("Failed to fetch missing event")
+			}).WithError(herr).Warn("Failed to fetch missing event")
 			return
 		}
 		haveEventsMutex.Lock()
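The LookupStateIDs change above replaces a single federation call with a retry loop: an overall five-minute budget, a twenty-second timeout per server, and the number of failed attempts folded into the final error. A self-contained sketch of that pattern follows; queryFirstSuccessful and its callback are hypothetical names for illustration, not Dendrite APIs.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// queryFirstSuccessful mirrors the retry pattern added above: one overall
// budget for the whole lookup, a shorter per-server timeout for each attempt,
// and the last error (plus attempt count) reported if every server fails.
func queryFirstSuccessful(
	ctx context.Context,
	servers []string,
	query func(ctx context.Context, server string) error,
) error {
	totalctx, totalcancel := context.WithTimeout(ctx, 5*time.Minute)
	defer totalcancel()

	var err error
	count := 0
	for _, server := range servers {
		reqctx, reqcancel := context.WithTimeout(totalctx, 20*time.Second)
		err = query(reqctx, server)
		reqcancel()
		if err == nil {
			return nil
		}
		count++
	}
	return fmt.Errorf("tried %d server(s), last error: %w", count, err)
}

func main() {
	servers := []string{"alpha.example", "beta.example"}
	err := queryFirstSuccessful(context.Background(), servers, func(ctx context.Context, server string) error {
		// Pretend only beta.example responds.
		if server != "beta.example" {
			return errors.New("unreachable")
		}
		return nil
	})
	fmt.Println("result:", err)
}
```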
@@ -52,7 +52,7 @@ func (r *Admin) PerformAdminEvacuateRoom(
 		}
 		return
 	}
-	if roomInfo == nil || roomInfo.IsStub {
+	if roomInfo == nil || roomInfo.IsStub() {
 		res.Error = &api.PerformError{
 			Code: api.PerformErrorNoRoom,
 			Msg:  fmt.Sprintf("Room %s not found", req.RoomID),
@@ -19,6 +19,10 @@ import (
 	"fmt"
 
 	"github.com/getsentry/sentry-go"
+	"github.com/matrix-org/gomatrixserverlib"
+	"github.com/matrix-org/util"
+	"github.com/sirupsen/logrus"
+
 	federationAPI "github.com/matrix-org/dendrite/federationapi/api"
 	"github.com/matrix-org/dendrite/internal/eventutil"
 	"github.com/matrix-org/dendrite/roomserver/api"
@@ -26,9 +30,6 @@ import (
 	"github.com/matrix-org/dendrite/roomserver/internal/helpers"
 	"github.com/matrix-org/dendrite/roomserver/storage"
 	"github.com/matrix-org/dendrite/roomserver/types"
-	"github.com/matrix-org/gomatrixserverlib"
-	"github.com/matrix-org/util"
-	"github.com/sirupsen/logrus"
 )
 
 // the max number of servers to backfill from per request. If this is too low we may fail to backfill when
@@ -73,7 +74,7 @@ func (r *Backfiller) PerformBackfill(
 	if err != nil {
 		return err
 	}
-	if info == nil || info.IsStub {
+	if info == nil || info.IsStub() {
 		return fmt.Errorf("PerformBackfill: missing room info for room %s", request.RoomID)
 	}
 
@@ -106,7 +107,7 @@ func (r *Backfiller) backfillViaFederation(ctx context.Context, req *api.Perform
 	if err != nil {
 		return err
 	}
-	if info == nil || info.IsStub {
+	if info == nil || info.IsStub() {
 		return fmt.Errorf("backfillViaFederation: missing room info for room %s", req.RoomID)
 	}
 	requester := newBackfillRequester(r.DB, r.FSAPI, r.ServerName, req.BackwardsExtremities, r.PreferServers)
@@ -434,7 +435,7 @@ FindSuccessor:
 		logrus.WithError(err).WithField("room_id", roomID).Error("ServersAtEvent: failed to get RoomInfo for room")
 		return nil
 	}
-	if info == nil || info.IsStub {
+	if info == nil || info.IsStub() {
 		logrus.WithField("room_id", roomID).Error("ServersAtEvent: failed to get RoomInfo for room, room is missing")
 		return nil
 	}
@@ -522,8 +523,9 @@ func (b *backfillRequester) ProvideEvents(roomVer gomatrixserverlib.RoomVersion,
 }
 
 // joinEventsFromHistoryVisibility returns all CURRENTLY joined members if our server can read the room history
+//
 // TODO: Long term we probably want a history_visibility table which stores eventNID | visibility_enum so we can just
 // pull all events and then filter by that table.
 func joinEventsFromHistoryVisibility(
 	ctx context.Context, db storage.Database, roomID string, stateEntries []types.StateEntry,
 	thisServer gomatrixserverlib.ServerName) ([]types.Event, error) {
@@ -50,7 +50,7 @@ func (r *InboundPeeker) PerformInboundPeek(
 	if err != nil {
 		return err
 	}
-	if info == nil || info.IsStub {
+	if info == nil || info.IsStub() {
 		return nil
 	}
 	response.RoomExists = true
@@ -39,11 +39,13 @@ type Inviter struct {
 	Inputer *input.Inputer
 }
 
+// nolint:gocyclo
 func (r *Inviter) PerformInvite(
 	ctx context.Context,
 	req *api.PerformInviteRequest,
 	res *api.PerformInviteResponse,
 ) ([]api.OutputEvent, error) {
+	var outputUpdates []api.OutputEvent
 	event := req.Event
 	if event.StateKey() == nil {
 		return nil, fmt.Errorf("invite must be a state event")
@@ -66,6 +68,13 @@ func (r *Inviter) PerformInvite(
 	}
 	isTargetLocal := domain == r.Cfg.Matrix.ServerName
 	isOriginLocal := event.Origin() == r.Cfg.Matrix.ServerName
+	if !isOriginLocal && !isTargetLocal {
+		res.Error = &api.PerformError{
+			Code: api.PerformErrorBadRequest,
+			Msg:  "The invite must be either from or to a local user",
+		}
+		return nil, nil
+	}
 
 	logger := util.GetLogger(ctx).WithFields(map[string]interface{}{
 		"inviter":  event.Sender(),
@@ -97,6 +106,34 @@ func (r *Inviter) PerformInvite(
 		}
 	}
 
+	updateMembershipTableManually := func() ([]api.OutputEvent, error) {
+		var updater *shared.MembershipUpdater
+		if updater, err = r.DB.MembershipUpdater(ctx, roomID, targetUserID, isTargetLocal, req.RoomVersion); err != nil {
+			return nil, fmt.Errorf("r.DB.MembershipUpdater: %w", err)
+		}
+		outputUpdates, err = helpers.UpdateToInviteMembership(updater, &types.Event{
+			EventNID: 0,
+			Event:    event.Unwrap(),
+		}, outputUpdates, req.Event.RoomVersion)
+		if err != nil {
+			return nil, fmt.Errorf("updateToInviteMembership: %w", err)
+		}
+		if err = updater.Commit(); err != nil {
+			return nil, fmt.Errorf("updater.Commit: %w", err)
+		}
+		logger.Debugf("updated membership to invite and sending invite OutputEvent")
+		return outputUpdates, nil
+	}
+
+	if (info == nil || info.IsStub()) && !isOriginLocal && isTargetLocal {
+		// The invite came in over federation for a room that we don't know about
+		// yet. We need to handle this a bit differently to most invites because
+		// we don't know the room state, therefore the roomserver can't process
+		// an input event. Instead we will update the membership table with the
+		// new invite and generate an output event.
+		return updateMembershipTableManually()
+	}
+
 	var isAlreadyJoined bool
 	if info != nil {
 		_, isAlreadyJoined, _, err = r.DB.GetMembership(ctx, info.RoomNID, *event.StateKey())
@@ -140,31 +177,13 @@ func (r *Inviter) PerformInvite(
 		return nil, nil
 	}
 
+	// If the invite originated remotely then we can't send an
+	// InputRoomEvent for the invite as it will never pass auth checks
+	// due to lacking room state, but we still need to tell the client
+	// about the invite so we can accept it, hence we return an output
+	// event to send to the Sync API.
 	if !isOriginLocal {
-		// The invite originated over federation. Process the membership
-		// update, which will notify the sync API etc about the incoming
-		// invite. We do NOT send an InputRoomEvent for the invite as it
-		// will never pass auth checks due to lacking room state, but we
-		// still need to tell the client about the invite so we can accept
-		// it, hence we return an output event to send to the sync api.
-		var updater *shared.MembershipUpdater
-		updater, err = r.DB.MembershipUpdater(ctx, roomID, targetUserID, isTargetLocal, req.RoomVersion)
-		if err != nil {
-			return nil, fmt.Errorf("r.DB.MembershipUpdater: %w", err)
-		}
-
-		unwrapped := event.Unwrap()
-		var outputUpdates []api.OutputEvent
-		outputUpdates, err = helpers.UpdateToInviteMembership(updater, unwrapped, nil, req.Event.RoomVersion)
-		if err != nil {
-			return nil, fmt.Errorf("updateToInviteMembership: %w", err)
-		}
-
-		if err = updater.Commit(); err != nil {
-			return nil, fmt.Errorf("updater.Commit: %w", err)
-		}
-		logger.Debugf("updated membership to invite and sending invite OutputEvent")
-		return outputUpdates, nil
+		return updateMembershipTableManually()
 	}
 
 	// The invite originated locally. Therefore we have a responsibility to
@@ -229,12 +248,11 @@ func (r *Inviter) PerformInvite(
 			Code: api.PerformErrorNotAllowed,
 		}
 		logger.WithError(err).WithField("event_id", event.EventID()).Error("r.InputRoomEvents failed")
-		return nil, nil
 	}
 
 	// Don't notify the sync api of this event in the same way as a federated invite so the invitee
 	// gets the invite, as the roomserver will do this when it processes the m.room.member invite.
-	return nil, nil
+	return outputUpdates, nil
 }
 
 func buildInviteStrippedState(
@@ -258,7 +276,7 @@ func buildInviteStrippedState(
 	}
 	roomState := state.NewStateResolution(db, info)
 	stateEntries, err := roomState.LoadStateAtSnapshotForStringTuples(
-		ctx, info.StateSnapshotNID, stateWanted,
+		ctx, info.StateSnapshotNID(), stateWanted,
 	)
 	if err != nil {
 		return nil, err
@@ -268,21 +268,19 @@ func (r *Joiner) performJoinRoomByID(
 	case nil:
 		// The room join is local. Send the new join event into the
 		// roomserver. First of all check that the user isn't already
-		// a member of the room.
-		alreadyJoined := false
-		for _, se := range buildRes.StateEvents {
-			if !se.StateKeyEquals(userID) {
-				continue
-			}
-			if membership, merr := se.Membership(); merr == nil {
-				alreadyJoined = (membership == gomatrixserverlib.Join)
-				break
-			}
+		// a member of the room. This is best-effort (as in we won't
+		// fail if we can't find the existing membership) because there
+		// is really no harm in just sending another membership event.
+		membershipReq := &api.QueryMembershipForUserRequest{
+			RoomID: req.RoomIDOrAlias,
+			UserID: userID,
 		}
+		membershipRes := &api.QueryMembershipForUserResponse{}
+		_ = r.Queryer.QueryMembershipForUser(ctx, membershipReq, membershipRes)
 
 		// If we haven't already joined the room then send an event
 		// into the room changing our membership status.
-		if !alreadyJoined {
+		if !membershipRes.RoomExists || !membershipRes.IsInRoom {
 			inputReq := rsAPI.InputRoomEventsRequest{
 				InputRoomEvents: []rsAPI.InputRoomEvent{
 					{
@@ -228,14 +228,14 @@ func (r *Leaver) performFederatedRejectInvite(
 		util.GetLogger(ctx).WithError(err).Errorf("failed to get MembershipUpdater, still retiring invite event")
 	}
 	if updater != nil {
-		if _, err = updater.SetToLeave(req.UserID, eventID); err != nil {
-			util.GetLogger(ctx).WithError(err).Errorf("failed to set membership to leave, still retiring invite event")
+		if err = updater.Delete(); err != nil {
+			util.GetLogger(ctx).WithError(err).Errorf("failed to delete membership, still retiring invite event")
 			if err = updater.Rollback(); err != nil {
-				util.GetLogger(ctx).WithError(err).Errorf("failed to rollback membership leave, still retiring invite event")
+				util.GetLogger(ctx).WithError(err).Errorf("failed to rollback deleting membership, still retiring invite event")
 			}
 		} else {
 			if err = updater.Commit(); err != nil {
-				util.GetLogger(ctx).WithError(err).Errorf("failed to commit membership update, still retiring invite event")
+				util.GetLogger(ctx).WithError(err).Errorf("failed to commit deleting membership, still retiring invite event")
 			}
 		}
 	}
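The hunk above swaps SetToLeave for Delete but keeps the same commit-or-rollback shape: failures are logged and the invite is retired either way. A sketch of that control flow, using a stand-in updater type whose methods simply mirror the ones exercised above (nothing else is implied about Dendrite's interface):

```go
package main

import (
	"errors"
	"fmt"
)

// updater is a stand-in for a membership updater; only the methods used in
// the hunk above are modelled.
type updater struct{ fail bool }

func (u *updater) Delete() error {
	if u.fail {
		return errors.New("delete failed")
	}
	return nil
}
func (u *updater) Rollback() error { return nil }
func (u *updater) Commit() error   { return nil }

// rejectInvite mirrors the control flow above: try to delete the membership
// row, roll back on failure, commit on success, and never abort the caller,
// since the invite is retired regardless.
func rejectInvite(u *updater) {
	if err := u.Delete(); err != nil {
		fmt.Println("failed to delete membership, still retiring invite event:", err)
		if err = u.Rollback(); err != nil {
			fmt.Println("failed to rollback deleting membership:", err)
		}
	} else {
		if err = u.Commit(); err != nil {
			fmt.Println("failed to commit deleting membership:", err)
		}
	}
}

func main() {
	rejectInvite(&updater{fail: false})
	rejectInvite(&updater{fail: true})
}
```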
@@ -16,6 +16,7 @@ package query
 
 import (
 	"context"
+	"database/sql"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -60,7 +61,7 @@ func (r *Queryer) QueryStateAfterEvents(
 	if err != nil {
 		return err
 	}
-	if info == nil || info.IsStub {
+	if info == nil || info.IsStub() {
 		return nil
 	}
 
@@ -225,6 +226,9 @@ func (r *Queryer) QueryMembershipsForRoom(
 	var eventNIDs []types.EventNID
 	eventNIDs, err = r.DB.GetMembershipEventNIDsForRoom(ctx, info.RoomNID, request.JoinedOnly, request.LocalOnly)
 	if err != nil {
+		if err == sql.ErrNoRows {
+			return nil
+		}
 		return fmt.Errorf("r.DB.GetMembershipEventNIDsForRoom: %w", err)
 	}
 	events, err = r.DB.Events(ctx, eventNIDs)
@@ -260,6 +264,9 @@ func (r *Queryer) QueryMembershipsForRoom(
 	var eventNIDs []types.EventNID
 	eventNIDs, err = r.DB.GetMembershipEventNIDsForRoom(ctx, info.RoomNID, request.JoinedOnly, false)
 	if err != nil {
+		if err == sql.ErrNoRows {
+			return nil
+		}
 		return err
 	}
 
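Both QueryMembershipsForRoom hunks add the same guard: sql.ErrNoRows from the storage layer now means "no memberships" and is swallowed instead of being surfaced as an error. A minimal sketch of that shape, with fetchEventNIDs standing in for the real storage call:

```go
package main

import (
	"database/sql"
	"fmt"
)

// fetchEventNIDs stands in for a storage call such as
// GetMembershipEventNIDsForRoom; here it always reports "no rows".
func fetchEventNIDs() ([]int64, error) {
	return nil, sql.ErrNoRows
}

// membershipNIDs mirrors the hunks above: sql.ErrNoRows means the room simply
// has no matching membership rows, so an empty result is returned instead of
// an error; anything else is still wrapped and propagated.
func membershipNIDs() ([]int64, error) {
	nids, err := fetchEventNIDs()
	if err != nil {
		if err == sql.ErrNoRows {
			return nil, nil
		}
		return nil, fmt.Errorf("fetchEventNIDs: %w", err)
	}
	return nids, nil
}

func main() {
	nids, err := membershipNIDs()
	fmt.Println(nids, err) // prints: [] <nil>
}
```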
@@ -295,7 +302,7 @@ func (r *Queryer) QueryServerJoinedToRoom(
 	if err != nil {
 		return fmt.Errorf("r.DB.RoomInfo: %w", err)
 	}
-	if info == nil || info.IsStub {
+	if info == nil || info.IsStub() {
 		return nil
 	}
 	response.RoomExists = true
@@ -344,8 +351,8 @@ func (r *Queryer) QueryServerAllowedToSeeEvent(
 	if err != nil {
 		return err
 	}
-	if info == nil {
-		return fmt.Errorf("QueryServerAllowedToSeeEvent: no room info for room %s", roomID)
+	if info == nil || info.IsStub() {
+		return nil
 	}
 	response.AllowedToSeeEvent, err = helpers.CheckServerAllowedToSeeEvent(
 		ctx, r.DB, info, request.EventID, request.ServerName, inRoomRes.IsInRoom,
@@ -383,7 +390,7 @@ func (r *Queryer) QueryMissingEvents(
 	if err != nil {
 		return err
 	}
-	if info == nil || info.IsStub {
+	if info == nil || info.IsStub() {
 		return fmt.Errorf("missing RoomInfo for room %s", events[0].RoomID())
 	}
 
@@ -422,7 +429,7 @@ func (r *Queryer) QueryStateAndAuthChain(
 	if err != nil {
 		return err
 	}
-	if info == nil || info.IsStub {
+	if info == nil || info.IsStub() {
 		return nil
 	}
 	response.RoomExists = true
@@ -767,7 +774,7 @@ func (r *Queryer) QueryRestrictedJoinAllowed(ctx context.Context, req *api.Query
 	if err != nil {
 		return fmt.Errorf("r.DB.RoomInfo: %w", err)
 	}
-	if roomInfo == nil || roomInfo.IsStub {
+	if roomInfo == nil || roomInfo.IsStub() {
 		return nil // fmt.Errorf("room %q doesn't exist or is stub room", req.RoomID)
 	}
 	// If the room version doesn't allow restricted joins then don't
@@ -830,7 +837,7 @@ func (r *Queryer) QueryRestrictedJoinAllowed(ctx context.Context, req *api.Query
 	// See if the room exists. If it doesn't exist or if it's a stub
 	// room entry then we can't check memberships.
 	targetRoomInfo, err := r.DB.RoomInfo(ctx, rule.RoomID)
-	if err != nil || targetRoomInfo == nil || targetRoomInfo.IsStub {
+	if err != nil || targetRoomInfo == nil || targetRoomInfo.IsStub() {
 		res.Resident = false
 		continue
 	}