Mirror of https://github.com/matrix-org/dendrite.git, synced 2026-01-09 07:03:10 -06:00

Commit 3f6008cf0c: Merge branch 'main' of github.com:matrix-org/dendrite into s7evink/expireedus

.github/workflows/dendrite.yml (vendored), 38 lines changed
@@ -17,13 +17,14 @@ jobs:
     name: WASM build test
     timeout-minutes: 5
     runs-on: ubuntu-latest
     if: ${{ false }} # disable for now
     steps:
       - uses: actions/checkout@v2

       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.16
+          go-version: 1.18

       - uses: actions/cache@v2
         with:

@@ -96,7 +97,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        go: ["1.16", "1.17", "1.18"]
+        go: ["1.18"]
     steps:
       - uses: actions/checkout@v3
       - name: Setup go

@@ -126,7 +127,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        go: ["1.16", "1.17", "1.18"]
+        go: ["1.18"]
         goos: ["linux"]
         goarch: ["amd64", "386"]
     steps:

@@ -159,7 +160,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        go: ["1.16", "1.17", "1.18"]
+        go: ["1.18"]
         goos: ["windows"]
         goarch: ["amd64"]
     steps:

@@ -208,7 +209,7 @@ jobs:
       - name: Setup go
         uses: actions/setup-go@v2
         with:
-          go-version: "1.16"
+          go-version: "1.18"
       - uses: actions/cache@v3
         with:
           path: |

@@ -222,6 +223,31 @@ jobs:
       - name: Test upgrade
         run: ./dendrite-upgrade-tests --head .

+  # run database upgrade tests, skipping over one version
+  upgrade_test_direct:
+    name: Upgrade tests from HEAD-2
+    timeout-minutes: 20
+    needs: initial-tests-done
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: Setup go
+        uses: actions/setup-go@v2
+        with:
+          go-version: "1.18"
+      - uses: actions/cache@v3
+        with:
+          path: |
+            ~/.cache/go-build
+            ~/go/pkg/mod
+          key: ${{ runner.os }}-go-upgrade-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-upgrade
+      - name: Build upgrade-tests
+        run: go build ./cmd/dendrite-upgrade-tests
+      - name: Test upgrade
+        run: ./dendrite-upgrade-tests -direct -from HEAD-2 --head .
+
   # run Sytest in different variations
   sytest:
     timeout-minutes: 20

@@ -358,7 +384,7 @@ jobs:

   integration-tests-done:
     name: Integration tests passed
-    needs: [initial-tests-done, upgrade_test, sytest, complement]
+    needs: [initial-tests-done, upgrade_test, upgrade_test_direct, sytest, complement]
     runs-on: ubuntu-latest
     if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
     steps:
CHANGES.md, 95 lines changed: new release notes for 0.8.6 through 0.9.0 are added above the existing 0.8.5 entry.
@@ -1,5 +1,100 @@
# Changelog

## Dendrite 0.9.0 (2022-08-01)

### Features

* Dendrite now uses Ristretto for managing in-memory caches
  * Should improve cache utilisation considerably over time by more intelligently selecting and managing cache entries compared to the previous LRU-based cache
  * Defaults to a 1GB cache size if not configured otherwise
  * The estimated cache size in memory and maximum age can now be configured with new [configuration options](https://github.com/matrix-org/dendrite/blob/e94ef84aaba30e12baf7f524c4e7a36d2fdeb189/dendrite-sample.monolith.yaml#L44-L61) to prevent unbounded cache growth
* Added support for serving the `/.well-known/matrix/client` hint directly from Dendrite
  * Configurable with the new [configuration option](https://github.com/matrix-org/dendrite/blob/e94ef84aaba30e12baf7f524c4e7a36d2fdeb189/dendrite-sample.monolith.yaml#L67-L69)
* Refactored membership updater, which should eliminate some bugs caused by the membership table getting out of sync with the room state
* The User API is now responsible for sending account data updates to other components, which may fix some races and duplicate account data events
* Optimised database query for checking whether a remote server is allowed to request an event over federation without using anywhere near as much CPU time (PostgreSQL only)
* Database migrations have been refactored to eliminate some problems that were present with `goose` and upgrading from older Dendrite versions
* Media fetching will now use the `/v3` endpoints for downloading media from remote homeservers
* HTTP 404 and HTTP 405 errors from the client-facing APIs should now be returned with CORS headers so that web-based clients do not produce incorrect access control warnings for unknown endpoints
* Some preparation work for full history visibility support

### Fixes

* Fixes a crash that could occur during event redaction
* The `/members` endpoint will no longer incorrectly return HTTP 500 as a result of some invite events
* Send-to-device messages should now be ordered more reliably and the last position in the stream updated correctly
* Parsing of appservice configuration files is now less strict (contributed by [Kab1r](https://github.com/Kab1r))
* The sync API should now identify shared users correctly when waking up for E2EE key changes
* The federation `/state` endpoint will now return a HTTP 403 when the state before an event isn't known instead of a HTTP 500
* Presence timestamps should now be calculated with the correct precision
* A race condition in the roomserver's room info has been fixed
* A race condition in the sync API has been fixed

## Dendrite 0.8.9 (2022-07-01)

### Features

* Incoming device list updates over federation are now queued in JetStream for processing so that they will no longer block incoming federation transactions and should never end up dropped, which will hopefully help E2EE reliability
* The `/context` endpoint now returns `"start"` and `"end"` parameters to allow pagination from a context call
* The `/messages` endpoint will no longer return `"end"` when there are no more messages remaining
* Deactivated user accounts will now leave all rooms automatically
* New admin endpoint `/_dendrite/admin/evacuateUser/{userID}` has been added for forcing a local user to leave all joined rooms
* Dendrite will now automatically attempt to raise the file descriptor limit at startup if it is too low

### Fixes

* A rare crash when retrieving remote device lists has been fixed
* Fixes a bug where events were not redacted properly over federation
* The `/invite` endpoints will now return an error instead of silently proceeding if the user ID is obviously malformed

## Dendrite 0.8.8 (2022-06-09)

### Features

* The performance of state resolution has been increased significantly for larger rooms
* A number of changes have been made to rate limiting:
  * Logged in users will now be rate-limited on a per-session basis rather than by remote IP
  * Rate limiting no longer applies to admin or appservice users
  * It is now possible to configure additional users that are exempt from rate limiting using the `exempt_user_ids` option in the `rate_limiting` section of the Dendrite config
* Setting state is now idempotent via the client API state endpoints

### Fixes

* Room upgrades now properly propagate tombstone events to remote servers
* Room upgrades will no longer send tombstone events if creating the upgraded room fails
* A crash has been fixed when evaluating restricted room joins

## Dendrite 0.8.7 (2022-06-01)

### Features

* Support added for room version 10

### Fixes

* A number of state handling bugs have been fixed, which previously resulted in missing state events, unexpected state deletions, reverted memberships and unexpectedly rejected/soft-failed events in some specific cases
* Fixed destination queue performance issues as a result of missing indexes, which speeds up outbound federation considerably
* A bug which could cause the `/register` endpoint to return HTTP 500 has been fixed

## Dendrite 0.8.6 (2022-05-26)

### Features

* Room versions 8 and 9 are now marked as stable
* Dendrite can now assist remote users to join restricted rooms via `/make_join` and `/send_join`

### Fixes

* The sync API no longer returns immediately on `/sync` requests unnecessarily if it can be avoided
* A race condition has been fixed in the sync API when updating presence via `/sync`
* A race condition has been fixed sending E2EE keys to remote servers over federation when joining rooms
* The `trusted_private_chat` preset should now grant power level 100 to all participant users, which should improve the user experience of direct messages
* Invited users are now authed correctly in restricted rooms
* The `join_authorised_by_users_server` key is now correctly stripped in restricted rooms when updating the membership event
* Appservices should now receive invite events correctly
* Device list updates should no longer contain optional fields with `null` values
* The `/deactivate` endpoint has been fixed to no longer confuse Element with incorrect completed flows

## Dendrite 0.8.5 (2022-05-13)

### Features
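The `/.well-known/matrix/client` hint mentioned in the 0.9.0 notes is served as a small JSON document containing an `m.homeserver` object with a `base_url` field (the exact shape is visible in the client API routing change further down this diff). As a rough illustration only, assuming a homeserver already serving the hint at the placeholder domain `example.com`, a client-side lookup might look like this sketch:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// wellKnown mirrors the JSON served at /.well-known/matrix/client:
// {"m.homeserver": {"base_url": "..."}}
type wellKnown struct {
	Homeserver struct {
		BaseURL string `json:"base_url"`
	} `json:"m.homeserver"`
}

func main() {
	// "example.com" is a placeholder; substitute a real server name.
	resp, err := http.Get("https://example.com/.well-known/matrix/client")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var wk wellKnown
	if err := json.NewDecoder(resp.Body).Decode(&wk); err != nil {
		panic(err)
	}
	fmt.Println("client-server API base URL:", wk.Homeserver.BaseURL)
}
```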
@@ -36,7 +36,7 @@ If you have further questions, please take a look at [our FAQ](docs/FAQ.md) or j

 ## Requirements

-To build Dendrite, you will need Go 1.16 or later.
+To build Dendrite, you will need Go 1.18 or later.

 For a usable federating Dendrite deployment, you will also need:
@@ -96,10 +96,9 @@ than features that massive deployments may be interested in (User Directory, Ope
 This means Dendrite supports amongst others:

 - Core room functionality (creating rooms, invites, auth rules)
-- Full support for room versions 1 to 7
-- Experimental support for room versions 8 to 9
+- Room versions 1 to 10 supported
 - Backfilling locally and via federation
-- Accounts, Profiles and Devices
+- Accounts, profiles and devices
 - Published room lists
 - Typing
 - Media APIs
@@ -83,29 +83,38 @@ func (s *OutputRoomEventConsumer) onMessage(ctx context.Context, msg *nats.Msg)
         return true
     }

-    if output.Type != api.OutputTypeNewRoomEvent || output.NewRoomEvent == nil {
-        return true
-    }
-    log.WithFields(log.Fields{
-        "type": output.Type,
-    }).Debug("Got a message in OutputRoomEventConsumer")
-
-    newEventID := output.NewRoomEvent.Event.EventID()
-    events := make([]*gomatrixserverlib.HeaderedEvent, 0, len(output.NewRoomEvent.AddsStateEventIDs))
-    events = append(events, output.NewRoomEvent.Event)
-    if len(output.NewRoomEvent.AddsStateEventIDs) > 0 {
-        eventsReq := &api.QueryEventsByIDRequest{
-            EventIDs: make([]string, 0, len(output.NewRoomEvent.AddsStateEventIDs)),
-        }
-        eventsRes := &api.QueryEventsByIDResponse{}
-        for _, eventID := range output.NewRoomEvent.AddsStateEventIDs {
-            if eventID != newEventID {
-                eventsReq.EventIDs = append(eventsReq.EventIDs, eventID)
-            }
-        }
-        if len(eventsReq.EventIDs) > 0 {
-            if err := s.rsAPI.QueryEventsByID(s.ctx, eventsReq, eventsRes); err != nil {
-                return false
-            }
-            events = append(events, eventsRes.Events...)
-        }
-    }
+    events := []*gomatrixserverlib.HeaderedEvent{}
+    if output.Type == api.OutputTypeNewRoomEvent && output.NewRoomEvent != nil {
+        newEventID := output.NewRoomEvent.Event.EventID()
+        events = append(events, output.NewRoomEvent.Event)
+        if len(output.NewRoomEvent.AddsStateEventIDs) > 0 {
+            eventsReq := &api.QueryEventsByIDRequest{
+                EventIDs: make([]string, 0, len(output.NewRoomEvent.AddsStateEventIDs)),
+            }
+            eventsRes := &api.QueryEventsByIDResponse{}
+            for _, eventID := range output.NewRoomEvent.AddsStateEventIDs {
+                if eventID != newEventID {
+                    eventsReq.EventIDs = append(eventsReq.EventIDs, eventID)
+                }
+            }
+            if len(eventsReq.EventIDs) > 0 {
+                if err := s.rsAPI.QueryEventsByID(s.ctx, eventsReq, eventsRes); err != nil {
+                    return false
+                }
+                events = append(events, eventsRes.Events...)
+            }
+        }
+    } else if output.Type == api.OutputTypeNewInviteEvent && output.NewInviteEvent != nil {
+        events = append(events, output.NewInviteEvent.Event)
+    } else {
+        log.WithFields(log.Fields{
+            "type": output.Type,
+        }).Debug("appservice OutputRoomEventConsumer ignoring event", string(msg.Data))
+        return true
+    }

     // Send event to any relevant application services
build.sh, 2 lines changed

@@ -21,4 +21,4 @@ mkdir -p bin

 CGO_ENABLED=1 go build -trimpath -ldflags "$FLAGS" -v -o "bin/" ./cmd/...

-CGO_ENABLED=0 GOOS=js GOARCH=wasm go build -trimpath -ldflags "$FLAGS" -o bin/main.wasm ./cmd/dendritejs-pinecone
+# CGO_ENABLED=0 GOOS=js GOARCH=wasm go build -trimpath -ldflags "$FLAGS" -o bin/main.wasm ./cmd/dendritejs-pinecone
@@ -8,7 +8,6 @@ COPY . /build

 RUN mkdir -p bin
 RUN go build -trimpath -o bin/ ./cmd/dendrite-monolith-server
-RUN go build -trimpath -o bin/ ./cmd/goose
 RUN go build -trimpath -o bin/ ./cmd/create-account
 RUN go build -trimpath -o bin/ ./cmd/generate-keys
@@ -8,7 +8,6 @@ COPY . /build

 RUN mkdir -p bin
 RUN go build -trimpath -o bin/ ./cmd/dendrite-polylith-multi
-RUN go build -trimpath -o bin/ ./cmd/goose
 RUN go build -trimpath -o bin/ ./cmd/create-account
 RUN go build -trimpath -o bin/ ./cmd/generate-keys
@@ -239,7 +239,7 @@ func (m *DendriteMonolith) Start() {
     m.PineconeRouter = pineconeRouter.NewRouter(logrus.WithField("pinecone", "router"), sk, false)
     m.PineconeQUIC = pineconeSessions.NewSessions(logrus.WithField("pinecone", "sessions"), m.PineconeRouter, []string{"matrix"})
     m.PineconeMulticast = pineconeMulticast.NewMulticast(logrus.WithField("pinecone", "multicast"), m.PineconeRouter)
-    m.PineconeManager = pineconeConnections.NewConnectionManager(m.PineconeRouter)
+    m.PineconeManager = pineconeConnections.NewConnectionManager(m.PineconeRouter, nil)

     prefix := hex.EncodeToString(pk)
     cfg := &config.Dendrite{}

@@ -261,7 +261,7 @@ func (m *DendriteMonolith) Start() {
     cfg.MSCs.MSCs = []string{"msc2836", "msc2946"}
     cfg.ClientAPI.RegistrationDisabled = false
     cfg.ClientAPI.OpenRegistrationWithoutVerificationEnabled = true
-    if err := cfg.Derive(); err != nil {
+    if err = cfg.Derive(); err != nil {
         panic(err)
     }

@@ -342,11 +342,23 @@ func (m *DendriteMonolith) Start() {

     go func() {
         m.logger.Info("Listening on ", cfg.Global.ServerName)
-        m.logger.Fatal(m.httpServer.Serve(m.PineconeQUIC.Protocol("matrix")))
+
+        switch m.httpServer.Serve(m.PineconeQUIC.Protocol("matrix")) {
+        case net.ErrClosed, http.ErrServerClosed:
+            m.logger.Info("Stopped listening on ", cfg.Global.ServerName)
+        default:
+            m.logger.Fatal(err)
+        }
     }()
     go func() {
         logrus.Info("Listening on ", m.listener.Addr())
-        logrus.Fatal(http.Serve(m.listener, httpRouter))
+
+        switch http.Serve(m.listener, httpRouter) {
+        case net.ErrClosed, http.ErrServerClosed:
+            m.logger.Info("Stopped listening on ", cfg.Global.ServerName)
+        default:
+            m.logger.Fatal(err)
+        }
     }()
 }
@@ -170,11 +170,11 @@ func (m *DendriteMonolith) Start() {

     go func() {
         m.logger.Info("Listening on ", ygg.DerivedServerName())
-        m.logger.Fatal(m.httpServer.Serve(ygg))
+        m.logger.Error(m.httpServer.Serve(ygg))
     }()
     go func() {
         logrus.Info("Listening on ", m.listener.Addr())
-        logrus.Fatal(http.Serve(m.listener, httpRouter))
+        logrus.Error(http.Serve(m.listener, httpRouter))
     }()
     go func() {
         logrus.Info("Sending wake-up message to known nodes")
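Both monolith changes above stop treating a closed listener as a fatal condition. The standard-library way to express the same idea, shown here as a general sketch rather than Dendrite's exact code, is to compare the error returned by Serve against http.ErrServerClosed (and net.ErrClosed for raw listeners) before logging it as fatal:

```go
package main

import (
	"errors"
	"log"
	"net"
	"net/http"
)

func main() {
	srv := &http.Server{Addr: "127.0.0.1:8008", Handler: http.NewServeMux()}

	// In a real server this would run in a goroutine alongside other listeners.
	err := srv.ListenAndServe()
	switch {
	case errors.Is(err, http.ErrServerClosed), errors.Is(err, net.ErrClosed):
		// Expected during a clean shutdown: log it and carry on.
		log.Println("listener stopped:", err)
	case err != nil:
		// Anything else is an unexpected failure.
		log.Fatal(err)
	}
}
```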
@@ -1,4 +1,4 @@
-FROM golang:1.16-stretch as build
+FROM golang:1.18-stretch as build
 RUN apt-get update && apt-get install -y sqlite3
 WORKDIR /build

@@ -27,6 +27,6 @@ EXPOSE 8008 8448
 # At runtime, generate TLS cert based on the CA now mounted at /ca
 # At runtime, replace the SERVER_NAME with what we are told
 CMD ./generate-keys --server $SERVER_NAME --tls-cert server.crt --tls-key server.key --tls-authority-cert /complement/ca/ca.crt --tls-authority-key /complement/ca/ca.key && \
     ./generate-config -server $SERVER_NAME --ci > dendrite.yaml && \
     cp /complement/ca/ca.crt /usr/local/share/ca-certificates/ && update-ca-certificates && \
     ./dendrite-monolith-server --really-enable-open-registration --tls-cert server.crt --tls-key server.key --config dendrite.yaml -api=${API:-0}
 (the three continuation lines of the CMD appear identically on both sides of this hunk; only their indentation changes)
@@ -6,7 +6,7 @@
 #
 # Use these mounts to make use of this dockerfile:
 # COMPLEMENT_HOST_MOUNTS='/your/local/dendrite:/dendrite:ro;/your/go/path:/go:ro'
-FROM golang:1.16-stretch
+FROM golang:1.18-stretch
 RUN apt-get update && apt-get install -y sqlite3

 WORKDIR /runtime

@@ -16,24 +16,24 @@ EXPOSE 8008 8448
 # This script compiles Dendrite for us.
 RUN echo '\
 #!/bin/bash -eux \n\
 if test -f "/runtime/dendrite-monolith-server"; then \n\
     echo "Skipping compilation; binaries exist" \n\
     exit 0 \n\
 fi \n\
 cd /dendrite \n\
 go build -v -o /runtime /dendrite/cmd/dendrite-monolith-server \n\
 ' > compile.sh && chmod +x compile.sh

 # This script runs Dendrite for us. Must be run in the /runtime directory.
 RUN echo '\
 #!/bin/bash -eu \n\
 ./generate-keys --private-key matrix_key.pem \n\
 ./generate-keys --server $SERVER_NAME --tls-cert server.crt --tls-key server.key --tls-authority-cert /complement/ca/ca.crt --tls-authority-key /complement/ca/ca.key \n\
 ./generate-config -server $SERVER_NAME --ci > dendrite.yaml \n\
 cp /complement/ca/ca.crt /usr/local/share/ca-certificates/ && update-ca-certificates \n\
 ./dendrite-monolith-server --really-enable-open-registration --tls-cert server.crt --tls-key server.key --config dendrite.yaml \n\
 ' > run.sh && chmod +x run.sh
 (both echoed scripts appear identically on each side of this hunk; only their indentation changes)

 WORKDIR /cache
@@ -1,4 +1,4 @@
-FROM golang:1.16-stretch as build
+FROM golang:1.18-stretch as build
 RUN apt-get update && apt-get install -y postgresql
 WORKDIR /build

@@ -9,16 +9,16 @@ RUN sed -i "s%127.0.0.1/32 md5%127.0.0.1/32 trust%g" /etc/
 # This entry script starts postgres, waits for it to be up then starts dendrite
 RUN echo '\
 #!/bin/bash -eu \n\
 pg_lsclusters \n\
 pg_ctlcluster 9.6 main start \n\
 \n\
 until pg_isready \n\
 do \n\
     echo "Waiting for postgres"; \n\
     sleep 1; \n\
 done \n\
 ' > run_postgres.sh && chmod +x run_postgres.sh
 (the echoed script appears identically on both sides of this hunk; only its indentation changes)

 # we will dump the binaries and config file to this location to ensure any local untracked files
 # that come from the COPY . . file don't contaminate the build

@@ -46,9 +46,9 @@ EXPOSE 8008 8448
 # At runtime, generate TLS cert based on the CA now mounted at /ca
 # At runtime, replace the SERVER_NAME with what we are told
 CMD /build/run_postgres.sh && ./generate-keys --server $SERVER_NAME --tls-cert server.crt --tls-key server.key --tls-authority-cert /complement/ca/ca.crt --tls-authority-key /complement/ca/ca.key && \
     ./generate-config -server $SERVER_NAME --ci > dendrite.yaml && \
     # Replace the connection string with a single postgres DB, using user/db = 'postgres' and no password, bump max_conns
     sed -i "s%connection_string:.*$%connection_string: postgresql://postgres@localhost/postgres?sslmode=disable%g" dendrite.yaml && \
     sed -i 's/max_open_conns:.*$/max_open_conns: 100/g' dendrite.yaml && \
     cp /complement/ca/ca.crt /usr/local/share/ca-certificates/ && update-ca-certificates && \
     ./dendrite-monolith-server --really-enable-open-registration --tls-cert server.crt --tls-key server.key --config dendrite.yaml -api=${API:-0}
 (the continuation lines of the CMD appear identically on both sides of this hunk; only their indentation changes)
@@ -102,8 +102,7 @@ type userInteractiveFlow struct {
 // the user already has a valid access token, but we want to double-check
 // that it isn't stolen by re-authenticating them.
 type UserInteractive struct {
-    Completed []string
-    Flows []userInteractiveFlow
+    Flows []userInteractiveFlow
     // Map of login type to implementation
     Types map[string]Type
     // Map of session ID to completed login types, will need to be extended in future

@@ -116,7 +115,6 @@ func NewUserInteractive(userAccountAPI api.UserLoginAPI, cfg *config.ClientAPI)
         Config: cfg,
     }
     return &UserInteractive{
-        Completed: []string{},
         Flows: []userInteractiveFlow{
             {
                 Stages: []string{typePassword.Name()},

@@ -140,7 +138,6 @@ func (u *UserInteractive) IsSingleStageFlow(authType string) bool {

 func (u *UserInteractive) AddCompletedStage(sessionID, authType string) {
     // TODO: Handle multi-stage flows
-    u.Completed = append(u.Completed, authType)
     delete(u.Sessions, sessionID)
 }

@@ -157,7 +154,7 @@ func (u *UserInteractive) Challenge(sessionID string) *util.JSONResponse {
     return &util.JSONResponse{
         Code: 401,
         JSON: Challenge{
-            Completed: u.Completed,
+            Completed: u.Sessions[sessionID],
             Flows: u.Flows,
             Session: sessionID,
             Params: make(map[string]interface{}),
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestUserInteractive_AddCompletedStage(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
sessionID string
|
||||
}{
|
||||
{
|
||||
name: "first user",
|
||||
sessionID: util.RandomString(8),
|
||||
},
|
||||
{
|
||||
name: "second user",
|
||||
sessionID: util.RandomString(8),
|
||||
},
|
||||
{
|
||||
name: "third user",
|
||||
sessionID: util.RandomString(8),
|
||||
},
|
||||
}
|
||||
u := setup()
|
||||
ctx := context.Background()
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
_, resp := u.Verify(ctx, []byte("{}"), nil)
|
||||
challenge, ok := resp.JSON.(Challenge)
|
||||
if !ok {
|
||||
t.Fatalf("expected a Challenge, got %T", resp.JSON)
|
||||
}
|
||||
if len(challenge.Completed) > 0 {
|
||||
t.Fatalf("expected 0 completed stages, got %d", len(challenge.Completed))
|
||||
}
|
||||
u.AddCompletedStage(tt.sessionID, "")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -48,7 +48,6 @@ func AddPublicRoutes(

     syncProducer := &producers.SyncAPIProducer{
         JetStream: js,
         TopicClientData: cfg.Matrix.JetStream.Prefixed(jetstream.OutputClientData),
         TopicReceiptEvent: cfg.Matrix.JetStream.Prefixed(jetstream.OutputReceiptEvent),
         TopicSendToDeviceEvent: cfg.Matrix.JetStream.Prefixed(jetstream.OutputSendToDeviceEvent),
         TopicTypingEvent: cfg.Matrix.JetStream.Prefixed(jetstream.OutputTypingEvent),

@@ -59,6 +58,7 @@ func AddPublicRoutes(

     routing.Setup(
         base.PublicClientAPIMux,
+        base.PublicWellKnownAPIMux,
         base.SynapseAdminMux,
         base.DendriteAdminMux,
         cfg, rsAPI, asAPI,
@@ -154,6 +154,12 @@ func MissingParam(msg string) *MatrixError {
     return &MatrixError{"M_MISSING_PARAM", msg}
 }

+// UnableToAuthoriseJoin is an error that is returned when a server can't
+// determine whether to allow a restricted join or not.
+func UnableToAuthoriseJoin(msg string) *MatrixError {
+    return &MatrixError{"M_UNABLE_TO_AUTHORISE_JOIN", msg}
+}
+
 // LeaveServerNoticeError is an error returned when trying to reject an invite
 // for a server notice room.
 func LeaveServerNoticeError() *MatrixError {
|
|||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/matrix-org/dendrite/internal/eventutil"
|
||||
"github.com/matrix-org/dendrite/setup/jetstream"
|
||||
"github.com/matrix-org/dendrite/syncapi/types"
|
||||
userapi "github.com/matrix-org/dendrite/userapi/api"
|
||||
|
|
@ -31,7 +31,6 @@ import (
|
|||
|
||||
// SyncAPIProducer produces events for the sync API server to consume
|
||||
type SyncAPIProducer struct {
|
||||
TopicClientData string
|
||||
TopicReceiptEvent string
|
||||
TopicSendToDeviceEvent string
|
||||
TopicTypingEvent string
|
||||
|
|
@ -41,36 +40,6 @@ type SyncAPIProducer struct {
|
|||
UserAPI userapi.ClientUserAPI
|
||||
}
|
||||
|
||||
// SendData sends account data to the sync API server
|
||||
func (p *SyncAPIProducer) SendData(userID string, roomID string, dataType string, readMarker *eventutil.ReadMarkerJSON, ignoredUsers *types.IgnoredUsers) error {
|
||||
m := &nats.Msg{
|
||||
Subject: p.TopicClientData,
|
||||
Header: nats.Header{},
|
||||
}
|
||||
m.Header.Set(jetstream.UserID, userID)
|
||||
|
||||
data := eventutil.AccountData{
|
||||
RoomID: roomID,
|
||||
Type: dataType,
|
||||
ReadMarker: readMarker,
|
||||
IgnoredUsers: ignoredUsers,
|
||||
}
|
||||
var err error
|
||||
m.Data, err = json.Marshal(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"user_id": userID,
|
||||
"room_id": roomID,
|
||||
"data_type": dataType,
|
||||
}).Tracef("Producing to topic '%s'", p.TopicClientData)
|
||||
|
||||
_, err = p.JetStream.PublishMsg(m)
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *SyncAPIProducer) SendReceipt(
|
||||
ctx context.Context,
|
||||
userID, roomID, eventID, receiptType string, timestamp gomatrixserverlib.Timestamp,
|
||||
|
|
@ -83,7 +52,7 @@ func (p *SyncAPIProducer) SendReceipt(
|
|||
m.Header.Set(jetstream.RoomID, roomID)
|
||||
m.Header.Set(jetstream.EventID, eventID)
|
||||
m.Header.Set("type", receiptType)
|
||||
m.Header.Set("timestamp", strconv.Itoa(int(timestamp)))
|
||||
m.Header.Set("timestamp", fmt.Sprintf("%d", timestamp))
|
||||
|
||||
log.WithFields(log.Fields{}).Tracef("Producing to topic '%s'", p.TopicReceiptEvent)
|
||||
_, err := p.JetStream.PublishMsg(m, nats.Context(ctx))
|
||||
|
|
|
|||
|
|
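The receipt producer above switches the timestamp header from `strconv.Itoa(int(timestamp))` to `fmt.Sprintf("%d", timestamp)`. A small standalone comparison, using a plain named integer type as a stand-in for gomatrixserverlib.Timestamp (assumed here to be a 64-bit integer, as millisecond Matrix timestamps are), shows why the latter is the more robust choice: it formats any integer-kinded value directly, without an intermediate conversion to int that could truncate on 32-bit platforms.

```go
package main

import (
	"fmt"
	"strconv"
)

// Timestamp stands in for a millisecond Matrix timestamp type
// (assumed to be backed by a 64-bit unsigned integer).
type Timestamp uint64

func main() {
	ts := Timestamp(1659312000000) // 2022-08-01 in milliseconds

	// fmt.Sprintf formats the named integer type directly.
	a := fmt.Sprintf("%d", ts)

	// strconv.Itoa needs an explicit conversion to int first; on a
	// 32-bit platform that conversion would overflow for values this large.
	b := strconv.Itoa(int(ts))

	fmt.Println(a, b) // identical output on 64-bit platforms
}
```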
@@ -25,7 +25,6 @@ import (
     "github.com/matrix-org/dendrite/clientapi/producers"
     "github.com/matrix-org/dendrite/internal/eventutil"
     roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
     "github.com/matrix-org/dendrite/syncapi/types"
     "github.com/matrix-org/dendrite/userapi/api"

     "github.com/matrix-org/util"

@@ -127,18 +126,6 @@ func SaveAccountData(
         return util.ErrorResponse(err)
     }

-    var ignoredUsers *types.IgnoredUsers
-    if dataType == "m.ignored_user_list" {
-        ignoredUsers = &types.IgnoredUsers{}
-        _ = json.Unmarshal(body, ignoredUsers)
-    }
-
-    // TODO: user API should do this since it's account data
-    if err := syncProducer.SendData(userID, roomID, dataType, nil, ignoredUsers); err != nil {
-        util.GetLogger(req.Context()).WithError(err).Error("syncProducer.SendData failed")
-        return jsonerror.InternalServerError()
-    }
-
     return util.JSONResponse{
         Code: http.StatusOK,
         JSON: struct{}{},

@@ -191,11 +178,6 @@ func SaveReadMarker(
         return util.ErrorResponse(err)
     }

-    if err := syncProducer.SendData(device.UserID, roomID, "m.fully_read", &r, nil); err != nil {
-        util.GetLogger(req.Context()).WithError(err).Error("syncProducer.SendData failed")
-        return jsonerror.InternalServerError()
-    }
-
     // Handle the read receipt that may be included in the read marker
     if r.Read != "" {
         return SetReceipt(req, syncProducer, device, roomID, "m.read", r.Read)
@@ -47,3 +47,40 @@ func AdminEvacuateRoom(req *http.Request, device *userapi.Device, rsAPI roomserv
         },
     }
 }
+
+func AdminEvacuateUser(req *http.Request, device *userapi.Device, rsAPI roomserverAPI.ClientRoomserverAPI) util.JSONResponse {
+    if device.AccountType != userapi.AccountTypeAdmin {
+        return util.JSONResponse{
+            Code: http.StatusForbidden,
+            JSON: jsonerror.Forbidden("This API can only be used by admin users."),
+        }
+    }
+    vars, err := httputil.URLDecodeMapValues(mux.Vars(req))
+    if err != nil {
+        return util.ErrorResponse(err)
+    }
+    userID, ok := vars["userID"]
+    if !ok {
+        return util.JSONResponse{
+            Code: http.StatusBadRequest,
+            JSON: jsonerror.MissingArgument("Expecting user ID."),
+        }
+    }
+    res := &roomserverAPI.PerformAdminEvacuateUserResponse{}
+    rsAPI.PerformAdminEvacuateUser(
+        req.Context(),
+        &roomserverAPI.PerformAdminEvacuateUserRequest{
+            UserID: userID,
+        },
+        res,
+    )
+    if err := res.Error; err != nil {
+        return err.JSONResponse()
+    }
+    return util.JSONResponse{
+        Code: 200,
+        JSON: map[string]interface{}{
+            "affected": res.Affected,
+        },
+    }
+}
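The new handler above is wired up under the Dendrite admin mux as a GET route (see the routing change later in this diff), and the changelog gives the public path as `/_dendrite/admin/evacuateUser/{userID}`. Purely as an illustrative sketch, with a placeholder server URL, user ID and admin access token, invoking it from Go could look like this:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Placeholders: substitute your homeserver URL, an admin access token
	// and a local user ID to evacuate.
	base := "https://localhost:8008"
	token := "ADMIN_ACCESS_TOKEN"
	userID := "@someone:example.com"

	endpoint := base + "/_dendrite/admin/evacuateUser/" + url.PathEscape(userID)
	req, err := http.NewRequest(http.MethodGet, endpoint, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	// On success the handler replies with a JSON object containing an "affected" field.
	fmt.Println(resp.Status, string(body))
}
```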
@@ -44,7 +44,7 @@ func GetAliases(
         return util.ErrorResponse(fmt.Errorf("rsAPI.QueryCurrentState: %w", err))
     }

-    visibility := "invite"
+    visibility := gomatrixserverlib.HistoryVisibilityInvited
     if historyVisEvent, ok := stateRes.StateEvents[stateTuple]; ok {
         var err error
         visibility, err = historyVisEvent.HistoryVisibility()
@@ -245,7 +245,9 @@ func createRoom(
     case presetTrustedPrivateChat:
         joinRuleContent.JoinRule = gomatrixserverlib.Invite
         historyVisibilityContent.HistoryVisibility = historyVisibilityShared
         // TODO If trusted_private_chat, all invitees are given the same power level as the room creator.
+        for _, invitee := range r.Invite {
+            powerLevelContent.Users[invitee] = 100
+        }
     case presetPublicChat:
         joinRuleContent.JoinRule = gomatrixserverlib.Public
         historyVisibilityContent.HistoryVisibility = historyVisibilityShared
@@ -29,9 +29,10 @@ import (
     "sync"
     "time"

+    "github.com/tidwall/gjson"
+
     "github.com/matrix-org/dendrite/internal/eventutil"
     "github.com/matrix-org/dendrite/setup/config"
-    "github.com/tidwall/gjson"

     "github.com/matrix-org/gomatrixserverlib"
     "github.com/matrix-org/gomatrixserverlib/tokens"

@@ -68,9 +69,10 @@ const (
 // It shouldn't be passed by value because it contains a mutex.
 type sessionsDict struct {
     sync.RWMutex
-    sessions map[string][]authtypes.LoginType
-    params map[string]registerRequest
-    timer map[string]*time.Timer
+    sessions               map[string][]authtypes.LoginType
+    sessionCompletedResult map[string]registerResponse
+    params                 map[string]registerRequest
+    timer                  map[string]*time.Timer
     // deleteSessionToDeviceID protects requests to DELETE /devices/{deviceID} from being abused.
     // If a UIA session is started by trying to delete device1, and then UIA is completed by deleting device2,
     // the delete request will fail for device2 since the UIA was initiated by trying to delete device1.

@@ -115,6 +117,7 @@ func (d *sessionsDict) deleteSession(sessionID string) {
     delete(d.params, sessionID)
     delete(d.sessions, sessionID)
     delete(d.deleteSessionToDeviceID, sessionID)
+    delete(d.sessionCompletedResult, sessionID)
     // stop the timer, e.g. because the registration was completed
     if t, ok := d.timer[sessionID]; ok {
         if !t.Stop() {

@@ -130,6 +133,7 @@ func (d *sessionsDict) deleteSession(sessionID string) {
 func newSessionsDict() *sessionsDict {
     return &sessionsDict{
         sessions: make(map[string][]authtypes.LoginType),
+        sessionCompletedResult: make(map[string]registerResponse),
         params: make(map[string]registerRequest),
         timer: make(map[string]*time.Timer),
         deleteSessionToDeviceID: make(map[string]string),

@@ -173,6 +177,19 @@ func (d *sessionsDict) addDeviceToDelete(sessionID, deviceID string) {
     d.deleteSessionToDeviceID[sessionID] = deviceID
 }

+func (d *sessionsDict) addCompletedRegistration(sessionID string, response registerResponse) {
+    d.Lock()
+    defer d.Unlock()
+    d.sessionCompletedResult[sessionID] = response
+}
+
+func (d *sessionsDict) getCompletedRegistration(sessionID string) (registerResponse, bool) {
+    d.RLock()
+    defer d.RUnlock()
+    result, ok := d.sessionCompletedResult[sessionID]
+    return result, ok
+}
+
 func (d *sessionsDict) getDeviceToDelete(sessionID string) (string, bool) {
     d.RLock()
     defer d.RUnlock()

@@ -544,6 +561,14 @@ func Register(
         r.DeviceID = data.DeviceID
         r.InitialDisplayName = data.InitialDisplayName
         r.InhibitLogin = data.InhibitLogin
+        // Check if the user already registered using this session, if so, return that result
+        if response, ok := sessions.getCompletedRegistration(sessionID); ok {
+            return util.JSONResponse{
+                Code: http.StatusOK,
+                JSON: response,
+            }
+        }
     }
     if resErr := httputil.UnmarshalJSON(reqBody, &r); resErr != nil {
         return *resErr

@@ -839,13 +864,6 @@ func completeRegistration(
     displayName, deviceID *string,
     accType userapi.AccountType,
 ) util.JSONResponse {
-    var registrationOK bool
-    defer func() {
-        if registrationOK {
-            sessions.deleteSession(sessionID)
-        }
-    }()
-
     if username == "" {
         return util.JSONResponse{
             Code: http.StatusBadRequest,

@@ -886,7 +904,6 @@ func completeRegistration(
     // Check whether inhibit_login option is set. If so, don't create an access
     // token or a device for this user
     if inhibitLogin {
-        registrationOK = true
         return util.JSONResponse{
             Code: http.StatusOK,
             JSON: registerResponse{

@@ -920,15 +937,17 @@ func completeRegistration(
         }
     }

-    registrationOK = true
+    result := registerResponse{
+        UserID: devRes.Device.UserID,
+        AccessToken: devRes.Device.AccessToken,
+        HomeServer: accRes.Account.ServerName,
+        DeviceID: devRes.Device.ID,
+    }
+    sessions.addCompletedRegistration(sessionID, result)

     return util.JSONResponse{
         Code: http.StatusOK,
-        JSON: registerResponse{
-            UserID: devRes.Device.UserID,
-            AccessToken: devRes.Device.AccessToken,
-            HomeServer: accRes.Account.ServerName,
-            DeviceID: devRes.Device.ID,
-        },
+        JSON: result,
     }
 }
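The registration change above caches the final registerResponse per user-interactive-auth session, so a retried /register request for an already-completed session gets the original result back instead of failing or re-registering. The same pattern in isolation, with simplified stand-in types rather than Dendrite's own, looks like this:

```go
package main

import (
	"fmt"
	"sync"
)

// response is a stand-in for the registration result being cached.
type response struct {
	UserID      string
	AccessToken string
}

// sessionCache remembers the completed result for each session ID.
type sessionCache struct {
	mu      sync.RWMutex
	results map[string]response
}

func newSessionCache() *sessionCache {
	return &sessionCache{results: make(map[string]response)}
}

func (c *sessionCache) add(sessionID string, r response) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.results[sessionID] = r
}

func (c *sessionCache) get(sessionID string) (response, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	r, ok := c.results[sessionID]
	return r, ok
}

func main() {
	cache := newSessionCache()
	cache.add("session-1", response{UserID: "@alice:example.com", AccessToken: "token"})

	// A retried request carrying the same session ID gets the cached result back.
	if r, ok := cache.get("session-1"); ok {
		fmt.Println("already registered:", r.UserID)
	}
}
```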
|
|
@ -18,8 +18,6 @@ import (
|
|||
"encoding/json"
|
||||
"net/http"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/matrix-org/dendrite/clientapi/httputil"
|
||||
"github.com/matrix-org/dendrite/clientapi/jsonerror"
|
||||
"github.com/matrix-org/dendrite/clientapi/producers"
|
||||
|
|
@ -98,10 +96,6 @@ func PutTag(
|
|||
return jsonerror.InternalServerError()
|
||||
}
|
||||
|
||||
if err = syncProducer.SendData(userID, roomID, "m.tag", nil, nil); err != nil {
|
||||
logrus.WithError(err).Error("Failed to send m.tag account data update to syncapi")
|
||||
}
|
||||
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusOK,
|
||||
JSON: struct{}{},
|
||||
|
|
@ -150,11 +144,6 @@ func DeleteTag(
|
|||
return jsonerror.InternalServerError()
|
||||
}
|
||||
|
||||
// TODO: user API should do this since it's account data
|
||||
if err := syncProducer.SendData(userID, roomID, "m.tag", nil, nil); err != nil {
|
||||
logrus.WithError(err).Error("Failed to send m.tag account data update to syncapi")
|
||||
}
|
||||
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusOK,
|
||||
JSON: struct{}{},
|
||||
|
|
|
|||
|
|
@@ -48,7 +48,7 @@ import (
 // applied:
 // nolint: gocyclo
 func Setup(
-    publicAPIMux, synapseAdminRouter, dendriteAdminRouter *mux.Router,
+    publicAPIMux, wkMux, synapseAdminRouter, dendriteAdminRouter *mux.Router,
     cfg *config.ClientAPI,
     rsAPI roomserverAPI.ClientRoomserverAPI,
     asAPI appserviceAPI.AppServiceInternalAPI,

@@ -74,6 +74,26 @@ func Setup(
         unstableFeatures["org.matrix."+msc] = true
     }

+    if cfg.Matrix.WellKnownClientName != "" {
+        logrus.Infof("Setting m.homeserver base_url as %s at /.well-known/matrix/client", cfg.Matrix.WellKnownClientName)
+        wkMux.Handle("/client", httputil.MakeExternalAPI("wellknown", func(r *http.Request) util.JSONResponse {
+            return util.JSONResponse{
+                Code: http.StatusOK,
+                JSON: struct {
+                    HomeserverName struct {
+                        BaseUrl string `json:"base_url"`
+                    } `json:"m.homeserver"`
+                }{
+                    HomeserverName: struct {
+                        BaseUrl string `json:"base_url"`
+                    }{
+                        BaseUrl: cfg.Matrix.WellKnownClientName,
+                    },
+                },
+            }
+        })).Methods(http.MethodGet, http.MethodOptions)
+    }
+
     publicAPIMux.Handle("/versions",
         httputil.MakeExternalAPI("versions", func(req *http.Request) util.JSONResponse {
             return util.JSONResponse{

@@ -89,6 +109,9 @@ func Setup(
                 "r0.4.0",
                 "r0.5.0",
                 "r0.6.1",
+                "v1.0",
+                "v1.1",
+                "v1.2",
             }, UnstableFeatures: unstableFeatures},
             }
         }),

@@ -126,6 +149,12 @@ func Setup(
         }),
     ).Methods(http.MethodGet, http.MethodOptions)

+    dendriteAdminRouter.Handle("/admin/evacuateUser/{userID}",
+        httputil.MakeAuthAPI("admin_evacuate_user", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
+            return AdminEvacuateUser(req, device, rsAPI)
+        }),
+    ).Methods(http.MethodGet, http.MethodOptions)
+
     // server notifications
     if cfg.Matrix.ServerNotices.Enabled {
         logrus.Info("Enabling server notices at /_synapse/admin/v1/send_server_notice")

 The remaining hunks in this file (@@ -137,7 +166,7 @@ through @@ -1296,7 +1325,7 @@) all make the same one-line change inside each handler, passing the authenticated device into the rate limiter:

-            if r := rateLimits.Limit(req); r != nil {
+            if r := rateLimits.Limit(req, device); r != nil {

 (or `rateLimits.Limit(req, nil)` on the unauthenticated /register, /register/available and /login handlers). The affected handlers are: /admin/v1/send_server_notice/{txnID}, /admin/v1/send_server_notice, /join/{roomIDOrAlias}, /peek/{roomIDOrAlias}, /rooms/{roomID}/join, /rooms/{roomID}/leave, /rooms/{roomID}/invite, /register, /register/available, /rooms/{roomID}/typing/{userID}, /account/whoami, /account/password, /account/deactivate, /login, /pushrules/{scope}/{kind}/{ruleID}, /profile/{userID}/avatar_url, /profile/{userID}/displayname, /voip/turnServer, /user/{userID}/openid/request_token, /user_directory/search, /rooms/{roomID}/read_markers, /rooms/{roomID}/forget, /pushers/set, /capabilities and /rooms/{roomId}/receipt/{receiptType}/{eventId}.
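Every handler above now passes the authenticated device (or nil for unauthenticated endpoints) into rateLimits.Limit. The intent, per the 0.8.8 changelog, is to key the limiter on the logged-in session rather than the remote IP. A condensed sketch of that keying decision, using stand-in types and not Dendrite's actual limiter, might be:

```go
package main

import (
	"fmt"
	"net"
	"net/http"
)

// device is a stand-in for the authenticated device passed to the limiter.
type device struct {
	UserID    string
	SessionID string
}

// limitKey picks the bucket a request is counted against: the session for
// logged-in users, the remote IP for anonymous endpoints.
func limitKey(req *http.Request, dev *device) string {
	if dev != nil {
		return dev.UserID + "/" + dev.SessionID
	}
	host, _, err := net.SplitHostPort(req.RemoteAddr)
	if err != nil {
		return req.RemoteAddr
	}
	return host
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "/_matrix/client/v3/sync", nil)
	req.RemoteAddr = "203.0.113.7:54321"

	// Anonymous request: keyed by IP.
	fmt.Println(limitKey(req, nil))
	// Logged-in request: keyed by user and session.
	fmt.Println(limitKey(req, &device{UserID: "@bob:example.com", SessionID: "1"}))
}
```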
@@ -19,6 +19,7 @@ import (
     "encoding/json"
     "fmt"
     "net/http"
+    "reflect"
     "sync"
     "time"

@@ -96,14 +97,28 @@ func SendEvent(
     mutex.(*sync.Mutex).Lock()
     defer mutex.(*sync.Mutex).Unlock()

-    startedGeneratingEvent := time.Now()
-
     var r map[string]interface{} // must be a JSON object
     resErr := httputil.UnmarshalJSONRequest(req, &r)
     if resErr != nil {
         return *resErr
     }

+    if stateKey != nil {
+        // If the existing/new state content are equal, return the existing event_id, making the request idempotent.
+        if resp := stateEqual(req.Context(), rsAPI, eventType, *stateKey, roomID, r); resp != nil {
+            return *resp
+        }
+    }
+
+    startedGeneratingEvent := time.Now()
+
     // If we're sending a membership update, make sure to strip the authorised
     // via key if it is present, otherwise other servers won't be able to auth
     // the event if the room is set to the "restricted" join rule.
     if eventType == gomatrixserverlib.MRoomMember {
         delete(r, "join_authorised_via_users_server")
     }

     evTime, err := httputil.ParseTSParam(req)
     if err != nil {
         return util.JSONResponse{

@@ -201,6 +216,37 @@ func SendEvent(
     return res
 }

+// stateEqual compares the new and the existing state event content. If they are equal, returns a *util.JSONResponse
+// with the existing event_id, making this an idempotent request.
+func stateEqual(ctx context.Context, rsAPI api.ClientRoomserverAPI, eventType, stateKey, roomID string, newContent map[string]interface{}) *util.JSONResponse {
+    stateRes := api.QueryCurrentStateResponse{}
+    tuple := gomatrixserverlib.StateKeyTuple{
+        EventType: eventType,
+        StateKey: stateKey,
+    }
+    err := rsAPI.QueryCurrentState(ctx, &api.QueryCurrentStateRequest{
+        RoomID: roomID,
+        StateTuples: []gomatrixserverlib.StateKeyTuple{tuple},
+    }, &stateRes)
+    if err != nil {
+        return nil
+    }
+    if existingEvent, ok := stateRes.StateEvents[tuple]; ok {
+        var existingContent map[string]interface{}
+        if err = json.Unmarshal(existingEvent.Content(), &existingContent); err != nil {
+            return nil
+        }
+        if reflect.DeepEqual(existingContent, newContent) {
+            return &util.JSONResponse{
+                Code: http.StatusOK,
+                JSON: sendEventResponse{existingEvent.EventID()},
+            }
+        }
+    }
+    return nil
+}
+
 func generateSendEvent(
     ctx context.Context,
     r map[string]interface{},
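The stateEqual helper above makes repeated state PUTs with identical content return the existing event ID. From a client's point of view, a sketch with a placeholder homeserver URL, room ID and access token (not a test from the Dendrite suite), sending the same state event twice should therefore yield the same event_id both times:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// putState sends a state event and returns the event_id from the response.
func putState(url, token string, content map[string]any) (string, error) {
	body, err := json.Marshal(content)
	if err != nil {
		return "", err
	}
	req, err := http.NewRequest(http.MethodPut, url, bytes.NewReader(body))
	if err != nil {
		return "", err
	}
	req.Header.Set("Authorization", "Bearer "+token)
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	var out struct {
		EventID string `json:"event_id"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		return "", err
	}
	return out.EventID, nil
}

func main() {
	// Placeholders: a real homeserver URL, room ID and access token are required.
	url := "https://localhost:8008/_matrix/client/v3/rooms/!room:example.com/state/m.room.topic"
	token := "ACCESS_TOKEN"
	content := map[string]any{"topic": "weekly sync"}

	first, err := putState(url, token, content)
	if err != nil {
		panic(err)
	}
	second, err := putState(url, token, content)
	if err != nil {
		panic(err)
	}
	fmt.Println(first == second) // expected: true, the second PUT is a no-op
}
```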
@@ -92,7 +92,7 @@ func main() {
     pRouter := pineconeRouter.NewRouter(logrus.WithField("pinecone", "router"), sk, false)
     pQUIC := pineconeSessions.NewSessions(logrus.WithField("pinecone", "sessions"), pRouter, []string{"matrix"})
     pMulticast := pineconeMulticast.NewMulticast(logrus.WithField("pinecone", "multicast"), pRouter)
-    pManager := pineconeConnections.NewConnectionManager(pRouter)
+    pManager := pineconeConnections.NewConnectionManager(pRouter, nil)
     pMulticast.Start()
     if instancePeer != nil && *instancePeer != "" {
         pManager.AddPeer(*instancePeer)
@@ -1,6 +1,6 @@
 # Yggdrasil Demo

-This is the Dendrite Yggdrasil demo! It's easy to get started - all you need is Go 1.16 or later.
+This is the Dendrite Yggdrasil demo! It's easy to get started - all you need is Go 1.18 or later.

 To run the homeserver, start at the root of the Dendrite repository and run:

@@ -13,10 +13,10 @@ The following command line arguments are accepted:
 * `-peer tcp://a.b.c.d:e` to specify a static Yggdrasil peer to connect to - you will need to supply this if you do not have another Yggdrasil node on your network
 * `-port 12345` to specify a port to listen on for client connections

 If you need to find an internet peer, take a look at [this list](https://publicpeers.neilalexander.dev/).

 Then point your favourite Matrix client to the homeserver URL`http://localhost:8008` (or whichever `-port` you specified), create an account and log in.

 If your peering connection is operational then you should see a `Connected TCP:` line in the log output. If not then try a different peer.

 Once logged in, you should be able to open the room directory or join a room by its ID.
 (two lines in this hunk appear identically on both sides; the edit is whitespace-only)
@@ -37,6 +37,7 @@ var (
     flagBuildConcurrency = flag.Int("build-concurrency", runtime.NumCPU(), "The amount of build concurrency when building images")
     flagHead = flag.String("head", "", "Location to a dendrite repository to treat as HEAD instead of Github")
     flagDockerHost = flag.String("docker-host", "localhost", "The hostname of the docker client. 'localhost' if running locally, 'host.docker.internal' if running in Docker.")
+    flagDirect = flag.Bool("direct", false, "If a direct upgrade from the defined FROM version to TO should be done")
     alphaNumerics = regexp.MustCompile("[^a-zA-Z0-9]+")
 )

@@ -48,7 +49,7 @@ const HEAD = "HEAD"
 // due to the error:
 // When using COPY with more than one source file, the destination must be a directory and end with a /
 // We need to run a postgres anyway, so use the dockerfile associated with Complement instead.
-const Dockerfile = `FROM golang:1.16-stretch as build
+const Dockerfile = `FROM golang:1.18-stretch as build
 RUN apt-get update && apt-get install -y postgresql
 WORKDIR /build

@@ -229,7 +230,7 @@ func getAndSortVersionsFromGithub(httpClient *http.Client) (semVers []*semver.Ve
     return semVers, nil
 }

-func calculateVersions(cli *http.Client, from, to string) []string {
+func calculateVersions(cli *http.Client, from, to string, direct bool) []string {
     semvers, err := getAndSortVersionsFromGithub(cli)
     if err != nil {
         log.Fatalf("failed to collect semvers from github: %s", err)

@@ -284,6 +285,9 @@ func calculateVersions(cli *http.Client, from, to string) []string {
     if to == HEAD {
         versions = append(versions, HEAD)
     }
+    if direct {
+        versions = []string{versions[0], versions[len(versions)-1]}
+    }
     return versions
 }

@@ -461,7 +465,7 @@ func main() {
         os.Exit(1)
     }
     cleanup(dockerClient)
-    versions := calculateVersions(httpClient, *flagFrom, *flagTo)
+    versions := calculateVersions(httpClient, *flagFrom, *flagTo, *flagDirect)
     log.Printf("Testing dendrite versions: %v\n", versions)

     branchToImageID := buildDendriteImages(httpClient, dockerClient, *flagTempDir, *flagBuildConcurrency, versions)
@ -1,109 +0,0 @@
|
|||
## Database migrations
|
||||
|
||||
We use [goose](https://github.com/pressly/goose) to handle database migrations. This allows us to execute
|
||||
both SQL deltas (e.g `ALTER TABLE ...`) as well as manipulate data in the database in Go using Go functions.
|
||||
|
||||
To run a migration, the `goose` binary in this directory needs to be built:
|
||||
```
|
||||
$ go build ./cmd/goose
|
||||
```
|
||||
|
||||
This binary allows Dendrite databases to be upgraded and downgraded. Sample usage for upgrading the roomserver database:
|
||||
|
||||
```
|
||||
# for sqlite
|
||||
$ ./goose -dir roomserver/storage/sqlite3/deltas sqlite3 ./roomserver.db up
|
||||
|
||||
# for postgres
|
||||
$ ./goose -dir roomserver/storage/postgres/deltas postgres "user=dendrite dbname=dendrite sslmode=disable" up
|
||||
```
|
||||
|
||||
For a full list of options, including rollbacks, see https://github.com/pressly/goose or use `goose` with no args.
|
||||
|
||||
|
||||
### Rationale
|
||||
|
||||
Dendrite creates tables on startup using `CREATE TABLE IF NOT EXISTS`, so you might think that we should also
|
||||
apply version upgrades on startup as well. This is convenient and doesn't involve an additional binary to run
|
||||
which complicates upgrades. However, combining the upgrade mechanism and the server binary makes it difficult
|
||||
to handle rollbacks. Firstly, how do you specify you wish to rollback? We would have to add additional flags
|
||||
to the main server binary to say "rollback to version X". Secondly, if you roll back the server binary from
|
||||
version 5 to version 4, the version 4 binary doesn't know how to rollback the database from version 5 to
|
||||
version 4! For these reasons, we prefer to have a separate "upgrade" binary which is run for database upgrades.
|
||||
Rather than roll-our-own migration tool, we decided to use [goose](https://github.com/pressly/goose) as it supports
|
||||
complex migrations in Go code in addition to just executing SQL deltas. Other alternatives like
|
||||
`github.com/golang-migrate/migrate` [do not support](https://github.com/golang-migrate/migrate/issues/15) these
|
||||
kinds of complex migrations.
|
||||
|
||||
### Adding new deltas
|
||||
|
||||
You can add `.sql` or `.go` files manually or you can use goose to create them for you.
|
||||
|
||||
If you only want to add a SQL delta then run:
|
||||
|
||||
```
|
||||
$ ./goose -dir serverkeyapi/storage/sqlite3/deltas sqlite3 ./foo.db create new_col sql
|
||||
2020/09/09 14:37:43 Created new file: serverkeyapi/storage/sqlite3/deltas/20200909143743_new_col.sql
|
||||
```
|
||||
|
||||
In this case, the version number is `20200909143743`. The important thing is that it is always increasing.
|
||||
|
||||
Then add up/downgrade SQL commands to the created file which looks like:
|
||||
```sql
|
||||
-- +goose Up
|
||||
-- +goose StatementBegin
|
||||
SELECT 'up SQL query';
|
||||
-- +goose StatementEnd
|
||||
|
||||
-- +goose Down
|
||||
-- +goose StatementBegin
|
||||
SELECT 'down SQL query';
|
||||
-- +goose StatementEnd
|
||||
|
||||
```
|
||||
You __must__ keep the `+goose` annotations. You'll need to repeat this process for Postgres.
|
||||
|
||||
For complex Go migrations:
|
||||
|
||||
```
|
||||
$ ./goose -dir serverkeyapi/storage/sqlite3/deltas sqlite3 ./foo.db create complex_update go
|
||||
2020/09/09 14:40:38 Created new file: serverkeyapi/storage/sqlite3/deltas/20200909144038_complex_update.go
|
||||
```
|
||||
|
||||
Then modify the created `.go` file which looks like:
|
||||
|
||||
```go
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
|
||||
"github.com/pressly/goose"
|
||||
)
|
||||
|
||||
func init() {
|
||||
goose.AddMigration(upComplexUpdate, downComplexUpdate)
|
||||
}
|
||||
|
||||
func upComplexUpdate(tx *sql.Tx) error {
|
||||
// This code is executed when the migration is applied.
|
||||
return nil
|
||||
}
|
||||
|
||||
func downComplexUpdate(tx *sql.Tx) error {
|
||||
// This code is executed when the migration is rolled back.
|
||||
return nil
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
You __must__ import the package in `/cmd/goose/main.go` so `func init()` gets called.
|
||||
|
||||
|
||||
#### Database limitations
|
||||
|
||||
- SQLite3 does NOT support `ALTER TABLE table_name DROP COLUMN` - you would have to rename the column or drop the table
|
||||
entirely and recreate it. ([example](https://github.com/matrix-org/dendrite/blob/master/userapi/storage/accounts/sqlite3/deltas/20200929203058_is_active.sql))
|
||||
|
||||
More information: [sqlite.org](https://www.sqlite.org/lang_altertable.html)
|
||||
|
|
@ -1,154 +0,0 @@
|
|||
// This is custom goose binary
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/pressly/goose"
|
||||
|
||||
pgusers "github.com/matrix-org/dendrite/userapi/storage/postgres/deltas"
|
||||
slusers "github.com/matrix-org/dendrite/userapi/storage/sqlite3/deltas"
|
||||
|
||||
_ "github.com/lib/pq"
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
)
|
||||
|
||||
const (
|
||||
AppService = "appservice"
|
||||
FederationSender = "federationapi"
|
||||
KeyServer = "keyserver"
|
||||
MediaAPI = "mediaapi"
|
||||
RoomServer = "roomserver"
|
||||
SigningKeyServer = "signingkeyserver"
|
||||
SyncAPI = "syncapi"
|
||||
UserAPI = "userapi"
|
||||
)
|
||||
|
||||
var (
|
||||
dir = flags.String("dir", "", "directory with migration files")
|
||||
flags = flag.NewFlagSet("goose", flag.ExitOnError)
|
||||
component = flags.String("component", "", "dendrite component name")
|
||||
knownDBs = []string{
|
||||
AppService, FederationSender, KeyServer, MediaAPI, RoomServer, SigningKeyServer, SyncAPI, UserAPI,
|
||||
}
|
||||
)
|
||||
|
||||
// nolint: gocyclo
|
||||
func main() {
|
||||
err := flags.Parse(os.Args[1:])
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
args := flags.Args()
|
||||
|
||||
if len(args) < 3 {
|
||||
fmt.Println(
|
||||
`Usage: goose [OPTIONS] DRIVER DBSTRING COMMAND
|
||||
|
||||
Drivers:
|
||||
postgres
|
||||
sqlite3
|
||||
|
||||
Examples:
|
||||
goose -component roomserver sqlite3 ./roomserver.db status
|
||||
goose -component roomserver sqlite3 ./roomserver.db up
|
||||
|
||||
goose -component roomserver postgres "user=dendrite dbname=dendrite sslmode=disable" status
|
||||
|
||||
Options:
|
||||
-component string
|
||||
Dendrite component name e.g roomserver, signingkeyserver, clientapi, syncapi
|
||||
-table string
|
||||
migrations table name (default "goose_db_version")
|
||||
-h print help
|
||||
-v enable verbose mode
|
||||
-dir string
|
||||
directory with migration files, only relevant when creating new migrations.
|
||||
-version
|
||||
print version
|
||||
|
||||
Commands:
|
||||
up Migrate the DB to the most recent version available
|
||||
up-by-one Migrate the DB up by 1
|
||||
up-to VERSION Migrate the DB to a specific VERSION
|
||||
down Roll back the version by 1
|
||||
down-to VERSION Roll back to a specific VERSION
|
||||
redo Re-run the latest migration
|
||||
reset Roll back all migrations
|
||||
status Dump the migration status for the current DB
|
||||
version Print the current version of the database
|
||||
create NAME [sql|go] Creates new migration file with the current timestamp
|
||||
fix Apply sequential ordering to migrations`,
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
engine := args[0]
|
||||
if engine != "sqlite3" && engine != "postgres" {
|
||||
fmt.Println("engine must be one of 'sqlite3' or 'postgres'")
|
||||
return
|
||||
}
|
||||
|
||||
knownComponent := false
|
||||
for _, c := range knownDBs {
|
||||
if c == *component {
|
||||
knownComponent = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !knownComponent {
|
||||
fmt.Printf("component must be one of %v\n", knownDBs)
|
||||
return
|
||||
}
|
||||
|
||||
if engine == "sqlite3" {
|
||||
loadSQLiteDeltas(*component)
|
||||
} else {
|
||||
loadPostgresDeltas(*component)
|
||||
}
|
||||
|
||||
dbstring, command := args[1], args[2]
|
||||
|
||||
db, err := goose.OpenDBWithDriver(engine, dbstring)
|
||||
if err != nil {
|
||||
log.Fatalf("goose: failed to open DB: %v\n", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := db.Close(); err != nil {
|
||||
log.Fatalf("goose: failed to close DB: %v\n", err)
|
||||
}
|
||||
}()
|
||||
|
||||
arguments := []string{}
|
||||
if len(args) > 3 {
|
||||
arguments = append(arguments, args[3:]...)
|
||||
}
|
||||
|
||||
// goose demands a directory even though we don't use it for upgrades
|
||||
d := *dir
|
||||
if d == "" {
|
||||
d = os.TempDir()
|
||||
}
|
||||
if err := goose.Run(command, db, d, arguments...); err != nil {
|
||||
log.Fatalf("goose %v: %v", command, err)
|
||||
}
|
||||
}
|
||||
|
||||
func loadSQLiteDeltas(component string) {
|
||||
switch component {
|
||||
case UserAPI:
|
||||
slusers.LoadFromGoose()
|
||||
}
|
||||
}
|
||||
|
||||
func loadPostgresDeltas(component string) {
|
||||
switch component {
|
||||
case UserAPI:
|
||||
pgusers.LoadFromGoose()
|
||||
}
|
||||
}
|
||||
|
|
@ -4,13 +4,18 @@ import (
|
|||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/matrix-org/dendrite/internal/caching"
|
||||
"github.com/matrix-org/dendrite/roomserver/state"
|
||||
"github.com/matrix-org/dendrite/roomserver/storage"
|
||||
"github.com/matrix-org/dendrite/roomserver/types"
|
||||
"github.com/matrix-org/dendrite/setup"
|
||||
"github.com/matrix-org/dendrite/setup/base"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
)
|
||||
|
||||
|
|
@ -23,11 +28,20 @@ import (
|
|||
// e.g. ./resolve-state --roomversion=5 1254 1235 1282
|
||||
|
||||
var roomVersion = flag.String("roomversion", "5", "the room version to parse events as")
|
||||
var filterType = flag.String("filtertype", "", "the event types to filter on")
|
||||
var difference = flag.Bool("difference", false, "whether to calculate the difference between snapshots")
|
||||
|
||||
// nolint:gocyclo
|
||||
func main() {
|
||||
ctx := context.Background()
|
||||
cfg := setup.ParseFlags(true)
|
||||
args := os.Args[1:]
|
||||
cfg.Logging = append(cfg.Logging[:0], config.LogrusHook{
|
||||
Type: "std",
|
||||
Level: "error",
|
||||
})
|
||||
cfg.ClientAPI.RegistrationDisabled = true
|
||||
base := base.NewBaseDendrite(cfg, "ResolveState", base.DisableMetrics)
|
||||
args := flag.Args()
|
||||
|
||||
fmt.Println("Room version", *roomVersion)
|
||||
|
||||
|
|
@ -40,35 +54,84 @@ func main() {
|
|||
|
||||
fmt.Println("Fetching", len(snapshotNIDs), "snapshot NIDs")
|
||||
|
||||
cache, err := caching.NewInMemoryLRUCache(true)
|
||||
roomserverDB, err := storage.Open(
|
||||
base, &cfg.RoomServer.Database,
|
||||
caching.NewRistrettoCache(128*1024*1024, time.Hour, true),
|
||||
)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
roomserverDB, err := storage.Open(nil, &cfg.RoomServer.Database, cache)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
stateres := state.NewStateResolution(roomserverDB, &types.RoomInfo{
|
||||
RoomVersion: gomatrixserverlib.RoomVersion(*roomVersion),
|
||||
})
|
||||
|
||||
if *difference {
|
||||
if len(snapshotNIDs) != 2 {
|
||||
panic("need exactly two state snapshot NIDs to calculate difference")
|
||||
}
|
||||
var removed, added []types.StateEntry
|
||||
removed, added, err = stateres.DifferenceBetweeenStateSnapshots(ctx, snapshotNIDs[0], snapshotNIDs[1])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
var eventNIDs []types.EventNID
|
||||
for _, entry := range append(removed, added...) {
|
||||
eventNIDs = append(eventNIDs, entry.EventNID)
|
||||
}
|
||||
|
||||
var eventEntries []types.Event
|
||||
eventEntries, err = roomserverDB.Events(ctx, eventNIDs)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
events := make(map[types.EventNID]*gomatrixserverlib.Event, len(eventEntries))
|
||||
for _, entry := range eventEntries {
|
||||
events[entry.EventNID] = entry.Event
|
||||
}
|
||||
|
||||
if len(removed) > 0 {
|
||||
fmt.Println("Removed:")
|
||||
for _, r := range removed {
|
||||
event := events[r.EventNID]
|
||||
fmt.Println()
|
||||
fmt.Printf("* %s %s %q\n", event.EventID(), event.Type(), *event.StateKey())
|
||||
fmt.Printf(" %s\n", string(event.Content()))
|
||||
}
|
||||
}
|
||||
|
||||
if len(removed) > 0 && len(added) > 0 {
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
if len(added) > 0 {
|
||||
fmt.Println("Added:")
|
||||
for _, a := range added {
|
||||
event := events[a.EventNID]
|
||||
fmt.Println()
|
||||
fmt.Printf("* %s %s %q\n", event.EventID(), event.Type(), *event.StateKey())
|
||||
fmt.Printf(" %s\n", string(event.Content()))
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
blockNIDs, err := roomserverDB.StateBlockNIDs(ctx, snapshotNIDs)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
var stateEntries []types.StateEntryList
|
||||
for _, list := range blockNIDs {
|
||||
entries, err2 := roomserverDB.StateEntries(ctx, list.StateBlockNIDs)
|
||||
if err2 != nil {
|
||||
panic(err2)
|
||||
var stateEntries []types.StateEntry
|
||||
for _, snapshotNID := range snapshotNIDs {
|
||||
var entries []types.StateEntry
|
||||
entries, err = stateres.LoadStateAtSnapshot(ctx, snapshotNID)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
stateEntries = append(stateEntries, entries...)
|
||||
}
|
||||
|
||||
var eventNIDs []types.EventNID
|
||||
for _, entry := range stateEntries {
|
||||
for _, e := range entry.StateEntries {
|
||||
eventNIDs = append(eventNIDs, e.EventNID)
|
||||
}
|
||||
eventNIDs = append(eventNIDs, entry.EventNID)
|
||||
}
|
||||
|
||||
fmt.Println("Fetching", len(eventNIDs), "state events")
|
||||
|
|
@ -103,7 +166,8 @@ func main() {
|
|||
}
|
||||
|
||||
fmt.Println("Resolving state")
|
||||
resolved, err := gomatrixserverlib.ResolveConflicts(
|
||||
var resolved Events
|
||||
resolved, err = gomatrixserverlib.ResolveConflicts(
|
||||
gomatrixserverlib.RoomVersion(*roomVersion),
|
||||
events,
|
||||
authEvents,
|
||||
|
|
@ -113,9 +177,41 @@ func main() {
|
|||
}
|
||||
|
||||
fmt.Println("Resolved state contains", len(resolved), "events")
|
||||
sort.Sort(resolved)
|
||||
filteringEventType := *filterType
|
||||
count := 0
|
||||
for _, event := range resolved {
|
||||
if filteringEventType != "" && event.Type() != filteringEventType {
|
||||
continue
|
||||
}
|
||||
count++
|
||||
fmt.Println()
|
||||
fmt.Printf("* %s %s %q\n", event.EventID(), event.Type(), *event.StateKey())
|
||||
fmt.Printf(" %s\n", string(event.Content()))
|
||||
}
|
||||
|
||||
fmt.Println()
|
||||
fmt.Println("Returned", count, "state events after filtering")
|
||||
}
|
||||
|
||||
type Events []*gomatrixserverlib.Event
|
||||
|
||||
func (e Events) Len() int {
|
||||
return len(e)
|
||||
}
|
||||
|
||||
func (e Events) Swap(i, j int) {
|
||||
e[i], e[j] = e[j], e[i]
|
||||
}
|
||||
|
||||
func (e Events) Less(i, j int) bool {
|
||||
typeDelta := strings.Compare(e[i].Type(), e[j].Type())
|
||||
if typeDelta < 0 {
|
||||
return true
|
||||
}
|
||||
if typeDelta > 0 {
|
||||
return false
|
||||
}
|
||||
stateKeyDelta := strings.Compare(*e[i].StateKey(), *e[j].StateKey())
|
||||
return stateKeyDelta < 0
|
||||
}
|
||||
|
|
|
|||
|
|
@ -41,10 +41,33 @@ global:
|
|||
max_idle_conns: 5
|
||||
conn_max_lifetime: -1
|
||||
|
||||
# Configuration for in-memory caches. Caches can often improve performance by
|
||||
# keeping frequently accessed items (like events, identifiers etc.) in memory
|
||||
# rather than having to read them from the database.
|
||||
cache:
|
||||
# The estimated maximum size for the global cache in bytes, or in terabytes,
|
||||
# gigabytes, megabytes or kilobytes when the appropriate 'tb', 'gb', 'mb' or
|
||||
# 'kb' suffix is specified. Note that this is not a hard limit, nor is it a
|
||||
# memory limit for the entire process. A cache that is too small may ultimately
|
||||
# provide little or no benefit.
|
||||
max_size_estimated: 1gb
|
||||
|
||||
# The maximum amount of time that a cache entry can live for in memory before
|
||||
# it will be evicted and/or refreshed from the database. Lower values result in
|
||||
# easier admission of new cache entries but may also increase database load in
|
||||
# comparison to higher values, so adjust conservatively. Higher values may make
|
||||
# it harder for new items to make it into the cache, e.g. if new rooms suddenly
|
||||
# become popular.
|
||||
max_age: 1h
|
||||
|
||||
# The server name to delegate server-server communications to, with optional port
|
||||
# e.g. localhost:443
|
||||
well_known_server_name: ""
|
||||
|
||||
# The server name to delegate client-server communications to, with optional port
|
||||
# e.g. localhost:443
|
||||
well_known_client_name: ""
|
||||
|
||||
# Lists of domains that the server will trust as identity servers to verify third
|
||||
# party identifiers such as phone numbers and email addresses.
|
||||
trusted_third_party_id_servers:
|
||||
|
|
@ -90,6 +113,11 @@ global:
|
|||
addresses:
|
||||
# - localhost:4222
|
||||
|
||||
# Disable the validation of TLS certificates of NATS. This is
|
||||
# not recommended in production since it may allow NATS traffic
|
||||
# to be sent to an insecure endpoint.
|
||||
disable_tls_validation: false
|
||||
|
||||
# Persistent directory to store JetStream streams in. This directory should be
|
||||
# preserved across Dendrite restarts.
|
||||
storage_path: ./
|
||||
|
|
@ -160,11 +188,14 @@ client_api:
|
|||
|
||||
# Settings for rate-limited endpoints. Rate limiting kicks in after the threshold
|
||||
# number of "slots" have been taken by requests from a specific host. Each "slot"
|
||||
# will be released after the cooloff time in milliseconds.
|
||||
# will be released after the cooloff time in milliseconds. Server administrators
|
||||
# and appservice users are exempt from rate limiting by default.
|
||||
rate_limiting:
|
||||
enabled: true
|
||||
threshold: 5
|
||||
cooloff_ms: 500
|
||||
exempt_user_ids:
|
||||
# - "@user:domain.com"
|
||||
|
||||
# Configuration for the Federation API.
|
||||
federation_api:
|
||||
|
|
|
|||
|
|
@ -31,10 +31,33 @@ global:
|
|||
# considered valid by other homeservers.
|
||||
key_validity_period: 168h0m0s
|
||||
|
||||
# Configuration for in-memory caches. Caches can often improve performance by
|
||||
# keeping frequently accessed items (like events, identifiers etc.) in memory
|
||||
# rather than having to read them from the database.
|
||||
cache:
|
||||
# The estimated maximum size for the global cache in bytes, or in terabytes,
|
||||
# gigabytes, megabytes or kilobytes when the appropriate 'tb', 'gb', 'mb' or
|
||||
# 'kb' suffix is specified. Note that this is not a hard limit, nor is it a
|
||||
# memory limit for the entire process. A cache that is too small may ultimately
|
||||
# provide little or no benefit.
|
||||
max_size_estimated: 1gb
|
||||
|
||||
# The maximum amount of time that a cache entry can live for in memory before
|
||||
# it will be evicted and/or refreshed from the database. Lower values result in
|
||||
# easier admission of new cache entries but may also increase database load in
|
||||
# comparison to higher values, so adjust conservatively. Higher values may make
|
||||
# it harder for new items to make it into the cache, e.g. if new rooms suddenly
|
||||
# become popular.
|
||||
max_age: 1h
|
||||
|
||||
# The server name to delegate server-server communications to, with optional port
|
||||
# e.g. localhost:443
|
||||
well_known_server_name: ""
|
||||
|
||||
# The server name to delegate client-server communications to, with optional port
|
||||
# e.g. localhost:443
|
||||
well_known_client_name: ""
|
||||
|
||||
# Lists of domains that the server will trust as identity servers to verify third
|
||||
# party identifiers such as phone numbers and email addresses.
|
||||
trusted_third_party_id_servers:
|
||||
|
|
@ -80,6 +103,11 @@ global:
|
|||
addresses:
|
||||
- hostname:4222
|
||||
|
||||
# Disable the validation of TLS certificates of NATS. This is
|
||||
# not recommended in production since it may allow NATS traffic
|
||||
# to be sent to an insecure endpoint.
|
||||
disable_tls_validation: false
|
||||
|
||||
# The prefix to use for stream names for this homeserver - really only useful
|
||||
# if you are running more than one Dendrite server on the same NATS deployment.
|
||||
topic_prefix: Dendrite
|
||||
|
|
@ -106,7 +134,7 @@ app_service_api:
|
|||
|
||||
# Database configuration for this component.
|
||||
database:
|
||||
connection_string: postgresql://username@password:hostname/dendrite_appservice?sslmode=disable
|
||||
connection_string: postgresql://username:password@hostname/dendrite_appservice?sslmode=disable
|
||||
max_open_conns: 10
|
||||
max_idle_conns: 2
|
||||
conn_max_lifetime: -1
|
||||
|
|
@ -163,11 +191,14 @@ client_api:
|
|||
|
||||
# Settings for rate-limited endpoints. Rate limiting kicks in after the threshold
|
||||
# number of "slots" have been taken by requests from a specific host. Each "slot"
|
||||
# will be released after the cooloff time in milliseconds.
|
||||
# will be released after the cooloff time in milliseconds. Server administrators
|
||||
# and appservice users are exempt from rate limiting by default.
|
||||
rate_limiting:
|
||||
enabled: true
|
||||
threshold: 5
|
||||
cooloff_ms: 500
|
||||
exempt_user_ids:
|
||||
# - "@user:domain.com"
|
||||
|
||||
# Configuration for the Federation API.
|
||||
federation_api:
|
||||
|
|
@ -177,7 +208,7 @@ federation_api:
|
|||
external_api:
|
||||
listen: http://[::]:8072
|
||||
database:
|
||||
connection_string: postgresql://username@password:hostname/dendrite_federationapi?sslmode=disable
|
||||
connection_string: postgresql://username:password@hostname/dendrite_federationapi?sslmode=disable
|
||||
max_open_conns: 10
|
||||
max_idle_conns: 2
|
||||
conn_max_lifetime: -1
|
||||
|
|
@ -214,7 +245,7 @@ key_server:
|
|||
listen: http://[::]:7779 # The listen address for incoming API requests
|
||||
connect: http://key_server:7779 # The connect address for other components to use
|
||||
database:
|
||||
connection_string: postgresql://username@password:hostname/dendrite_keyserver?sslmode=disable
|
||||
connection_string: postgresql://username:password@hostname/dendrite_keyserver?sslmode=disable
|
||||
max_open_conns: 10
|
||||
max_idle_conns: 2
|
||||
conn_max_lifetime: -1
|
||||
|
|
@ -227,7 +258,7 @@ media_api:
|
|||
external_api:
|
||||
listen: http://[::]:8074
|
||||
database:
|
||||
connection_string: postgresql://username@password:hostname/dendrite_mediaapi?sslmode=disable
|
||||
connection_string: postgresql://username:password@hostname/dendrite_mediaapi?sslmode=disable
|
||||
max_open_conns: 5
|
||||
max_idle_conns: 2
|
||||
conn_max_lifetime: -1
|
||||
|
|
@ -264,7 +295,7 @@ mscs:
|
|||
# - msc2836 # (Threading, see https://github.com/matrix-org/matrix-doc/pull/2836)
|
||||
# - msc2946 # (Spaces Summary, see https://github.com/matrix-org/matrix-doc/pull/2946)
|
||||
database:
|
||||
connection_string: postgresql://username@password:hostname/dendrite_mscs?sslmode=disable
|
||||
connection_string: postgresql://username:password@hostname/dendrite_mscs?sslmode=disable
|
||||
max_open_conns: 5
|
||||
max_idle_conns: 2
|
||||
conn_max_lifetime: -1
|
||||
|
|
@ -275,7 +306,7 @@ room_server:
|
|||
listen: http://[::]:7770 # The listen address for incoming API requests
|
||||
connect: http://room_server:7770 # The connect address for other components to use
|
||||
database:
|
||||
connection_string: postgresql://username@password:hostname/dendrite_roomserver?sslmode=disable
|
||||
connection_string: postgresql://username:password@hostname/dendrite_roomserver?sslmode=disable
|
||||
max_open_conns: 10
|
||||
max_idle_conns: 2
|
||||
conn_max_lifetime: -1
|
||||
|
|
@ -288,7 +319,7 @@ sync_api:
|
|||
external_api:
|
||||
listen: http://[::]:8073
|
||||
database:
|
||||
connection_string: postgresql://username@password:hostname/dendrite_syncapi?sslmode=disable
|
||||
connection_string: postgresql://username:password@hostname/dendrite_syncapi?sslmode=disable
|
||||
max_open_conns: 10
|
||||
max_idle_conns: 2
|
||||
conn_max_lifetime: -1
|
||||
|
|
@ -304,7 +335,7 @@ user_api:
|
|||
listen: http://[::]:7781 # The listen address for incoming API requests
|
||||
connect: http://user_api:7781 # The connect address for other components to use
|
||||
account_database:
|
||||
connection_string: postgresql://username@password:hostname/dendrite_userapi?sslmode=disable
|
||||
connection_string: postgresql://username:password@hostname/dendrite_userapi?sslmode=disable
|
||||
max_open_conns: 10
|
||||
max_idle_conns: 2
|
||||
conn_max_lifetime: -1
|
||||
|
|
|
|||
|
|
@ -24,7 +24,7 @@ Unfortunately we can't accept contributions without it.
|
|||
|
||||
## Getting up and running
|
||||
|
||||
See the [Installation](INSTALL.md) section for information on how to build an
|
||||
See the [Installation](installation) section for information on how to build an
|
||||
instance of Dendrite. You will likely need this in order to test your changes.
|
||||
|
||||
## Code style
|
||||
|
|
|
|||
|
|
@ -86,9 +86,12 @@ would be a huge help too, as that will help us to understand where the memory us
|
|||
|
||||
You may need to revisit the connection limit of your PostgreSQL server and/or make changes to the `max_connections` lines in your Dendrite configuration. Be aware that each Dendrite component opens its own database connections and has its own connection limit, even in monolith mode!
|
||||
|
||||
## What is being reported when enabling anonymous stats?
|
||||
## What is being reported when enabling phone-home statistics?
|
||||
|
||||
If anonymous stats reporting is enabled, the following data is send to the defined endpoint.
|
||||
Phone-home statistics contain your server's domain name, some configuration information about
|
||||
your deployment and aggregated information about active users on your deployment. They are sent
|
||||
to the endpoint URL configured in your Dendrite configuration file only. The following is an
|
||||
example of the data that is sent:
|
||||
|
||||
```json
|
||||
{
|
||||
|
|
@ -106,7 +109,7 @@ If anonymous stats reporting is enabled, the following data is send to the defin
|
|||
"go_arch": "amd64",
|
||||
"go_os": "linux",
|
||||
"go_version": "go1.16.13",
|
||||
"homeserver": "localhost:8800",
|
||||
"homeserver": "my.domain.com",
|
||||
"log_level": "trace",
|
||||
"memory_rss": 93452,
|
||||
"monolith": true,
|
||||
|
|
|
|||
|
|
@ -233,6 +233,8 @@ GEM
|
|||
multipart-post (2.1.1)
|
||||
nokogiri (1.13.6-arm64-darwin)
|
||||
racc (~> 1.4)
|
||||
nokogiri (1.13.6-x86_64-linux)
|
||||
racc (~> 1.4)
|
||||
octokit (4.22.0)
|
||||
faraday (>= 0.9)
|
||||
sawyer (~> 0.8.0, >= 0.5.3)
|
||||
|
|
@ -263,7 +265,7 @@ GEM
|
|||
thread_safe (0.3.6)
|
||||
typhoeus (1.4.0)
|
||||
ethon (>= 0.9.0)
|
||||
tzinfo (1.2.9)
|
||||
tzinfo (1.2.10)
|
||||
thread_safe (~> 0.1)
|
||||
unf (0.1.4)
|
||||
unf_ext
|
||||
|
|
@ -273,11 +275,11 @@ GEM
|
|||
|
||||
PLATFORMS
|
||||
arm64-darwin-21
|
||||
x86_64-linux
|
||||
|
||||
DEPENDENCIES
|
||||
github-pages (~> 226)
|
||||
jekyll-feed (~> 0.15.1)
|
||||
minima (~> 2.5.1)
|
||||
|
||||
BUNDLED WITH
|
||||
2.3.7
|
||||
|
|
|
|||
|
|
@ -32,6 +32,15 @@ To create a new **admin account**, add the `-admin` flag:
|
|||
./bin/create-account -config /path/to/dendrite.yaml -username USERNAME -admin
|
||||
```
|
||||
|
||||
An example of using `create-account` when running in **Docker**, having found the `CONTAINERNAME` from `docker ps`:
|
||||
|
||||
```bash
|
||||
docker exec -it CONTAINERNAME /usr/bin/create-account -config /path/to/dendrite.yaml -username USERNAME
|
||||
```
|
||||
```bash
|
||||
docker exec -it CONTAINERNAME /usr/bin/create-account -config /path/to/dendrite.yaml -username USERNAME -admin
|
||||
```
|
||||
|
||||
## Using shared secret registration
|
||||
|
||||
Dendrite supports the Synapse-compatible shared secret registration endpoint.
|
||||
|
|
|
|||
|
|
@ -19,6 +19,12 @@ This endpoint will instruct Dendrite to part all local users from the given `roo
|
|||
in the URL. It may take some time to complete. A JSON body will be returned containing
|
||||
the user IDs of all affected users.
|
||||
|
||||
## `/_dendrite/admin/evacuateUser/{userID}`
|
||||
|
||||
This endpoint will instruct Dendrite to part the given local `userID` in the URL from
|
||||
all rooms which they are currently joined. A JSON body will be returned containing
|
||||
the room IDs of all affected rooms.
|
||||
|
||||
## `/_synapse/admin/v1/register`
|
||||
|
||||
Shared secret registration — please see the [user creation page](createusers) for
|
||||
|
|
|
|||
81
docs/administration/5_troubleshooting.md
Normal file
|
|
@ -0,0 +1,81 @@
|
|||
---
|
||||
title: Troubleshooting
|
||||
parent: Administration
|
||||
permalink: /administration/troubleshooting
|
||||
---
|
||||
|
||||
# Troubleshooting
|
||||
|
||||
If your Dendrite installation is acting strangely, there are a few things you should
|
||||
check before seeking help.
|
||||
|
||||
## 1. Logs
|
||||
|
||||
Dendrite, by default, will log all warnings and errors to stdout, in addition to any
|
||||
other locations configured in the `dendrite.yaml` configuration file. Often there will
|
||||
be clues in the logs.
|
||||
|
||||
You can increase this log level to the more verbose `debug` level if necessary by adding
|
||||
this to the config and restarting Dendrite:
|
||||
|
||||
```
|
||||
logging:
|
||||
- type: std
|
||||
level: debug
|
||||
```
|
||||
|
||||
Look specifically for lines that contain `level=error` or `level=warning`.
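For example, you can pull those lines out of a saved log with `grep` (a sketch, assuming Dendrite's output has been redirected to a hypothetical `/var/log/dendrite.log`):

```bash
grep -E "level=(warning|error)" /var/log/dendrite.log | tail -n 50
```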
|
||||
|
||||
## 2. Federation tester
|
||||
|
||||
If you are experiencing problems federating with other homeservers, you should check
|
||||
that the [Federation Tester](https://federationtester.matrix.org) is passing for your
|
||||
server.
|
||||
|
||||
Common reasons that it may not pass include:
|
||||
|
||||
1. Incorrect DNS configuration;
|
||||
2. Misconfigured DNS SRV entries or well-known files;
|
||||
3. Invalid TLS/SSL certificates;
|
||||
4. Reverse proxy configuration issues (if applicable).
|
||||
|
||||
Correct any errors if shown and re-run the federation tester to check the results.
|
||||
|
||||
## 3. System time
|
||||
|
||||
Matrix relies heavily on TLS which requires the system time to be correct. If the clock
|
||||
drifts then you may find that federation no longer works reliably (or at all) and clients may
|
||||
struggle to connect to your Dendrite server.
|
||||
|
||||
Ensure that your system time is correct and consider syncing to a reliable NTP source.
|
||||
|
||||
## 4. Database connections
|
||||
|
||||
If you are using the PostgreSQL database, you should ensure that Dendrite's configured
|
||||
number of database connections does not exceed the maximum allowed by PostgreSQL.
|
||||
|
||||
Open your `postgresql.conf` configuration file and check the value of `max_connections`
|
||||
(which is typically `100` by default). Then open your `dendrite.yaml` configuration file
|
||||
and ensure that:
|
||||
|
||||
1. If you are using the `global.database` section, that `max_open_conns` does not exceed
|
||||
that number;
|
||||
2. If you are **not** using the `global.database` section, that the sum total of all
|
||||
`max_open_conns` across all `database` blocks does not exceed that number.
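As a sketch (illustrative numbers only, assuming the default PostgreSQL `max_connections` of 100), a per-component budget might look like this:

```yaml
room_server:
  database:
    connection_string: postgresql://dendrite:password@localhost/dendrite_roomserver?sslmode=disable
    max_open_conns: 10
sync_api:
  database:
    connection_string: postgresql://dendrite:password@localhost/dendrite_syncapi?sslmode=disable
    max_open_conns: 10
# ...and so on for each component; the sum of all max_open_conns values
# should stay below the PostgreSQL max_connections limit.
```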
|
||||
|
||||
## 5. File descriptors
|
||||
|
||||
Dendrite requires a sufficient number of file descriptors for every connection it makes
|
||||
to a remote server, every connection to the database engine and every file it is reading
|
||||
or writing to at a given time (media, logs etc). We recommend ensuring that the limit is
|
||||
no lower than 65535 for Dendrite.
|
||||
|
||||
Dendrite will check at startup if there are a sufficient number of available descriptors.
|
||||
If there aren't, you will see a log line like this:
|
||||
|
||||
```
|
||||
level=warning msg="IMPORTANT: Process file descriptor limit is currently 65535, it is recommended to raise the limit for Dendrite to at least 65535 to avoid issues"
|
||||
```
|
||||
|
||||
Follow the [Optimisation](../installation/10_optimisation.md) instructions to correct the
|
||||
available number of file descriptors.
|
||||
57
docs/caddy/monolith/Caddyfile
Normal file
|
|
@ -0,0 +1,57 @@
|
|||
# Sample Caddyfile for using Caddy in front of Dendrite.
|
||||
#
|
||||
# Customize email address and domain names.
|
||||
# Optional settings commented out.
|
||||
#
|
||||
# BE SURE YOUR DOMAINS ARE POINTED AT YOUR SERVER FIRST.
|
||||
# Documentation: https://caddyserver.com/docs/
|
||||
#
|
||||
# Bonus tip: If your IP address changes, use Caddy's
|
||||
# dynamic DNS plugin to update your DNS records to
|
||||
# point to your new IP automatically:
|
||||
# https://github.com/mholt/caddy-dynamicdns
|
||||
#
|
||||
|
||||
|
||||
# Global options block
|
||||
{
|
||||
# In case there is a problem with your certificates.
|
||||
# email example@example.com
|
||||
|
||||
# Turn off the admin endpoint if you don't need graceful config
|
||||
# changes and/or are running untrusted code on your machine.
|
||||
# admin off
|
||||
|
||||
# Enable this if your clients don't send ServerName in TLS handshakes.
|
||||
# default_sni example.com
|
||||
|
||||
# Enable debug mode for verbose logging.
|
||||
# debug
|
||||
|
||||
# Use Let's Encrypt's staging endpoint for testing.
|
||||
# acme_ca https://acme-staging-v02.api.letsencrypt.org/directory
|
||||
|
||||
# If you're port-forwarding HTTP/HTTPS ports from 80/443 to something
|
||||
# else, enable these and put the alternate port numbers here.
|
||||
# http_port 8080
|
||||
# https_port 8443
|
||||
}
|
||||
|
||||
# The server name of your matrix homeserver. This example shows
|
||||
# "well-known delegation" from the registered domain to a subdomain,
|
||||
# which is only needed if your server_name doesn't match your Matrix
|
||||
# homeserver URL (i.e. you can show users a vanity domain that looks
|
||||
# nice and is easy to remember but still have your Matrix server on
|
||||
# its own subdomain or hosted service).
|
||||
example.com {
|
||||
header /.well-known/matrix/* Content-Type application/json
|
||||
header /.well-known/matrix/* Access-Control-Allow-Origin *
|
||||
respond /.well-known/matrix/server `{"m.server": "matrix.example.com:443"}`
|
||||
respond /.well-known/matrix/client `{"m.homeserver": {"base_url": "https://matrix.example.com"}}`
|
||||
}
|
||||
|
||||
# The actual domain name whereby your Matrix server is accessed.
|
||||
matrix.example.com {
|
||||
# Set localhost:8008 to the address of your Dendrite server, if different
|
||||
reverse_proxy /_matrix/* localhost:8008
|
||||
}
|
||||
66
docs/caddy/polylith/Caddyfile
Normal file
|
|
@ -0,0 +1,66 @@
|
|||
# Sample Caddyfile for using Caddy in front of Dendrite.
|
||||
#
|
||||
# Customize email address and domain names.
|
||||
# Optional settings commented out.
|
||||
#
|
||||
# BE SURE YOUR DOMAINS ARE POINTED AT YOUR SERVER FIRST.
|
||||
# Documentation: https://caddyserver.com/docs/
|
||||
#
|
||||
# Bonus tip: If your IP address changes, use Caddy's
|
||||
# dynamic DNS plugin to update your DNS records to
|
||||
# point to your new IP automatically:
|
||||
# https://github.com/mholt/caddy-dynamicdns
|
||||
#
|
||||
|
||||
|
||||
# Global options block
|
||||
{
|
||||
# In case there is a problem with your certificates.
|
||||
# email example@example.com
|
||||
|
||||
# Turn off the admin endpoint if you don't need graceful config
|
||||
# changes and/or are running untrusted code on your machine.
|
||||
# admin off
|
||||
|
||||
# Enable this if your clients don't send ServerName in TLS handshakes.
|
||||
# default_sni example.com
|
||||
|
||||
# Enable debug mode for verbose logging.
|
||||
# debug
|
||||
|
||||
# Use Let's Encrypt's staging endpoint for testing.
|
||||
# acme_ca https://acme-staging-v02.api.letsencrypt.org/directory
|
||||
|
||||
# If you're port-forwarding HTTP/HTTPS ports from 80/443 to something
|
||||
# else, enable these and put the alternate port numbers here.
|
||||
# http_port 8080
|
||||
# https_port 8443
|
||||
}
|
||||
|
||||
# The server name of your matrix homeserver. This example shows
|
||||
# "well-known delegation" from the registered domain to a subdomain,
|
||||
# which is only needed if your server_name doesn't match your Matrix
|
||||
# homeserver URL (i.e. you can show users a vanity domain that looks
|
||||
# nice and is easy to remember but still have your Matrix server on
|
||||
# its own subdomain or hosted service).
|
||||
example.com {
|
||||
header /.well-known/matrix/* Content-Type application/json
|
||||
header /.well-known/matrix/* Access-Control-Allow-Origin *
|
||||
respond /.well-known/matrix/server `{"m.server": "matrix.example.com:443"}`
|
||||
respond /.well-known/matrix/client `{"m.homeserver": {"base_url": "https://matrix.example.com"}}`
|
||||
}
|
||||
|
||||
# The actual domain name whereby your Matrix server is accessed.
|
||||
matrix.example.com {
|
||||
# Change the end of each reverse_proxy line to the correct
|
||||
# address for your various services.
|
||||
@sync_api {
|
||||
path_regexp /_matrix/client/.*?/(sync|user/.*?/filter/?.*|keys/changes|rooms/.*?/messages)$
|
||||
}
|
||||
reverse_proxy @sync_api sync_api:8073
|
||||
|
||||
reverse_proxy /_matrix/client* client_api:8071
|
||||
reverse_proxy /_matrix/federation* federation_api:8071
|
||||
reverse_proxy /_matrix/key* federation_api:8071
|
||||
reverse_proxy /_matrix/media* media_api:8071
|
||||
}
|
||||
|
|
@ -2,7 +2,7 @@
|
|||
title: Starting the polylith
|
||||
parent: Installation
|
||||
has_toc: true
|
||||
nav_order: 9
|
||||
nav_order: 10
|
||||
permalink: /installation/start/polylith
|
||||
---
|
||||
|
||||
71
docs/installation/11_optimisation.md
Normal file
|
|
@ -0,0 +1,71 @@
|
|||
---
|
||||
title: Optimise your installation
|
||||
parent: Installation
|
||||
has_toc: true
|
||||
nav_order: 11
|
||||
permalink: /installation/start/optimisation
|
||||
---
|
||||
|
||||
# Optimise your installation
|
||||
|
||||
Now that you have Dendrite running, the following tweaks will improve the reliability
|
||||
and performance of your installation.
|
||||
|
||||
## File descriptor limit
|
||||
|
||||
Most platforms have a limit on how many file descriptors a single process can open. All
|
||||
connections made by Dendrite consume file descriptors — this includes database connections
|
||||
and network requests to remote homeservers. When participating in large federated rooms
|
||||
where Dendrite must talk to many remote servers, it is often very easy to exhaust default
|
||||
limits which are quite low.
|
||||
|
||||
We currently recommend setting the file descriptor limit to 65535 to avoid such
|
||||
issues. Dendrite will log immediately after startup if the file descriptor limit is too low:
|
||||
|
||||
```
|
||||
level=warning msg="IMPORTANT: Process file descriptor limit is currently 1024, it is recommended to raise the limit for Dendrite to at least 65535 to avoid issues"
|
||||
```
|
||||
|
||||
UNIX systems have two limits: a hard limit and a soft limit. You can view the soft limit
|
||||
by running `ulimit -Sn` and the hard limit with `ulimit -Hn`:
|
||||
|
||||
```bash
|
||||
$ ulimit -Hn
|
||||
1048576
|
||||
|
||||
$ ulimit -Sn
|
||||
1024
|
||||
```
|
||||
|
||||
Increase the soft limit before starting Dendrite:
|
||||
|
||||
```bash
|
||||
ulimit -Sn 65535
|
||||
```
|
||||
|
||||
The log line at startup should no longer appear if the limit is sufficient.
|
||||
|
||||
If you are running under a systemd service, you can instead add the `LimitNOFILE=65535` option
|
||||
to the `[Service]` section of your service unit file.
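A minimal sketch of a systemd drop-in that raises the limit (the service name `dendrite.service` is illustrative):

```
# /etc/systemd/system/dendrite.service.d/override.conf
[Service]
LimitNOFILE=65535
```

Run `systemctl daemon-reload` and restart the service for the change to take effect.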
|
||||
|
||||
## DNS caching
|
||||
|
||||
Dendrite has a built-in DNS cache which significantly reduces the load that Dendrite will
|
||||
place on your DNS resolver. This may also speed up outbound federation.
|
||||
|
||||
Consider enabling the DNS cache by modifying the `global` section of your configuration file:
|
||||
|
||||
```yaml
|
||||
dns_cache:
|
||||
enabled: true
|
||||
cache_size: 4096
|
||||
cache_lifetime: 600s
|
||||
```
|
||||
|
||||
## Time synchronisation
|
||||
|
||||
Matrix relies heavily on TLS which requires the system time to be correct. If the clock
|
||||
drifts then you may find that federation no longer works reliably (or at all) and clients may
|
||||
struggle to connect to your Dendrite server.
|
||||
|
||||
Ensure that the time is synchronised on your system by enabling NTP sync.
|
||||
|
|
@ -9,21 +9,19 @@ permalink: /installation/planning
|
|||
|
||||
## Modes
|
||||
|
||||
Dendrite can be run in one of two configurations:
|
||||
Dendrite consists of several components, each responsible for a different aspect of the Matrix protocol.
|
||||
Users can run Dendrite in one of two modes which dictate how these components are executed and communicate.
|
||||
|
||||
* **Monolith mode**: All components run in the same process. In this mode,
|
||||
it is possible to run an in-process NATS Server instead of running a standalone deployment.
|
||||
This will usually be the preferred model for low-to-mid volume deployments, providing the best
|
||||
balance between performance and resource usage.
|
||||
* **Monolith mode** runs all components in a single process. Components communicate through an internal NATS
|
||||
server with generally low overhead. This mode dramatically simplifies deployment complexity and offers the
|
||||
best balance between performance and resource usage for low-to-mid volume deployments.
|
||||
|
||||
* **Polylith mode**: A cluster of individual components running in their own processes, dealing
|
||||
with different aspects of the Matrix protocol. Components communicate with each other using
|
||||
internal HTTP APIs and NATS Server. This will almost certainly be the preferred model for very
|
||||
large deployments but scalability comes with a cost. API calls are expensive and therefore a
|
||||
polylith deployment may end up using disproportionately more resources for a smaller number of
|
||||
users compared to a monolith deployment.
|
||||
* **Polylith mode** runs all components in isolated processes. Components communicate through an external NATS
|
||||
server and HTTP APIs, which incur considerable overhead. While this mode allows for more granular control of
|
||||
resources dedicated toward individual processes, given the additional communications overhead, it is only
|
||||
necessary for very large deployments.
|
||||
|
||||
At present, we **recommend monolith mode deployments** in all cases.
|
||||
Given our current state of development, **we recommend monolith mode** for all deployments.
|
||||
|
||||
## Databases
|
||||
|
||||
|
|
@ -77,7 +75,7 @@ In order to install Dendrite, you will need to satisfy the following dependencie
|
|||
|
||||
### Go
|
||||
|
||||
At this time, Dendrite supports being built with Go 1.16 or later. We do not support building
|
||||
At this time, Dendrite supports being built with Go 1.18 or later. We do not support building
|
||||
Dendrite with older versions of Go than this. If you are installing Go using a package manager,
|
||||
you should check (by running `go version`) that you are using a suitable version before you start.
|
||||
|
||||
|
|
@ -97,12 +95,13 @@ enabled.
|
|||
To do so, follow the [NATS Server installation instructions](https://docs.nats.io/running-a-nats-service/introduction/installation) and then [start your NATS deployment](https://docs.nats.io/running-a-nats-service/introduction/running). JetStream must be enabled, either by passing the `-js` flag to `nats-server`,
|
||||
or by specifying the `store_dir` option in the `jetstream` configuration.
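For example (a sketch, with an illustrative storage path):

```
# Either start the server with JetStream enabled:
#   nats-server -js
# or enable it in the NATS server configuration file:
jetstream {
  store_dir: "/var/lib/nats"
}
```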
|
||||
|
||||
### Reverse proxy (polylith deployments)
|
||||
### Reverse proxy
|
||||
|
||||
Polylith deployments require a reverse proxy, such as [NGINX](https://www.nginx.com) or
|
||||
[HAProxy](http://www.haproxy.org). Configuring those is not covered in this documentation,
|
||||
although a [sample configuration for NGINX](https://github.com/matrix-org/dendrite/blob/main/docs/nginx/polylith-sample.conf)
|
||||
is provided.
|
||||
A reverse proxy such as [Caddy](https://caddyserver.com), [NGINX](https://www.nginx.com) or
|
||||
[HAProxy](http://www.haproxy.org) is required for polylith deployments and is useful for monolith
|
||||
deployments. Configuring those is not covered in this documentation, although sample configurations
|
||||
for [Caddy](https://github.com/matrix-org/dendrite/blob/main/docs/caddy) and
|
||||
[NGINX](https://github.com/matrix-org/dendrite/blob/main/docs/nginx) are provided.
|
||||
|
||||
### Windows
|
||||
|
||||
|
|
|
|||
|
|
@ -14,27 +14,38 @@ that take the format `@user:example.com`.
|
|||
For federation to work, the server name must be resolvable by other homeservers on the internet
|
||||
— that is, the domain must be registered and properly configured with the relevant DNS records.
|
||||
|
||||
Matrix servers discover each other when federating using the following methods:
|
||||
Matrix servers usually discover each other when federating using the following methods:
|
||||
|
||||
1. If a well-known delegation exists on `example.com`, use the path server from the
|
||||
1. If a well-known delegation exists on `example.com`, use the domain and port from the
|
||||
well-known file to connect to the remote homeserver;
|
||||
2. If a DNS SRV delegation exists on `example.com`, use the hostname and port from the DNS SRV
|
||||
2. If a DNS SRV delegation exists on `example.com`, use the IP address and port from the DNS SRV
|
||||
record to connect to the remote homeserver;
|
||||
3. If neither well-known or DNS SRV delegation are configured, attempt to connect to the remote
|
||||
homeserver by connecting to `example.com` port TCP/8448 using HTTPS.
|
||||
|
||||
The exact details of how server name resolution works can be found in
|
||||
[the spec](https://spec.matrix.org/v1.3/server-server-api/#resolving-server-names).
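You can check what other homeservers will discover for a domain using standard tools (a sketch, with `example.com` as a placeholder):

```bash
# Well-known delegation, if any:
curl https://example.com/.well-known/matrix/server

# DNS SRV delegation, if any:
dig +short SRV _matrix._tcp.example.com
```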
|
||||
|
||||
## TLS certificates
|
||||
|
||||
Matrix federation requires that valid TLS certificates are present on the domain. You must
|
||||
obtain certificates from a publicly accepted Certificate Authority (CA). [LetsEncrypt](https://letsencrypt.org)
|
||||
is an example of such a CA that can be used. Self-signed certificates are not suitable for
|
||||
federation and will typically not be accepted by other homeservers.
|
||||
obtain certificates from a publicly-trusted certificate authority (CA). [Let's Encrypt](https://letsencrypt.org)
|
||||
is a popular choice of CA because the certificates are publicly-trusted, free, and automated
|
||||
via the ACME protocol. (Self-signed certificates are not suitable for federation and will typically
|
||||
not be accepted by other homeservers.)
|
||||
|
||||
A common practice to help ease the management of certificates is to install a reverse proxy in
|
||||
front of Dendrite which manages the TLS certificates and HTTPS proxying itself. Software such as
|
||||
[NGINX](https://www.nginx.com) and [HAProxy](http://www.haproxy.org) can be used for the task.
|
||||
Although the finer details of configuring these are not described here, you must reverse proxy
|
||||
all `/_matrix` paths to your Dendrite server.
|
||||
Automating the renewal of TLS certificates is best practice. There are many tools for this,
|
||||
but the simplest way to achieve TLS automation is to have your reverse proxy do it for you.
|
||||
[Caddy](https://caddyserver.com) is recommended as a production-grade reverse proxy with
|
||||
automatic TLS which is commonly used in front of Dendrite. It obtains and renews TLS certificates
|
||||
automatically and by default as long as your domain name is pointed at your server first.
|
||||
Although the finer details of [configuring Caddy](https://caddyserver.com/docs/) are not described
|
||||
here, in general, you must reverse proxy all `/_matrix` paths to your Dendrite server. For example,
|
||||
with Caddy:
|
||||
|
||||
```
|
||||
reverse_proxy /_matrix/* localhost:8008
|
||||
```
|
||||
|
||||
It is possible for the reverse proxy to listen on the standard HTTPS port TCP/443 so long as your
|
||||
domain delegation is configured to point to port TCP/443.
|
||||
|
|
@ -51,17 +62,12 @@ you will be able to delegate from `example.com` to `matrix.example.com` so that
|
|||
|
||||
Delegation can be performed in one of two ways:
|
||||
|
||||
* **Well-known delegation**: A well-known text file is served over HTTPS on the domain name
|
||||
that you want to use, pointing to your server on `matrix.example.com` port 8448;
|
||||
* **DNS SRV delegation**: A DNS SRV record is created on the domain name that you want to
|
||||
use, pointing to your server on `matrix.example.com` port TCP/8448.
|
||||
* **Well-known delegation (preferred)**: A well-known text file is served over HTTPS on the domain
|
||||
name that you want to use, pointing to your server on `matrix.example.com` port 8448;
|
||||
* **DNS SRV delegation (not recommended)**: See the SRV delegation section below for details.
|
||||
|
||||
If you are using a reverse proxy to forward `/_matrix` to Dendrite, your well-known or DNS SRV
|
||||
delegation must refer to the hostname and port that the reverse proxy is listening on instead.
|
||||
|
||||
Well-known delegation is typically easier to set up and usually preferred. However, you can use
|
||||
either or both methods to delegate. If you configure both methods of delegation, it is important
|
||||
that they both agree and refer to the same hostname and port.
|
||||
If you are using a reverse proxy to forward `/_matrix` to Dendrite, your well-known or SRV delegation
|
||||
must refer to the hostname and port that the reverse proxy is listening on instead.
|
||||
|
||||
## Well-known delegation
|
||||
|
||||
|
|
@ -74,20 +80,46 @@ and contain the following JSON document:
|
|||
|
||||
```json
|
||||
{
|
||||
"m.server": "https://matrix.example.com:8448"
|
||||
"m.server": "matrix.example.com:8448"
|
||||
}
|
||||
```
|
||||
|
||||
For example, this can be done with the following Caddy config:
|
||||
|
||||
```
|
||||
handle /.well-known/matrix/client {
|
||||
header Content-Type application/json
|
||||
header Access-Control-Allow-Origin *
|
||||
respond `{"m.homeserver": {"base_url": "https://matrix.example.com:8448"}}`
|
||||
}
|
||||
```
|
||||
|
||||
You can also serve `.well-known` with Dendrite itself by setting the `well_known_server_name` config
|
||||
option to the value you want for `m.server`. This is primarily useful if Dendrite is exposed on
|
||||
`example.com:443` and you don't want to set up a separate webserver just for serving the `.well-known`
|
||||
file.
|
||||
|
||||
```yaml
|
||||
global:
|
||||
...
|
||||
well_known_server_name: "example.com:443"
|
||||
```
|
||||
|
||||
## DNS SRV delegation
|
||||
|
||||
Using DNS SRV delegation requires creating DNS SRV records on the `example.com` zone which
|
||||
refer to your Dendrite installation.
|
||||
This method is not recommended, as the behavior of SRV records in Matrix is rather unintuitive:
|
||||
SRV records will only change the IP address and port that other servers connect to, they won't
|
||||
affect the domain name. In technical terms, the `Host` header and TLS SNI of federation requests
|
||||
will still be `example.com` even if the SRV record points at `matrix.example.com`.
|
||||
|
||||
Assuming that your Dendrite installation is listening for HTTPS connections at `matrix.example.com`
|
||||
port 8448, the DNS SRV record must have the following fields:
|
||||
In practice, this means that the server must be configured with valid TLS certificates for
|
||||
`example.com`, rather than `matrix.example.com` as one might intuitively expect. If there's a
|
||||
reverse proxy in between, the proxy configuration must be written as if it's `example.com`, as the
|
||||
proxy will never see the name `matrix.example.com` in incoming requests.
|
||||
|
||||
* Name: `@` (or whichever term your DNS provider uses to signal the root)
|
||||
* Service: `_matrix`
|
||||
* Protocol: `_tcp`
|
||||
* Port: `8448`
|
||||
* Target: `matrix.example.com`
|
||||
This behavior also means that if `example.com` and `matrix.example.com` point at the same IP
|
||||
address, there is no reason to have a SRV record pointing at `matrix.example.com`. It can still
|
||||
be used to change the port number, but it won't do anything else.
|
||||
|
||||
If you understand how SRV records work and still want to use them, the service name is `_matrix` and
|
||||
the protocol is `_tcp`.
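A sketch of what such a record could look like in a BIND-style zone file (TTL, port and target are illustrative):

```
_matrix._tcp.example.com. 3600 IN SRV 10 10 8448 matrix.example.com.
```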
|
||||
|
|
|
|||
38
docs/installation/3_build.md
Normal file
|
|
@ -0,0 +1,38 @@
|
|||
---
|
||||
title: Building Dendrite
|
||||
parent: Installation
|
||||
has_toc: true
|
||||
nav_order: 3
|
||||
permalink: /installation/build
|
||||
---
|
||||
|
||||
# Build all Dendrite commands
|
||||
|
||||
Dendrite has numerous utility commands in addition to the actual server binaries.
|
||||
Build them all from the root of the source repo with `build.sh` (Linux/Mac):
|
||||
|
||||
```sh
|
||||
./build.sh
|
||||
```
|
||||
|
||||
or `build.cmd` (Windows):
|
||||
|
||||
```powershell
|
||||
build.cmd
|
||||
```
|
||||
|
||||
The resulting binaries will be placed in the `bin` subfolder.
|
||||
|
||||
# Installing as a monolith
|
||||
|
||||
You can install the Dendrite monolith binary into `$GOPATH/bin` by using `go install`:
|
||||
|
||||
```sh
|
||||
go install ./cmd/dendrite-monolith-server
|
||||
```
|
||||
|
||||
Alternatively, you can specify a custom path for the binary to be written to using `go build`:
|
||||
|
||||
```sh
|
||||
go build -o /usr/local/bin/ ./cmd/dendrite-monolith-server
|
||||
```
@ -17,7 +17,9 @@ filenames in the Dendrite configuration file and start Dendrite. The databases w
and populated automatically.

Note that Dendrite **cannot share a single SQLite database across multiple components**. Each
component must be configured with its own SQLite database filename.
component must be configured with its own SQLite database filename. You will have to remove
the `global.database` section from your Dendrite config and add it to each individual section
instead in order to use SQLite.

### Connection strings
@ -29,5 +29,6 @@ Polylith deployments require a reverse proxy in order to ensure that requests ar
sent to the correct endpoint. You must ensure that a suitable reverse proxy is installed
and configured.

A [sample configuration file](https://github.com/matrix-org/dendrite/blob/main/docs/nginx/polylith-sample.conf)
is provided for [NGINX](https://www.nginx.com).
Sample configurations are provided
for [Caddy](https://github.com/matrix-org/dendrite/blob/main/docs/caddy/polylith/Caddyfile)
and [NGINX](https://github.com/matrix-org/dendrite/blob/main/docs/nginx/polylith-sample.conf).
@ -1,13 +1,13 @@
---
title: Populate the configuration
title: Configuring Dendrite
parent: Installation
nav_order: 7
permalink: /installation/configuration
---

# Populate the configuration
# Configuring Dendrite

The configuration file is used to configure Dendrite. Sample configuration files are
A YAML configuration file is used to configure Dendrite. Sample configuration files are
present in the top level of the Dendrite repository:

* [`dendrite-sample.monolith.yaml`](https://github.com/matrix-org/dendrite/blob/main/dendrite-sample.monolith.yaml)
@ -1,7 +1,7 @@
---
title: Generating signing keys
parent: Installation
nav_order: 4
nav_order: 8
permalink: /installation/signingkeys
---
@ -15,8 +15,9 @@ you can start your Dendrite monolith deployment by starting the `dendrite-monoli
./dendrite-monolith-server -config /path/to/dendrite.yaml
```

If you want to change the addresses or ports that Dendrite listens on, you
can use the `-http-bind-address` and `-https-bind-address` command line arguments:
By default, Dendrite will listen HTTP on port 8008. If you want to change the addresses
or ports that Dendrite listens on, you can use the `-http-bind-address` and
`-https-bind-address` command line arguments:

```bash
./dendrite-monolith-server -config /path/to/dendrite.yaml \
@ -133,7 +133,7 @@ func (t *OutputPresenceConsumer) onMessage(ctx context.Context, msg *nats.Msg) b
return true
}

log.Debugf("sending presence EDU to %d servers", len(joined))
log.Tracef("sending presence EDU to %d servers", len(joined))
if err = t.queues.SendEDU(edu, t.ServerName, joined); err != nil {
log.WithError(err).Error("failed to send EDU")
return false
@ -90,7 +90,7 @@ func (t *OutputReceiptConsumer) onMessage(ctx context.Context, msg *nats.Msg) bo
return true
}

timestamp, err := strconv.Atoi(msg.Header.Get("timestamp"))
timestamp, err := strconv.ParseUint(msg.Header.Get("timestamp"), 10, 64)
if err != nil {
// If the message was invalid, log it and move on to the next message in the stream
log.WithError(err).Errorf("EDU output log: message parse failure")
@ -95,6 +95,11 @@ func (t *OutputSendToDeviceConsumer) onMessage(ctx context.Context, msg *nats.Ms
return true
}

// The SyncAPI is already handling sendToDevice for the local server
if destServerName == t.ServerName {
return true
}

// Pack the EDU and marshal it
edu := &gomatrixserverlib.EDU{
Type: gomatrixserverlib.MDirectToDevice,
@ -65,6 +65,7 @@ func AddPublicRoutes(
TopicSendToDeviceEvent: cfg.Matrix.JetStream.Prefixed(jetstream.OutputSendToDeviceEvent),
TopicTypingEvent: cfg.Matrix.JetStream.Prefixed(jetstream.OutputTypingEvent),
TopicPresenceEvent: cfg.Matrix.JetStream.Prefixed(jetstream.OutputPresenceEvent),
TopicDeviceListUpdate: cfg.Matrix.JetStream.Prefixed(jetstream.InputDeviceListUpdate),
ServerName: cfg.Matrix.ServerName,
UserAPI: userAPI,
}
@ -9,7 +9,6 @@ import (
|
|||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
|
|
@ -55,60 +54,59 @@ var servers = map[string]*server{
|
|||
func TestMain(m *testing.M) {
|
||||
// Set up the server key API for each "server" that we
|
||||
// will use in our tests.
|
||||
for _, s := range servers {
|
||||
// Generate a new key.
|
||||
_, testPriv, err := ed25519.GenerateKey(nil)
|
||||
if err != nil {
|
||||
panic("can't generate identity key: " + err.Error())
|
||||
os.Exit(func() int {
|
||||
for _, s := range servers {
|
||||
// Generate a new key.
|
||||
_, testPriv, err := ed25519.GenerateKey(nil)
|
||||
if err != nil {
|
||||
panic("can't generate identity key: " + err.Error())
|
||||
}
|
||||
|
||||
// Create a new cache but don't enable prometheus!
|
||||
s.cache = caching.NewRistrettoCache(8*1024*1024, time.Hour, false)
|
||||
|
||||
// Create a temporary directory for JetStream.
|
||||
d, err := ioutil.TempDir("./", "jetstream*")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer os.RemoveAll(d)
|
||||
|
||||
// Draw up just enough Dendrite config for the server key
|
||||
// API to work.
|
||||
cfg := &config.Dendrite{}
|
||||
cfg.Defaults(true)
|
||||
cfg.Global.ServerName = gomatrixserverlib.ServerName(s.name)
|
||||
cfg.Global.PrivateKey = testPriv
|
||||
cfg.Global.JetStream.InMemory = true
|
||||
cfg.Global.JetStream.TopicPrefix = string(s.name[:1])
|
||||
cfg.Global.JetStream.StoragePath = config.Path(d)
|
||||
cfg.Global.KeyID = serverKeyID
|
||||
cfg.Global.KeyValidityPeriod = s.validity
|
||||
cfg.FederationAPI.Database.ConnectionString = config.DataSource("file::memory:")
|
||||
s.config = &cfg.FederationAPI
|
||||
|
||||
// Create a transport which redirects federation requests to
|
||||
// the mock round tripper. Since we're not *really* listening for
|
||||
// federation requests then this will return the key instead.
|
||||
transport := &http.Transport{}
|
||||
transport.RegisterProtocol("matrix", &MockRoundTripper{})
|
||||
|
||||
// Create the federation client.
|
||||
s.fedclient = gomatrixserverlib.NewFederationClient(
|
||||
s.config.Matrix.ServerName, serverKeyID, testPriv,
|
||||
gomatrixserverlib.WithTransport(transport),
|
||||
)
|
||||
|
||||
// Finally, build the server key APIs.
|
||||
sbase := base.NewBaseDendrite(cfg, "Monolith", base.DisableMetrics)
|
||||
s.api = NewInternalAPI(sbase, s.fedclient, nil, s.cache, nil, true)
|
||||
}
|
||||
|
||||
// Create a new cache but don't enable prometheus!
|
||||
s.cache, err = caching.NewInMemoryLRUCache(false)
|
||||
if err != nil {
|
||||
panic("can't create cache: " + err.Error())
|
||||
}
|
||||
|
||||
// Create a temporary directory for JetStream.
|
||||
d, err := ioutil.TempDir("./", "jetstream*")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer os.RemoveAll(d)
|
||||
|
||||
// Draw up just enough Dendrite config for the server key
|
||||
// API to work.
|
||||
cfg := &config.Dendrite{}
|
||||
cfg.Defaults(true)
|
||||
cfg.Global.ServerName = gomatrixserverlib.ServerName(s.name)
|
||||
cfg.Global.PrivateKey = testPriv
|
||||
cfg.Global.JetStream.InMemory = true
|
||||
cfg.Global.JetStream.TopicPrefix = string(s.name[:1])
|
||||
cfg.Global.JetStream.StoragePath = config.Path(d)
|
||||
cfg.Global.KeyID = serverKeyID
|
||||
cfg.Global.KeyValidityPeriod = s.validity
|
||||
cfg.FederationAPI.Database.ConnectionString = config.DataSource("file::memory:")
|
||||
s.config = &cfg.FederationAPI
|
||||
|
||||
// Create a transport which redirects federation requests to
|
||||
// the mock round tripper. Since we're not *really* listening for
|
||||
// federation requests then this will return the key instead.
|
||||
transport := &http.Transport{}
|
||||
transport.RegisterProtocol("matrix", &MockRoundTripper{})
|
||||
|
||||
// Create the federation client.
|
||||
s.fedclient = gomatrixserverlib.NewFederationClient(
|
||||
s.config.Matrix.ServerName, serverKeyID, testPriv,
|
||||
gomatrixserverlib.WithTransport(transport),
|
||||
)
|
||||
|
||||
// Finally, build the server key APIs.
|
||||
sbase := base.NewBaseDendrite(cfg, "Monolith", base.DisableMetrics)
|
||||
s.api = NewInternalAPI(sbase, s.fedclient, nil, s.cache, nil, true)
|
||||
}
|
||||
|
||||
// Now that we have built our server key APIs, start the
|
||||
// rest of the tests.
|
||||
os.Exit(m.Run())
|
||||
// Now that we have built our server key APIs, start the
|
||||
// rest of the tests.
|
||||
return m.Run()
|
||||
}())
|
||||
}
|
||||
|
||||
type MockRoundTripper struct{}
|
||||
|
|
@ -168,72 +166,6 @@ func TestServersRequestOwnKeys(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestCachingBehaviour(t *testing.T) {
|
||||
// Server A will request Server B's key, which has a validity
|
||||
// period of an hour from now. We should retrieve the key and
|
||||
// it should make it into the cache automatically.
|
||||
|
||||
req := gomatrixserverlib.PublicKeyLookupRequest{
|
||||
ServerName: serverB.name,
|
||||
KeyID: serverKeyID,
|
||||
}
|
||||
ts := gomatrixserverlib.AsTimestamp(time.Now())
|
||||
|
||||
res, err := serverA.api.FetchKeys(
|
||||
context.Background(),
|
||||
map[gomatrixserverlib.PublicKeyLookupRequest]gomatrixserverlib.Timestamp{
|
||||
req: ts,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("server A failed to retrieve server B key: %s", err)
|
||||
}
|
||||
if len(res) != 1 {
|
||||
t.Fatalf("server B should have returned one key but instead returned %d keys", len(res))
|
||||
}
|
||||
if _, ok := res[req]; !ok {
|
||||
t.Fatalf("server B isn't included in the key fetch response")
|
||||
}
|
||||
|
||||
// At this point, if the previous key request was a success,
|
||||
// then the cache should now contain the key. Check if that's
|
||||
// the case - if it isn't then there's something wrong with
|
||||
// the cache implementation or we failed to get the key.
|
||||
|
||||
cres, ok := serverA.cache.GetServerKey(req, ts)
|
||||
if !ok {
|
||||
t.Fatalf("server B key should be in cache but isn't")
|
||||
}
|
||||
if !reflect.DeepEqual(cres, res[req]) {
|
||||
t.Fatalf("the cached result from server B wasn't what server B gave us")
|
||||
}
|
||||
|
||||
// If we ask the cache for the same key but this time for an event
|
||||
// that happened in +30 minutes. Since the validity period is for
|
||||
// another hour, then we should get a response back from the cache.
|
||||
|
||||
_, ok = serverA.cache.GetServerKey(
|
||||
req,
|
||||
gomatrixserverlib.AsTimestamp(time.Now().Add(time.Minute*30)),
|
||||
)
|
||||
if !ok {
|
||||
t.Fatalf("server B key isn't in cache when it should be (+30 minutes)")
|
||||
}
|
||||
|
||||
// If we ask the cache for the same key but this time for an event
|
||||
// that happened in +90 minutes then we should expect to get no
|
||||
// cache result. This is because the cache shouldn't return a result
|
||||
// that is obviously past the validity of the event.
|
||||
|
||||
_, ok = serverA.cache.GetServerKey(
|
||||
req,
|
||||
gomatrixserverlib.AsTimestamp(time.Now().Add(time.Minute*90)),
|
||||
)
|
||||
if ok {
|
||||
t.Fatalf("server B key is in cache when it shouldn't be (+90 minutes)")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRenewalBehaviour(t *testing.T) {
|
||||
// Server A will request Server C's key but their validity period
|
||||
// is an hour in the past. We'll retrieve the key as, even though it's
|
||||
|
|
@ -260,32 +192,7 @@ func TestRenewalBehaviour(t *testing.T) {
|
|||
t.Fatalf("server C isn't included in the key fetch response")
|
||||
}
|
||||
|
||||
// If we ask the cache for the server key for an event that happened
|
||||
// 90 minutes ago then we should get a cache result, as the key hadn't
|
||||
// passed its validity by that point. The fact that the key is now in
|
||||
// the cache is, in itself, proof that we successfully retrieved the
|
||||
// key before.
|
||||
|
||||
oldcached, ok := serverA.cache.GetServerKey(
|
||||
req,
|
||||
gomatrixserverlib.AsTimestamp(time.Now().Add(-time.Minute*90)),
|
||||
)
|
||||
if !ok {
|
||||
t.Fatalf("server C key isn't in cache when it should be (-90 minutes)")
|
||||
}
|
||||
|
||||
// If we now ask the cache for the same key but this time for an event
|
||||
// that only happened 30 minutes ago then we shouldn't get a cached
|
||||
// result, as the event happened after the key validity expired. This
|
||||
// is really just for sanity checking.
|
||||
|
||||
_, ok = serverA.cache.GetServerKey(
|
||||
req,
|
||||
gomatrixserverlib.AsTimestamp(time.Now().Add(-time.Minute*30)),
|
||||
)
|
||||
if ok {
|
||||
t.Fatalf("server B key is in cache when it shouldn't be (-30 minutes)")
|
||||
}
|
||||
originalValidity := res[req].ValidUntilTS
|
||||
|
||||
// We're now going to kick server C into renewing its key. Since we're
|
||||
// happy at this point that the key that we already have is from the past
|
||||
|
|
@ -306,24 +213,13 @@ func TestRenewalBehaviour(t *testing.T) {
|
|||
if len(res) != 1 {
|
||||
t.Fatalf("server C should have returned one key but instead returned %d keys", len(res))
|
||||
}
|
||||
if _, ok = res[req]; !ok {
|
||||
if _, ok := res[req]; !ok {
|
||||
t.Fatalf("server C isn't included in the key fetch response")
|
||||
}
|
||||
|
||||
// We're now going to ask the cache what the new key validity is. If
|
||||
// it is still the same as the previous validity then we've failed to
|
||||
// retrieve the renewed key. If it's newer then we've successfully got
|
||||
// the renewed key.
|
||||
currentValidity := res[req].ValidUntilTS
|
||||
|
||||
newcached, ok := serverA.cache.GetServerKey(
|
||||
req,
|
||||
gomatrixserverlib.AsTimestamp(time.Now().Add(-time.Minute*30)),
|
||||
)
|
||||
if !ok {
|
||||
t.Fatalf("server B key isn't in cache when it shouldn't be (post-renewal)")
|
||||
if originalValidity == currentValidity {
|
||||
t.Fatalf("server C key should have renewed but didn't")
|
||||
}
|
||||
if oldcached.ValidUntilTS >= newcached.ValidUntilTS {
|
||||
t.Fatalf("the server B key should have been renewed but wasn't")
|
||||
}
|
||||
t.Log(res)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -166,7 +166,8 @@ func (r *FederationInternalAPI) performJoinUsingServer(
|
|||
if content == nil {
|
||||
content = map[string]interface{}{}
|
||||
}
|
||||
content["membership"] = "join"
|
||||
_ = json.Unmarshal(respMakeJoin.JoinEvent.Content, &content)
|
||||
content["membership"] = gomatrixserverlib.Join
|
||||
if err = respMakeJoin.JoinEvent.SetContent(content); err != nil {
|
||||
return fmt.Errorf("respMakeJoin.JoinEvent.SetContent: %w", err)
|
||||
}
|
||||
|
|
@ -209,10 +210,22 @@ func (r *FederationInternalAPI) performJoinUsingServer(
|
|||
}
|
||||
r.statistics.ForServer(serverName).Success()
|
||||
|
||||
authEvents := respSendJoin.AuthEvents.UntrustedEvents(respMakeJoin.RoomVersion)
|
||||
// If the remote server returned an event in the "event" key of
|
||||
// the send_join request then we should use that instead. It may
|
||||
// contain signatures that we don't know about.
|
||||
if len(respSendJoin.Event) > 0 {
|
||||
var remoteEvent *gomatrixserverlib.Event
|
||||
remoteEvent, err = respSendJoin.Event.UntrustedEvent(respMakeJoin.RoomVersion)
|
||||
if err == nil && isWellFormedMembershipEvent(
|
||||
remoteEvent, roomID, userID, r.cfg.Matrix.ServerName,
|
||||
) {
|
||||
event = remoteEvent
|
||||
}
|
||||
}
|
||||
|
||||
// Sanity-check the join response to ensure that it has a create
|
||||
// event, that the room version is known, etc.
|
||||
authEvents := respSendJoin.AuthEvents.UntrustedEvents(respMakeJoin.RoomVersion)
|
||||
if err = sanityCheckAuthChain(authEvents); err != nil {
|
||||
return fmt.Errorf("sanityCheckAuthChain: %w", err)
|
||||
}
|
||||
|
|
@ -270,6 +283,26 @@ func (r *FederationInternalAPI) performJoinUsingServer(
|
|||
return nil
|
||||
}
|
||||
|
||||
// isWellFormedMembershipEvent returns true if the event looks like a legitimate
|
||||
// membership event.
|
||||
func isWellFormedMembershipEvent(event *gomatrixserverlib.Event, roomID, userID string, origin gomatrixserverlib.ServerName) bool {
|
||||
if membership, err := event.Membership(); err != nil {
|
||||
return false
|
||||
} else if membership != gomatrixserverlib.Join {
|
||||
return false
|
||||
}
|
||||
if event.RoomID() != roomID {
|
||||
return false
|
||||
}
|
||||
if event.Origin() != origin {
|
||||
return false
|
||||
}
|
||||
if !event.StateKeyEquals(userID) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// PerformOutboundPeekRequest implements api.FederationInternalAPI
|
||||
func (r *FederationInternalAPI) PerformOutboundPeek(
|
||||
ctx context.Context,
|
||||
|
|
|
|||
|
|
@ -17,6 +17,7 @@ package producers
|
|||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
|
|
@ -34,6 +35,7 @@ type SyncAPIProducer struct {
|
|||
TopicSendToDeviceEvent string
|
||||
TopicTypingEvent string
|
||||
TopicPresenceEvent string
|
||||
TopicDeviceListUpdate string
|
||||
JetStream nats.JetStreamContext
|
||||
ServerName gomatrixserverlib.ServerName
|
||||
UserAPI userapi.UserInternalAPI
|
||||
|
|
@ -51,7 +53,7 @@ func (p *SyncAPIProducer) SendReceipt(
|
|||
m.Header.Set(jetstream.RoomID, roomID)
|
||||
m.Header.Set(jetstream.EventID, eventID)
|
||||
m.Header.Set("type", receiptType)
|
||||
m.Header.Set("timestamp", strconv.Itoa(int(timestamp)))
|
||||
m.Header.Set("timestamp", fmt.Sprintf("%d", timestamp))
|
||||
|
||||
log.WithFields(log.Fields{}).Tracef("Producing to topic '%s'", p.TopicReceiptEvent)
|
||||
_, err := p.JetStream.PublishMsg(m, nats.Context(ctx))
|
||||
|
|
@ -157,7 +159,22 @@ func (p *SyncAPIProducer) SendPresence(
|
|||
lastActiveTS := gomatrixserverlib.AsTimestamp(time.Now().Add(-(time.Duration(lastActiveAgo) * time.Millisecond)))
|
||||
|
||||
m.Header.Set("last_active_ts", strconv.Itoa(int(lastActiveTS)))
|
||||
log.Debugf("Sending presence to syncAPI: %+v", m.Header)
|
||||
log.Tracef("Sending presence to syncAPI: %+v", m.Header)
|
||||
_, err := p.JetStream.PublishMsg(m, nats.Context(ctx))
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *SyncAPIProducer) SendDeviceListUpdate(
|
||||
ctx context.Context, deviceListUpdate *gomatrixserverlib.DeviceListUpdateEvent,
|
||||
) (err error) {
|
||||
m := nats.NewMsg(p.TopicDeviceListUpdate)
|
||||
m.Header.Set(jetstream.UserID, deviceListUpdate.UserID)
|
||||
m.Data, err = json.Marshal(deviceListUpdate)
|
||||
if err != nil {
|
||||
return fmt.Errorf("json.Marshal: %w", err)
|
||||
}
|
||||
|
||||
log.Debugf("Sending device list update: %+v", m.Header)
|
||||
_, err = p.JetStream.PublishMsg(m, nats.Context(ctx))
|
||||
return err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -85,6 +85,9 @@ func GetUserDevices(
|
|||
if targetKey, ok := targetUser[gomatrixserverlib.KeyID(dev.DeviceID)]; ok {
|
||||
for sourceUserID, forSourceUser := range targetKey {
|
||||
for sourceKeyID, sourceKey := range forSourceUser {
|
||||
if device.Keys.Signatures == nil {
|
||||
device.Keys.Signatures = map[string]map[gomatrixserverlib.KeyID]gomatrixserverlib.Base64Bytes{}
|
||||
}
|
||||
if _, ok := device.Keys.Signatures[sourceUserID]; !ok {
|
||||
device.Keys.Signatures[sourceUserID] = map[gomatrixserverlib.KeyID]gomatrixserverlib.Base64Bytes{}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -141,10 +141,16 @@ func processInvite(
|
|||
}
|
||||
|
||||
// Check that the event is signed by the server sending the request.
|
||||
redacted := event.Redact()
|
||||
redacted, err := gomatrixserverlib.RedactEventJSON(event.JSON(), event.Version())
|
||||
if err != nil {
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
JSON: jsonerror.BadJSON("The event JSON could not be redacted"),
|
||||
}
|
||||
}
|
||||
verifyRequests := []gomatrixserverlib.VerifyJSONRequest{{
|
||||
ServerName: event.Origin(),
|
||||
Message: redacted.JSON(),
|
||||
Message: redacted,
|
||||
AtTS: event.OriginServerTS(),
|
||||
StrictValidityChecking: true,
|
||||
}}
|
||||
|
|
|
|||
|
|
@ -15,6 +15,7 @@
|
|||
package routing
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sort"
|
||||
|
|
@ -103,6 +104,16 @@ func MakeJoin(
|
|||
}
|
||||
}
|
||||
|
||||
// Check if the restricted join is allowed. If the room doesn't
|
||||
// support restricted joins then this is effectively a no-op.
|
||||
res, authorisedVia, err := checkRestrictedJoin(httpReq, rsAPI, verRes.RoomVersion, roomID, userID)
|
||||
if err != nil {
|
||||
util.GetLogger(httpReq.Context()).WithError(err).Error("checkRestrictedJoin failed")
|
||||
return jsonerror.InternalServerError()
|
||||
} else if res != nil {
|
||||
return *res
|
||||
}
|
||||
|
||||
// Try building an event for the server
|
||||
builder := gomatrixserverlib.EventBuilder{
|
||||
Sender: userID,
|
||||
|
|
@ -110,8 +121,11 @@ func MakeJoin(
|
|||
Type: "m.room.member",
|
||||
StateKey: &userID,
|
||||
}
|
||||
err = builder.SetContent(map[string]interface{}{"membership": gomatrixserverlib.Join})
|
||||
if err != nil {
|
||||
content := gomatrixserverlib.MemberContent{
|
||||
Membership: gomatrixserverlib.Join,
|
||||
AuthorisedVia: authorisedVia,
|
||||
}
|
||||
if err = builder.SetContent(content); err != nil {
|
||||
util.GetLogger(httpReq.Context()).WithError(err).Error("builder.SetContent failed")
|
||||
return jsonerror.InternalServerError()
|
||||
}
|
||||
|
|
@ -161,6 +175,7 @@ func MakeJoin(
|
|||
// SendJoin implements the /send_join API
|
||||
// The make-join send-join dance makes much more sense as a single
|
||||
// flow so the cyclomatic complexity is high:
|
||||
// nolint:gocyclo
|
||||
func SendJoin(
|
||||
httpReq *http.Request,
|
||||
request *gomatrixserverlib.FederationRequest,
|
||||
|
|
@ -187,6 +202,14 @@ func SendJoin(
|
|||
}
|
||||
}
|
||||
|
||||
// Check that the event is from the server sending the request.
|
||||
if event.Origin() != request.Origin() {
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusForbidden,
|
||||
JSON: jsonerror.Forbidden("The join must be sent by the server it originated on"),
|
||||
}
|
||||
}
|
||||
|
||||
// Check that a state key is provided.
|
||||
if event.StateKey() == nil || event.StateKeyEquals("") {
|
||||
return util.JSONResponse{
|
||||
|
|
@ -201,6 +224,22 @@ func SendJoin(
|
|||
}
|
||||
}
|
||||
|
||||
// Check that the sender belongs to the server that is sending us
|
||||
// the request. By this point we've already asserted that the sender
|
||||
// and the state key are equal so we don't need to check both.
|
||||
var domain gomatrixserverlib.ServerName
|
||||
if _, domain, err = gomatrixserverlib.SplitID('@', event.Sender()); err != nil {
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusForbidden,
|
||||
JSON: jsonerror.Forbidden("The sender of the join is invalid"),
|
||||
}
|
||||
} else if domain != request.Origin() {
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusForbidden,
|
||||
JSON: jsonerror.Forbidden("The sender of the join must belong to the origin server"),
|
||||
}
|
||||
}
|
||||
|
||||
// Check that the room ID is correct.
|
||||
if event.RoomID() != roomID {
|
||||
return util.JSONResponse{
|
||||
|
|
@ -227,14 +266,6 @@ func SendJoin(
|
|||
}
|
||||
}
|
||||
|
||||
// Check that the event is from the server sending the request.
|
||||
if event.Origin() != request.Origin() {
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusForbidden,
|
||||
JSON: jsonerror.Forbidden("The join must be sent by the server it originated on"),
|
||||
}
|
||||
}
|
||||
|
||||
// Check that this is in fact a join event
|
||||
membership, err := event.Membership()
|
||||
if err != nil {
|
||||
|
|
@ -251,10 +282,17 @@ func SendJoin(
|
|||
}
|
||||
|
||||
// Check that the event is signed by the server sending the request.
|
||||
redacted := event.Redact()
|
||||
redacted, err := gomatrixserverlib.RedactEventJSON(event.JSON(), event.Version())
|
||||
if err != nil {
|
||||
logrus.WithError(err).Errorf("XXX: join.go")
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
JSON: jsonerror.BadJSON("The event JSON could not be redacted"),
|
||||
}
|
||||
}
|
||||
verifyRequests := []gomatrixserverlib.VerifyJSONRequest{{
|
||||
ServerName: event.Origin(),
|
||||
Message: redacted.JSON(),
|
||||
Message: redacted,
|
||||
AtTS: event.OriginServerTS(),
|
||||
StrictValidityChecking: true,
|
||||
}}
|
||||
|
|
@ -314,6 +352,40 @@ func SendJoin(
|
|||
}
|
||||
}
|
||||
|
||||
// If the membership content contains a user ID for a server that is not
|
||||
// ours then we should kick it back.
|
||||
var memberContent gomatrixserverlib.MemberContent
|
||||
if err := json.Unmarshal(event.Content(), &memberContent); err != nil {
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
JSON: jsonerror.BadJSON(err.Error()),
|
||||
}
|
||||
}
|
||||
if memberContent.AuthorisedVia != "" {
|
||||
_, domain, err := gomatrixserverlib.SplitID('@', memberContent.AuthorisedVia)
|
||||
if err != nil {
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
JSON: jsonerror.BadJSON(fmt.Sprintf("The authorising username %q is invalid.", memberContent.AuthorisedVia)),
|
||||
}
|
||||
}
|
||||
if domain != cfg.Matrix.ServerName {
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
JSON: jsonerror.BadJSON(fmt.Sprintf("The authorising username %q does not belong to this server.", memberContent.AuthorisedVia)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Sign the membership event. This is required for restricted joins to work
|
||||
// in the case that the authorised via user is one of our own users. It also
|
||||
// doesn't hurt to do it even if it isn't a restricted join.
|
||||
signed := event.Sign(
|
||||
string(cfg.Matrix.ServerName),
|
||||
cfg.Matrix.KeyID,
|
||||
cfg.Matrix.PrivateKey,
|
||||
)
|
||||
|
||||
// Send the events to the room server.
|
||||
// We are responsible for notifying other servers that the user has joined
|
||||
// the room, so set SendAsServer to cfg.Matrix.ServerName
|
||||
|
|
@ -323,7 +395,7 @@ func SendJoin(
|
|||
InputRoomEvents: []api.InputRoomEvent{
|
||||
{
|
||||
Kind: api.KindNew,
|
||||
Event: event.Headered(stateAndAuthChainResponse.RoomVersion),
|
||||
Event: signed.Headered(stateAndAuthChainResponse.RoomVersion),
|
||||
SendAsServer: string(cfg.Matrix.ServerName),
|
||||
TransactionID: nil,
|
||||
},
|
||||
|
|
@ -354,10 +426,77 @@ func SendJoin(
|
|||
StateEvents: gomatrixserverlib.NewEventJSONsFromHeaderedEvents(stateAndAuthChainResponse.StateEvents),
|
||||
AuthEvents: gomatrixserverlib.NewEventJSONsFromHeaderedEvents(stateAndAuthChainResponse.AuthChainEvents),
|
||||
Origin: cfg.Matrix.ServerName,
|
||||
Event: signed.JSON(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// checkRestrictedJoin finds out whether or not we can assist in processing
|
||||
// a restricted room join. If the room version does not support restricted
|
||||
// joins then this function returns with no side effects. This returns three
|
||||
// values:
|
||||
// * an optional JSON response body (i.e. M_UNABLE_TO_AUTHORISE_JOIN) which
|
||||
// should always be sent back to the client if one is specified
|
||||
// * a user ID of an authorising user, typically a user that has power to
|
||||
// issue invites in the room, if one has been found
|
||||
// * an error if there was a problem finding out if this was allowable,
|
||||
// like if the room version isn't known or a problem happened talking to
|
||||
// the roomserver
|
||||
func checkRestrictedJoin(
|
||||
httpReq *http.Request,
|
||||
rsAPI api.FederationRoomserverAPI,
|
||||
roomVersion gomatrixserverlib.RoomVersion,
|
||||
roomID, userID string,
|
||||
) (*util.JSONResponse, string, error) {
|
||||
if allowRestricted, err := roomVersion.MayAllowRestrictedJoinsInEventAuth(); err != nil {
|
||||
return nil, "", err
|
||||
} else if !allowRestricted {
|
||||
return nil, "", nil
|
||||
}
|
||||
req := &api.QueryRestrictedJoinAllowedRequest{
|
||||
RoomID: roomID,
|
||||
UserID: userID,
|
||||
}
|
||||
res := &api.QueryRestrictedJoinAllowedResponse{}
|
||||
if err := rsAPI.QueryRestrictedJoinAllowed(httpReq.Context(), req, res); err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
switch {
|
||||
case !res.Restricted:
|
||||
// The join rules for the room don't restrict membership.
|
||||
return nil, "", nil
|
||||
|
||||
case !res.Resident:
|
||||
// The join rules restrict membership but our server isn't currently
|
||||
// joined to all of the allowed rooms, so we can't actually decide
|
||||
// whether or not to allow the user to join. This error code should
|
||||
// tell the joining server to try joining via another resident server
|
||||
// instead.
|
||||
return &util.JSONResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
JSON: jsonerror.UnableToAuthoriseJoin("This server cannot authorise the join."),
|
||||
}, "", nil
|
||||
|
||||
case !res.Allowed:
|
||||
// The join rules restrict membership, our server is in the relevant
|
||||
// rooms and the user wasn't joined to any of the allowed rooms
|
||||
// and therefore can't join this room.
|
||||
return &util.JSONResponse{
|
||||
Code: http.StatusForbidden,
|
||||
JSON: jsonerror.Forbidden("You are not joined to any matching rooms."),
|
||||
}, "", nil
|
||||
|
||||
default:
|
||||
// The join rules restrict membership, our server is in the relevant
|
||||
// rooms and the user was allowed to join because they belong to one
|
||||
// of the allowed rooms. We now need to pick one of our own local users
|
||||
// from within the room to use as the authorising user ID, so that it
|
||||
// can be referred to from within the membership content.
|
||||
return nil, res.AuthorisedVia, nil
|
||||
}
|
||||
}
|
||||
|
||||
type eventsByDepth []*gomatrixserverlib.HeaderedEvent
|
||||
|
||||
func (e eventsByDepth) Len() int {
|
||||
|
|
|
|||
|
|
@ -231,10 +231,17 @@ func SendLeave(
|
|||
}
|
||||
|
||||
// Check that the event is signed by the server sending the request.
|
||||
redacted := event.Redact()
|
||||
redacted, err := gomatrixserverlib.RedactEventJSON(event.JSON(), event.Version())
|
||||
if err != nil {
|
||||
logrus.WithError(err).Errorf("XXX: leave.go")
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
JSON: jsonerror.BadJSON("The event JSON could not be redacted"),
|
||||
}
|
||||
}
|
||||
verifyRequests := []gomatrixserverlib.VerifyJSONRequest{{
|
||||
ServerName: event.Origin(),
|
||||
Message: redacted.JSON(),
|
||||
Message: redacted,
|
||||
AtTS: event.OriginServerTS(),
|
||||
StrictValidityChecking: true,
|
||||
}}
|
||||
|
|
|
|||
|
|
@ -501,11 +501,7 @@ func (t *txnReq) processDeviceListUpdate(ctx context.Context, e gomatrixserverli
|
|||
} else if serverName != t.Origin {
|
||||
return
|
||||
}
|
||||
var inputRes keyapi.InputDeviceListUpdateResponse
|
||||
t.keyAPI.InputDeviceListUpdate(context.Background(), &keyapi.InputDeviceListUpdateRequest{
|
||||
Event: payload,
|
||||
}, &inputRes)
|
||||
if inputRes.Error != nil {
|
||||
util.GetLogger(ctx).WithError(inputRes.Error).WithField("user_id", payload.UserID).Error("failed to InputDeviceListUpdate")
|
||||
if err := t.producer.SendDeviceListUpdate(ctx, &payload); err != nil {
|
||||
util.GetLogger(ctx).WithError(err).WithField("user_id", payload.UserID).Error("failed to InputDeviceListUpdate")
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -15,23 +15,13 @@
|
|||
package deltas
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
|
||||
"github.com/matrix-org/dendrite/internal/sqlutil"
|
||||
"github.com/pressly/goose"
|
||||
)
|
||||
|
||||
func LoadFromGoose() {
|
||||
goose.AddMigration(UpRemoveRoomsTable, DownRemoveRoomsTable)
|
||||
}
|
||||
|
||||
func LoadRemoveRoomsTable(m *sqlutil.Migrations) {
|
||||
m.AddMigration(UpRemoveRoomsTable, DownRemoveRoomsTable)
|
||||
}
|
||||
|
||||
func UpRemoveRoomsTable(tx *sql.Tx) error {
|
||||
_, err := tx.Exec(`
|
||||
func UpRemoveRoomsTable(ctx context.Context, tx *sql.Tx) error {
|
||||
_, err := tx.ExecContext(ctx, `
|
||||
DROP TABLE IF EXISTS federationsender_rooms;
|
||||
`)
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -38,6 +38,10 @@ CREATE TABLE IF NOT EXISTS federationsender_queue_edus (
|
|||
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS federationsender_queue_edus_json_nid_idx
|
||||
ON federationsender_queue_edus (json_nid, server_name);
|
||||
CREATE INDEX IF NOT EXISTS federationsender_queue_edus_nid_idx
|
||||
ON federationsender_queue_edus (json_nid);
|
||||
CREATE INDEX IF NOT EXISTS federationsender_queue_edus_server_name_idx
|
||||
ON federationsender_queue_edus (server_name);
|
||||
`
|
||||
|
||||
const insertQueueEDUSQL = "" +
|
||||
|
|
|
|||
|
|
@ -33,6 +33,9 @@ CREATE TABLE IF NOT EXISTS federationsender_queue_json (
|
|||
-- The JSON body. Text so that we preserve UTF-8.
|
||||
json_body TEXT NOT NULL
|
||||
);
|
||||
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS federationsender_queue_json_json_nid_idx
|
||||
ON federationsender_queue_json (json_nid);
|
||||
`
|
||||
|
||||
const insertJSONSQL = "" +
|
||||
|
|
|
|||
|
|
@ -36,6 +36,10 @@ CREATE TABLE IF NOT EXISTS federationsender_queue_pdus (
|
|||
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS federationsender_queue_pdus_pdus_json_nid_idx
|
||||
ON federationsender_queue_pdus (json_nid, server_name);
|
||||
CREATE INDEX IF NOT EXISTS federationsender_queue_pdus_json_nid_idx
|
||||
ON federationsender_queue_pdus (json_nid);
|
||||
CREATE INDEX IF NOT EXISTS federationsender_queue_pdus_server_name_idx
|
||||
ON federationsender_queue_pdus (server_name);
|
||||
`
|
||||
|
||||
const insertQueuePDUSQL = "" +
|
||||
|
|
|
|||
|
|
@ -82,10 +82,13 @@ func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions,
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m := sqlutil.NewMigrations()
|
||||
deltas.LoadRemoveRoomsTable(m)
|
||||
deltas.LoadAddExpiresAt(m)
|
||||
if err = m.RunDeltas(d.db, dbProperties); err != nil {
|
||||
m := sqlutil.NewMigrator(d.db)
|
||||
m.AddMigrations(sqlutil.Migration{
|
||||
Version: "federationsender: drop federationsender_rooms",
|
||||
Up: deltas.UpRemoveRoomsTable,
|
||||
})
|
||||
err = m.Up(base.Context())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = queueEDUs.Prepare(); err != nil {
|
||||
|
|
|
|||
|
|
@ -15,23 +15,13 @@
|
|||
package deltas
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
|
||||
"github.com/matrix-org/dendrite/internal/sqlutil"
|
||||
"github.com/pressly/goose"
|
||||
)
|
||||
|
||||
func LoadFromGoose() {
|
||||
goose.AddMigration(UpRemoveRoomsTable, DownRemoveRoomsTable)
|
||||
}
|
||||
|
||||
func LoadRemoveRoomsTable(m *sqlutil.Migrations) {
|
||||
m.AddMigration(UpRemoveRoomsTable, DownRemoveRoomsTable)
|
||||
}
|
||||
|
||||
func UpRemoveRoomsTable(tx *sql.Tx) error {
|
||||
_, err := tx.Exec(`
|
||||
func UpRemoveRoomsTable(ctx context.Context, tx *sql.Tx) error {
|
||||
_, err := tx.ExecContext(ctx, `
|
||||
DROP TABLE IF EXISTS federationsender_rooms;
|
||||
`)
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -39,6 +39,10 @@ CREATE TABLE IF NOT EXISTS federationsender_queue_edus (
|
|||
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS federationsender_queue_edus_json_nid_idx
|
||||
ON federationsender_queue_edus (json_nid, server_name);
|
||||
CREATE INDEX IF NOT EXISTS federationsender_queue_edus_nid_idx
|
||||
ON federationsender_queue_edus (json_nid);
|
||||
CREATE INDEX IF NOT EXISTS federationsender_queue_edus_server_name_idx
|
||||
ON federationsender_queue_edus (server_name);
|
||||
`
|
||||
|
||||
const insertQueueEDUSQL = "" +
|
||||
|
|
|
|||
|
|
@ -35,6 +35,9 @@ CREATE TABLE IF NOT EXISTS federationsender_queue_json (
|
|||
-- The JSON body. Text so that we preserve UTF-8.
|
||||
json_body TEXT NOT NULL
|
||||
);
|
||||
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS federationsender_queue_json_json_nid_idx
|
||||
ON federationsender_queue_json (json_nid);
|
||||
`
|
||||
|
||||
const insertJSONSQL = "" +
|
||||
|
|
|
|||
|
|
@ -38,6 +38,10 @@ CREATE TABLE IF NOT EXISTS federationsender_queue_pdus (
|
|||
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS federationsender_queue_pdus_pdus_json_nid_idx
|
||||
ON federationsender_queue_pdus (json_nid, server_name);
|
||||
CREATE INDEX IF NOT EXISTS federationsender_queue_pdus_json_nid_idx
|
||||
ON federationsender_queue_pdus (json_nid);
|
||||
CREATE INDEX IF NOT EXISTS federationsender_queue_pdus_server_name_idx
|
||||
ON federationsender_queue_pdus (server_name);
|
||||
`
|
||||
|
||||
const insertQueuePDUSQL = "" +
|
||||
|
|
|
|||
|
|
@ -81,10 +81,13 @@ func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions,
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m := sqlutil.NewMigrations()
|
||||
deltas.LoadRemoveRoomsTable(m)
|
||||
deltas.LoadAddExpiresAt(m)
|
||||
if err = m.RunDeltas(d.db, dbProperties); err != nil {
|
||||
m := sqlutil.NewMigrator(d.db)
|
||||
m.AddMigrations(sqlutil.Migration{
|
||||
Version: "federationsender: drop federationsender_rooms",
|
||||
Up: deltas.UpRemoveRoomsTable,
|
||||
})
|
||||
err = m.Up(base.Context())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = queueEDUs.Prepare(); err != nil {
|
||||
|
|
|
|||
107
go.mod
|
|
@ -1,68 +1,113 @@
|
|||
module github.com/matrix-org/dendrite
|
||||
|
||||
replace github.com/nats-io/nats-server/v2 => github.com/neilalexander/nats-server/v2 v2.8.3-0.20220513095553-73a9a246d34f
|
||||
|
||||
replace github.com/nats-io/nats.go => github.com/neilalexander/nats.go v1.13.1-0.20220419101051-b262d9f0be1e
|
||||
|
||||
require (
|
||||
github.com/Arceliar/ironwood v0.0.0-20211125050254-8951369625d0
|
||||
github.com/Arceliar/ironwood v0.0.0-20220306165321-319147a02d98
|
||||
github.com/Arceliar/phony v0.0.0-20210209235338-dde1a8dca979
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.0
|
||||
github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect
|
||||
github.com/MFAshby/stdemuxerhook v1.0.0
|
||||
github.com/Masterminds/semver/v3 v3.1.1
|
||||
github.com/codeclysm/extract v2.2.0+incompatible
|
||||
github.com/containerd/containerd v1.6.2 // indirect
|
||||
github.com/docker/docker v20.10.14+incompatible
|
||||
github.com/dgraph-io/ristretto v0.1.1-0.20220403145359-8e850b710d6d
|
||||
github.com/docker/docker v20.10.16+incompatible
|
||||
github.com/docker/go-connections v0.4.0
|
||||
github.com/frankban/quicktest v1.14.3 // indirect
|
||||
github.com/getsentry/sentry-go v0.13.0
|
||||
github.com/gologme/log v1.3.0
|
||||
github.com/google/go-cmp v0.5.7
|
||||
github.com/google/go-cmp v0.5.8
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/gorilla/mux v1.8.0
|
||||
github.com/gorilla/websocket v1.5.0
|
||||
github.com/h2non/filetype v1.1.3 // indirect
|
||||
github.com/hashicorp/golang-lru v0.5.4
|
||||
github.com/juju/testing v0.0.0-20220203020004-a0ff61f03494 // indirect
|
||||
github.com/kardianos/minwinsvc v1.0.0
|
||||
github.com/lib/pq v1.10.5
|
||||
github.com/matrix-org/dugong v0.0.0-20210921133753-66e6b1c67e2e
|
||||
github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91
|
||||
github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20220513103617-eee8fd528433
|
||||
github.com/matrix-org/pinecone v0.0.0-20220408153826-2999ea29ed48
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20220725104114-b6003e522771
|
||||
github.com/matrix-org/pinecone v0.0.0-20220708135211-1ce778fcde6a
|
||||
github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4
|
||||
github.com/mattn/go-sqlite3 v1.14.10
|
||||
github.com/miekg/dns v1.1.31 // indirect
|
||||
github.com/nats-io/nats-server/v2 v2.7.4-0.20220309205833-773636c1c5bb
|
||||
github.com/nats-io/nats.go v1.14.0
|
||||
github.com/mattn/go-sqlite3 v1.14.13
|
||||
github.com/nats-io/nats-server/v2 v2.8.5-0.20220731184415-903a06a5b4ee
|
||||
github.com/nats-io/nats.go v1.16.1-0.20220731182438-87bbea85922b
|
||||
github.com/neilalexander/utp v0.1.1-0.20210727203401-54ae7b1cd5f9
|
||||
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646
|
||||
github.com/ngrok/sqlmw v0.0.0-20211220175533-9d16fdc47b31
|
||||
github.com/opencontainers/image-spec v1.0.2 // indirect
|
||||
github.com/ngrok/sqlmw v0.0.0-20220520173518-97c9c04efc79
|
||||
github.com/opentracing/opentracing-go v1.2.0
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/pressly/goose v2.7.0+incompatible
|
||||
github.com/prometheus/client_golang v1.12.1
|
||||
github.com/prometheus/client_golang v1.12.2
|
||||
github.com/sirupsen/logrus v1.8.1
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/stretchr/testify v1.7.1
|
||||
github.com/tidwall/gjson v1.14.1
|
||||
github.com/tidwall/sjson v1.2.4
|
||||
github.com/uber/jaeger-client-go v2.30.0+incompatible
|
||||
github.com/uber/jaeger-lib v2.4.1+incompatible
|
||||
github.com/yggdrasil-network/yggdrasil-go v0.4.3
|
||||
go.uber.org/atomic v1.9.0
|
||||
golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122
|
||||
golang.org/x/image v0.0.0-20220321031419-a8550c1d254a
|
||||
golang.org/x/mobile v0.0.0-20220407111146-e579adbbc4a2
|
||||
golang.org/x/net v0.0.0-20220407224826-aac1ed45d8e3
|
||||
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6 // indirect
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
|
||||
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e
|
||||
golang.org/x/image v0.0.0-20220413100746-70e8d0d3baa9
|
||||
golang.org/x/mobile v0.0.0-20220518205345-8578da9835fd
|
||||
golang.org/x/net v0.0.0-20220524220425-1d687d428aca
|
||||
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467
|
||||
gopkg.in/h2non/bimg.v1 v1.1.9
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
nhooyr.io/websocket v1.8.7
|
||||
)
|
||||
|
||||
go 1.16
|
||||
require (
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect
|
||||
github.com/Microsoft/go-winio v0.5.1 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/cheekybits/genny v1.0.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/docker/distribution v2.7.1+incompatible // indirect
|
||||
github.com/docker/go-units v0.4.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.0 // indirect
|
||||
github.com/frankban/quicktest v1.14.3 // indirect
|
||||
github.com/fsnotify/fsnotify v1.4.9 // indirect
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/h2non/filetype v1.1.3 // indirect
|
||||
github.com/juju/errors v0.0.0-20220203013757-bd733f3c86b9 // indirect
|
||||
github.com/juju/testing v0.0.0-20220203020004-a0ff61f03494 // indirect
|
||||
github.com/klauspost/compress v1.15.9 // indirect
|
||||
github.com/lucas-clemente/quic-go v0.26.0 // indirect
|
||||
github.com/marten-seemann/qtls-go1-16 v0.1.5 // indirect
|
||||
github.com/marten-seemann/qtls-go1-17 v0.1.1 // indirect
|
||||
github.com/marten-seemann/qtls-go1-18 v0.1.1 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
|
||||
github.com/miekg/dns v1.1.49 // indirect
|
||||
github.com/minio/highwayhash v1.0.2 // indirect
|
||||
github.com/moby/term v0.0.0-20210610120745-9d4ed1856297 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/nats-io/jwt/v2 v2.3.0 // indirect
|
||||
github.com/nats-io/nkeys v0.3.0 // indirect
|
||||
github.com/nats-io/nuid v1.0.1 // indirect
|
||||
github.com/nxadm/tail v1.4.8 // indirect
|
||||
github.com/onsi/ginkgo v1.16.4 // indirect
|
||||
github.com/onsi/gomega v1.17.0 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common v0.32.1 // indirect
|
||||
github.com/prometheus/procfs v0.7.3 // indirect
|
||||
github.com/stretchr/objx v0.2.0 // indirect
|
||||
github.com/tidwall/match v1.1.1 // indirect
|
||||
github.com/tidwall/pretty v1.2.0 // indirect
|
||||
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
|
||||
golang.org/x/text v0.3.8-0.20211004125949-5bd84dd9b33b // indirect
|
||||
golang.org/x/time v0.0.0-20220411224347-583f2d630306 // indirect
|
||||
golang.org/x/tools v0.1.10 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
|
||||
google.golang.org/protobuf v1.27.1 // indirect
|
||||
gopkg.in/macaroon.v2 v2.1.0 // indirect
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.0 // indirect
|
||||
gotest.tools/v3 v3.0.3 // indirect
|
||||
)
|
||||
|
||||
go 1.18
|
||||
|
|
|
|||
18
internal/caching/cache_eventstatekeys.go
Normal file
|
|
@ -0,0 +1,18 @@
|
|||
package caching
|
||||
|
||||
import "github.com/matrix-org/dendrite/roomserver/types"
|
||||
|
||||
// EventStateKeyCache contains the subset of functions needed for
|
||||
// a room event state key cache.
|
||||
type EventStateKeyCache interface {
|
||||
GetEventStateKey(eventStateKeyNID types.EventStateKeyNID) (string, bool)
|
||||
StoreEventStateKey(eventStateKeyNID types.EventStateKeyNID, eventStateKey string)
|
||||
}
|
||||
|
||||
func (c Caches) GetEventStateKey(eventStateKeyNID types.EventStateKeyNID) (string, bool) {
|
||||
return c.RoomServerStateKeys.Get(eventStateKeyNID)
|
||||
}
|
||||
|
||||
func (c Caches) StoreEventStateKey(eventStateKeyNID types.EventStateKeyNID, eventStateKey string) {
|
||||
c.RoomServerStateKeys.Set(eventStateKeyNID, eventStateKey)
|
||||
}
|
||||
|
|
@ -1,18 +1,9 @@
|
|||
package caching
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
)
|
||||
|
||||
const (
|
||||
FederationEventCacheName = "federation_event"
|
||||
FederationEventCacheMaxEntries = 256
|
||||
FederationEventCacheMutable = true // to allow use of Unset only
|
||||
FederationEventCacheMaxAge = CacheNoMaxAge
|
||||
)
|
||||
|
||||
// FederationCache contains the subset of functions needed for
|
||||
// a federation event cache.
|
||||
type FederationCache interface {
|
||||
|
|
@ -26,43 +17,25 @@ type FederationCache interface {
|
|||
}
|
||||
|
||||
func (c Caches) GetFederationQueuedPDU(eventNID int64) (*gomatrixserverlib.HeaderedEvent, bool) {
|
||||
key := fmt.Sprintf("%d", eventNID)
|
||||
val, found := c.FederationEvents.Get(key)
|
||||
if found && val != nil {
|
||||
if event, ok := val.(*gomatrixserverlib.HeaderedEvent); ok {
|
||||
return event, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
return c.FederationPDUs.Get(eventNID)
|
||||
}
|
||||
|
||||
func (c Caches) StoreFederationQueuedPDU(eventNID int64, event *gomatrixserverlib.HeaderedEvent) {
|
||||
key := fmt.Sprintf("%d", eventNID)
|
||||
c.FederationEvents.Set(key, event)
|
||||
c.FederationPDUs.Set(eventNID, event)
|
||||
}
|
||||
|
||||
func (c Caches) EvictFederationQueuedPDU(eventNID int64) {
|
||||
key := fmt.Sprintf("%d", eventNID)
|
||||
c.FederationEvents.Unset(key)
|
||||
c.FederationPDUs.Unset(eventNID)
|
||||
}
|
||||
|
||||
func (c Caches) GetFederationQueuedEDU(eventNID int64) (*gomatrixserverlib.EDU, bool) {
|
||||
key := fmt.Sprintf("%d", eventNID)
|
||||
val, found := c.FederationEvents.Get(key)
|
||||
if found && val != nil {
|
||||
if event, ok := val.(*gomatrixserverlib.EDU); ok {
|
||||
return event, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
return c.FederationEDUs.Get(eventNID)
|
||||
}
|
||||
|
||||
func (c Caches) StoreFederationQueuedEDU(eventNID int64, event *gomatrixserverlib.EDU) {
|
||||
key := fmt.Sprintf("%d", eventNID)
|
||||
c.FederationEvents.Set(key, event)
|
||||
c.FederationEDUs.Set(eventNID, event)
|
||||
}
|
||||
|
||||
func (c Caches) EvictFederationQueuedEDU(eventNID int64) {
|
||||
key := fmt.Sprintf("%d", eventNID)
|
||||
c.FederationEvents.Unset(key)
|
||||
c.FederationEDUs.Unset(eventNID)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,67 +1,35 @@
|
|||
package caching
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
userapi "github.com/matrix-org/dendrite/userapi/api"
|
||||
)
|
||||
|
||||
const (
|
||||
LazyLoadCacheName = "lazy_load_members"
|
||||
LazyLoadCacheMaxEntries = 128
|
||||
LazyLoadCacheMaxUserEntries = 128
|
||||
LazyLoadCacheMutable = true
|
||||
LazyLoadCacheMaxAge = time.Minute * 30
|
||||
)
|
||||
type lazyLoadingCacheKey struct {
|
||||
UserID string // the user we're querying on behalf of
|
||||
DeviceID string // the user we're querying on behalf of
|
||||
RoomID string // the room in question
|
||||
TargetUserID string // the user whose membership we're asking about
|
||||
}
|
||||
|
||||
type LazyLoadCache interface {
|
||||
StoreLazyLoadedUser(device *userapi.Device, roomID, userID, eventID string)
|
||||
IsLazyLoadedUserCached(device *userapi.Device, roomID, userID string) (string, bool)
|
||||
}
|
||||
|
||||
func (c Caches) lazyLoadCacheForUser(device *userapi.Device) (*InMemoryLRUCachePartition, error) {
|
||||
cacheName := fmt.Sprintf("%s/%s", device.UserID, device.ID)
|
||||
userCache, ok := c.LazyLoading.Get(cacheName)
|
||||
if ok && userCache != nil {
|
||||
if cache, ok := userCache.(*InMemoryLRUCachePartition); ok {
|
||||
return cache, nil
|
||||
}
|
||||
}
|
||||
cache, err := NewInMemoryLRUCachePartition(
|
||||
LazyLoadCacheName,
|
||||
LazyLoadCacheMutable,
|
||||
LazyLoadCacheMaxUserEntries,
|
||||
LazyLoadCacheMaxAge,
|
||||
false,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.LazyLoading.Set(cacheName, cache)
|
||||
go cacheCleaner(cache)
|
||||
return cache, nil
|
||||
}
|
||||
|
||||
func (c Caches) StoreLazyLoadedUser(device *userapi.Device, roomID, userID, eventID string) {
|
||||
cache, err := c.lazyLoadCacheForUser(device)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
cacheKey := fmt.Sprintf("%s/%s/%s/%s", device.UserID, device.ID, roomID, userID)
|
||||
cache.Set(cacheKey, eventID)
|
||||
c.LazyLoading.Set(lazyLoadingCacheKey{
|
||||
UserID: device.UserID,
|
||||
DeviceID: device.ID,
|
||||
RoomID: roomID,
|
||||
TargetUserID: userID,
|
||||
}, eventID)
|
||||
}
|
||||
|
||||
func (c Caches) IsLazyLoadedUserCached(device *userapi.Device, roomID, userID string) (string, bool) {
|
||||
cache, err := c.lazyLoadCacheForUser(device)
|
||||
if err != nil {
|
||||
return "", false
|
||||
}
|
||||
|
||||
cacheKey := fmt.Sprintf("%s/%s/%s/%s", device.UserID, device.ID, roomID, userID)
|
||||
val, ok := cache.Get(cacheKey)
|
||||
if !ok {
|
||||
return "", ok
|
||||
}
|
||||
return val.(string), ok
|
||||
return c.LazyLoading.Get(lazyLoadingCacheKey{
|
||||
UserID: device.UserID,
|
||||
DeviceID: device.ID,
|
||||
RoomID: roomID,
|
||||
TargetUserID: userID,
|
||||
})
|
||||
}
|
||||
|
|
|
|||
21
internal/caching/cache_roomevents.go
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
package caching
|
||||
|
||||
import (
|
||||
"github.com/matrix-org/dendrite/roomserver/types"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
)
|
||||
|
||||
// RoomServerEventsCache contains the subset of functions needed for
|
||||
// a roomserver event cache.
|
||||
type RoomServerEventsCache interface {
|
||||
GetRoomServerEvent(eventNID types.EventNID) (*gomatrixserverlib.Event, bool)
|
||||
StoreRoomServerEvent(eventNID types.EventNID, event *gomatrixserverlib.Event)
|
||||
}
|
||||
|
||||
func (c Caches) GetRoomServerEvent(eventNID types.EventNID) (*gomatrixserverlib.Event, bool) {
|
||||
return c.RoomServerEvents.Get(int64(eventNID))
|
||||
}
|
||||
|
||||
func (c Caches) StoreRoomServerEvent(eventNID types.EventNID, event *gomatrixserverlib.Event) {
|
||||
c.RoomServerEvents.Set(int64(eventNID), event)
|
||||
}
|
||||
|
|
@ -1,8 +1,6 @@
|
|||
package caching
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/matrix-org/dendrite/roomserver/types"
|
||||
)
|
||||
|
||||
|
|
@ -14,35 +12,22 @@ import (
|
|||
// used from other components as we currently have no way to invalidate
|
||||
// the cache in downstream components.
|
||||
|
||||
const (
|
||||
RoomInfoCacheName = "roominfo"
|
||||
RoomInfoCacheMaxEntries = 1024
|
||||
RoomInfoCacheMutable = true
|
||||
RoomInfoCacheMaxAge = time.Minute * 5
|
||||
)
|
||||
|
||||
// RoomInfosCache contains the subset of functions needed for
|
||||
// a room Info cache. It must only be used from the roomserver only
|
||||
// It is not safe for use from other components.
|
||||
type RoomInfoCache interface {
|
||||
GetRoomInfo(roomID string) (roomInfo types.RoomInfo, ok bool)
|
||||
StoreRoomInfo(roomID string, roomInfo types.RoomInfo)
|
||||
GetRoomInfo(roomID string) (roomInfo *types.RoomInfo, ok bool)
|
||||
StoreRoomInfo(roomID string, roomInfo *types.RoomInfo)
|
||||
}
|
||||
|
||||
// GetRoomInfo must only be called from the roomserver only. It is not
|
||||
// safe for use from other components.
|
||||
func (c Caches) GetRoomInfo(roomID string) (types.RoomInfo, bool) {
|
||||
val, found := c.RoomInfos.Get(roomID)
|
||||
if found && val != nil {
|
||||
if roomInfo, ok := val.(types.RoomInfo); ok {
|
||||
return roomInfo, true
|
||||
}
|
||||
}
|
||||
return types.RoomInfo{}, false
|
||||
func (c Caches) GetRoomInfo(roomID string) (*types.RoomInfo, bool) {
|
||||
return c.RoomInfos.Get(roomID)
|
||||
}
|
||||
|
||||
// StoreRoomInfo must only be called from the roomserver only. It is not
|
||||
// safe for use from other components.
|
||||
func (c Caches) StoreRoomInfo(roomID string, roomInfo types.RoomInfo) {
|
||||
func (c Caches) StoreRoomInfo(roomID string, roomInfo *types.RoomInfo) {
|
||||
c.RoomInfos.Set(roomID, roomInfo)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,22 +1,15 @@
|
|||
package caching
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/matrix-org/dendrite/roomserver/types"
|
||||
)
|
||||
|
||||
const (
|
||||
RoomServerRoomIDsCacheName = "roomserver_room_ids"
|
||||
RoomServerRoomIDsCacheMaxEntries = 1024
|
||||
RoomServerRoomIDsCacheMutable = false
|
||||
RoomServerRoomIDsCacheMaxAge = CacheNoMaxAge
|
||||
)
|
||||
|
||||
type RoomServerCaches interface {
|
||||
RoomServerNIDsCache
|
||||
RoomVersionCache
|
||||
RoomInfoCache
|
||||
RoomServerEventsCache
|
||||
EventStateKeyCache
|
||||
}
|
||||
|
||||
// RoomServerNIDsCache contains the subset of functions needed for
|
||||
|
|
@ -27,15 +20,9 @@ type RoomServerNIDsCache interface {
|
|||
}
|
||||
|
||||
func (c Caches) GetRoomServerRoomID(roomNID types.RoomNID) (string, bool) {
|
||||
val, found := c.RoomServerRoomIDs.Get(strconv.Itoa(int(roomNID)))
|
||||
if found && val != nil {
|
||||
if roomID, ok := val.(string); ok {
|
||||
return roomID, true
|
||||
}
|
||||
}
|
||||
return "", false
|
||||
return c.RoomServerRoomIDs.Get(roomNID)
|
||||
}
|
||||
|
||||
func (c Caches) StoreRoomServerRoomID(roomNID types.RoomNID, roomID string) {
|
||||
c.RoomServerRoomIDs.Set(strconv.Itoa(int(roomNID)), roomID)
|
||||
c.RoomServerRoomIDs.Set(roomNID, roomID)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2,13 +2,6 @@ package caching
|
|||
|
||||
import "github.com/matrix-org/gomatrixserverlib"
|
||||
|
||||
const (
|
||||
RoomVersionCacheName = "room_versions"
|
||||
RoomVersionCacheMaxEntries = 1024
|
||||
RoomVersionCacheMutable = false
|
||||
RoomVersionCacheMaxAge = CacheNoMaxAge
|
||||
)
|
||||
|
||||
// RoomVersionsCache contains the subset of functions needed for
|
||||
// a room version cache.
|
||||
type RoomVersionCache interface {
|
||||
|
|
@ -17,13 +10,7 @@ type RoomVersionCache interface {
|
|||
}
|
||||
|
||||
func (c Caches) GetRoomVersion(roomID string) (gomatrixserverlib.RoomVersion, bool) {
|
||||
val, found := c.RoomVersions.Get(roomID)
|
||||
if found && val != nil {
|
||||
if roomVersion, ok := val.(gomatrixserverlib.RoomVersion); ok {
|
||||
return roomVersion, true
|
||||
}
|
||||
}
|
||||
return "", false
|
||||
return c.RoomVersions.Get(roomID)
|
||||
}
|
||||
|
||||
func (c Caches) StoreRoomVersion(roomID string, roomVersion gomatrixserverlib.RoomVersion) {
|
||||
|
|
|
|||
|
|
@ -6,13 +6,6 @@ import (
|
|||
"github.com/matrix-org/gomatrixserverlib"
|
||||
)
|
||||
|
||||
const (
|
||||
ServerKeyCacheName = "server_key"
|
||||
ServerKeyCacheMaxEntries = 4096
|
||||
ServerKeyCacheMutable = true
|
||||
ServerKeyCacheMaxAge = CacheNoMaxAge
|
||||
)
|
||||
|
||||
// ServerKeyCache contains the subset of functions needed for
|
||||
// a server key cache.
|
||||
type ServerKeyCache interface {
|
||||
|
|
@ -34,18 +27,13 @@ func (c Caches) GetServerKey(
|
|||
) (gomatrixserverlib.PublicKeyLookupResult, bool) {
|
||||
key := fmt.Sprintf("%s/%s", request.ServerName, request.KeyID)
|
||||
val, found := c.ServerKeys.Get(key)
|
||||
if found && val != nil {
|
||||
if keyLookupResult, ok := val.(gomatrixserverlib.PublicKeyLookupResult); ok {
|
||||
if !keyLookupResult.WasValidAt(timestamp, true) {
|
||||
// The key wasn't valid at the requested timestamp so don't
|
||||
// return it. The caller will have to work out what to do.
|
||||
c.ServerKeys.Unset(key)
|
||||
return gomatrixserverlib.PublicKeyLookupResult{}, false
|
||||
}
|
||||
return keyLookupResult, true
|
||||
}
|
||||
if found && !val.WasValidAt(timestamp, true) {
|
||||
// The key wasn't valid at the requested timestamp so don't
|
||||
// return it. The caller will have to work out what to do.
|
||||
c.ServerKeys.Unset(key)
|
||||
return gomatrixserverlib.PublicKeyLookupResult{}, false
|
||||
}
|
||||
return gomatrixserverlib.PublicKeyLookupResult{}, false
|
||||
return val, found
|
||||
}
|
||||
|
||||
func (c Caches) StoreServerKey(
|
||||
|
|
|
|||
|
|
@ -1,31 +1,16 @@
|
|||
package caching
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
)
|
||||
|
||||
const (
|
||||
SpaceSummaryRoomsCacheName = "space_summary_rooms"
|
||||
SpaceSummaryRoomsCacheMaxEntries = 100
|
||||
SpaceSummaryRoomsCacheMutable = true
|
||||
SpaceSummaryRoomsCacheMaxAge = time.Minute * 5
|
||||
)
|
||||
|
||||
type SpaceSummaryRoomsCache interface {
|
||||
GetSpaceSummary(roomID string) (r gomatrixserverlib.MSC2946SpacesResponse, ok bool)
|
||||
StoreSpaceSummary(roomID string, r gomatrixserverlib.MSC2946SpacesResponse)
|
||||
}
|
||||
|
||||
func (c Caches) GetSpaceSummary(roomID string) (r gomatrixserverlib.MSC2946SpacesResponse, ok bool) {
|
||||
val, found := c.SpaceSummaryRooms.Get(roomID)
|
||||
if found && val != nil {
|
||||
if resp, ok := val.(gomatrixserverlib.MSC2946SpacesResponse); ok {
|
||||
return resp, true
|
||||
}
|
||||
}
|
||||
return r, false
|
||||
return c.SpaceSummaryRooms.Get(roomID)
|
||||
}
|
||||
|
||||
func (c Caches) StoreSpaceSummary(roomID string, r gomatrixserverlib.MSC2946SpacesResponse) {
|
||||
|
|
|
|||
|
|
@ -1,28 +1,53 @@
// Copyright 2022 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caching

import (
    "time"

    "github.com/matrix-org/dendrite/roomserver/types"
    "github.com/matrix-org/gomatrixserverlib"
)

// Caches contains a set of references to caches. They may be
// different implementations as long as they satisfy the Cache
// interface.
type Caches struct {
    RoomVersions      Cache // RoomVersionCache
    ServerKeys        Cache // ServerKeyCache
    RoomServerRoomNIDs Cache // RoomServerNIDsCache
    RoomServerRoomIDs Cache // RoomServerNIDsCache
    RoomInfos         Cache // RoomInfoCache
    FederationEvents  Cache // FederationEventsCache
    SpaceSummaryRooms Cache // SpaceSummaryRoomsCache
    LazyLoading       Cache // LazyLoadCache
    RoomVersions        Cache[string, gomatrixserverlib.RoomVersion]           // room ID -> room version
    ServerKeys          Cache[string, gomatrixserverlib.PublicKeyLookupResult] // server name -> server keys
    RoomServerRoomNIDs  Cache[string, types.RoomNID]                           // room ID -> room NID
    RoomServerRoomIDs   Cache[types.RoomNID, string]                           // room NID -> room ID
    RoomServerEvents    Cache[int64, *gomatrixserverlib.Event]                 // event NID -> event
    RoomServerStateKeys Cache[types.EventStateKeyNID, string]                  // event NID -> event state key
    RoomInfos           Cache[string, *types.RoomInfo]                         // room ID -> room info
    FederationPDUs      Cache[int64, *gomatrixserverlib.HeaderedEvent]         // queue NID -> PDU
    FederationEDUs      Cache[int64, *gomatrixserverlib.EDU]                   // queue NID -> EDU
    SpaceSummaryRooms   Cache[string, gomatrixserverlib.MSC2946SpacesResponse] // room ID -> space response
    LazyLoading         Cache[lazyLoadingCacheKey, string]                     // composite key -> event ID
}

// Cache is the interface that an implementation must satisfy.
type Cache interface {
    Get(key string) (value interface{}, ok bool)
    Set(key string, value interface{})
    Unset(key string)
type Cache[K keyable, T any] interface {
    Get(key K) (value T, ok bool)
    Set(key K, value T)
    Unset(key K)
}

const CacheNoMaxAge = time.Duration(0)
type keyable interface {
    // from https://github.com/dgraph-io/ristretto/blob/8e850b710d6df0383c375ec6a7beae4ce48fc8d5/z/z.go#L34
    ~uint64 | ~string | []byte | byte | ~int | ~int32 | ~uint32 | ~int64 | lazyLoadingCacheKey
}

type costable interface {
    CacheCost() int
}
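The hunk above replaces the untyped Cache interface with a generic Cache[K, V], constrained by keyable so that keys hash cleanly in the new Ristretto backend. Below is a minimal editor's sketch (not part of the diff) of what this buys callers; the helper name and fallback room version are illustrative assumptions.

package example

import (
    "github.com/matrix-org/dendrite/internal/caching"
    "github.com/matrix-org/gomatrixserverlib"
)

// roomVersionOrDefault is an illustrative helper: RoomVersions is now
// Cache[string, gomatrixserverlib.RoomVersion], so Get returns a typed value
// and no interface{} assertion is needed on the caller's side.
func roomVersionOrDefault(caches *caching.Caches, roomID string) gomatrixserverlib.RoomVersion {
    if version, ok := caches.RoomVersions.Get(roomID); ok {
        return version
    }
    return gomatrixserverlib.RoomVersionV6 // assumed fallback, for illustration only
}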
|
|
|||
|
|
@ -1,189 +0,0 @@
|
|||
package caching
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
func NewInMemoryLRUCache(enablePrometheus bool) (*Caches, error) {
|
||||
roomVersions, err := NewInMemoryLRUCachePartition(
|
||||
RoomVersionCacheName,
|
||||
RoomVersionCacheMutable,
|
||||
RoomVersionCacheMaxEntries,
|
||||
RoomVersionCacheMaxAge,
|
||||
enablePrometheus,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
serverKeys, err := NewInMemoryLRUCachePartition(
|
||||
ServerKeyCacheName,
|
||||
ServerKeyCacheMutable,
|
||||
ServerKeyCacheMaxEntries,
|
||||
ServerKeyCacheMaxAge,
|
||||
enablePrometheus,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
roomServerRoomIDs, err := NewInMemoryLRUCachePartition(
|
||||
RoomServerRoomIDsCacheName,
|
||||
RoomServerRoomIDsCacheMutable,
|
||||
RoomServerRoomIDsCacheMaxEntries,
|
||||
RoomServerRoomIDsCacheMaxAge,
|
||||
enablePrometheus,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
roomInfos, err := NewInMemoryLRUCachePartition(
|
||||
RoomInfoCacheName,
|
||||
RoomInfoCacheMutable,
|
||||
RoomInfoCacheMaxEntries,
|
||||
RoomInfoCacheMaxAge,
|
||||
enablePrometheus,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
federationEvents, err := NewInMemoryLRUCachePartition(
|
||||
FederationEventCacheName,
|
||||
FederationEventCacheMutable,
|
||||
FederationEventCacheMaxEntries,
|
||||
FederationEventCacheMaxAge,
|
||||
enablePrometheus,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
spaceRooms, err := NewInMemoryLRUCachePartition(
|
||||
SpaceSummaryRoomsCacheName,
|
||||
SpaceSummaryRoomsCacheMutable,
|
||||
SpaceSummaryRoomsCacheMaxEntries,
|
||||
SpaceSummaryRoomsCacheMaxAge,
|
||||
enablePrometheus,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
lazyLoadCache, err := NewInMemoryLRUCachePartition(
|
||||
LazyLoadCacheName,
|
||||
LazyLoadCacheMutable,
|
||||
LazyLoadCacheMaxEntries,
|
||||
LazyLoadCacheMaxAge,
|
||||
enablePrometheus,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
go cacheCleaner(
|
||||
roomVersions, serverKeys, roomServerRoomIDs,
|
||||
roomInfos, federationEvents, spaceRooms, lazyLoadCache,
|
||||
)
|
||||
return &Caches{
|
||||
RoomVersions: roomVersions,
|
||||
ServerKeys: serverKeys,
|
||||
RoomServerRoomIDs: roomServerRoomIDs,
|
||||
RoomInfos: roomInfos,
|
||||
FederationEvents: federationEvents,
|
||||
SpaceSummaryRooms: spaceRooms,
|
||||
LazyLoading: lazyLoadCache,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func cacheCleaner(caches ...*InMemoryLRUCachePartition) {
|
||||
for {
|
||||
time.Sleep(time.Minute)
|
||||
for _, cache := range caches {
|
||||
// Hold onto the last 10% of the cache entries, since
|
||||
// otherwise a quiet period might cause us to evict all
|
||||
// cache entries entirely.
|
||||
if cache.lru.Len() > cache.maxEntries/10 {
|
||||
cache.lru.RemoveOldest()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type InMemoryLRUCachePartition struct {
|
||||
name string
|
||||
mutable bool
|
||||
maxEntries int
|
||||
maxAge time.Duration
|
||||
lru *lru.Cache
|
||||
}
|
||||
|
||||
type inMemoryLRUCacheEntry struct {
|
||||
value interface{}
|
||||
created time.Time
|
||||
}
|
||||
|
||||
func NewInMemoryLRUCachePartition(name string, mutable bool, maxEntries int, maxAge time.Duration, enablePrometheus bool) (*InMemoryLRUCachePartition, error) {
|
||||
var err error
|
||||
cache := InMemoryLRUCachePartition{
|
||||
name: name,
|
||||
mutable: mutable,
|
||||
maxEntries: maxEntries,
|
||||
maxAge: maxAge,
|
||||
}
|
||||
cache.lru, err = lru.New(maxEntries)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if enablePrometheus {
|
||||
promauto.NewGaugeFunc(prometheus.GaugeOpts{
|
||||
Namespace: "dendrite",
|
||||
Subsystem: "caching_in_memory_lru",
|
||||
Name: name,
|
||||
}, func() float64 {
|
||||
return float64(cache.lru.Len())
|
||||
})
|
||||
}
|
||||
return &cache, nil
|
||||
}
|
||||
|
||||
func (c *InMemoryLRUCachePartition) Set(key string, value interface{}) {
|
||||
if !c.mutable {
|
||||
if peek, ok := c.lru.Peek(key); ok {
|
||||
if entry, ok := peek.(*inMemoryLRUCacheEntry); ok && entry.value != value {
|
||||
panic(fmt.Sprintf("invalid use of immutable cache tries to mutate existing value of %q", key))
|
||||
}
|
||||
}
|
||||
}
|
||||
c.lru.Add(key, &inMemoryLRUCacheEntry{
|
||||
value: value,
|
||||
created: time.Now(),
|
||||
})
|
||||
}
|
||||
|
||||
func (c *InMemoryLRUCachePartition) Unset(key string) {
|
||||
if !c.mutable {
|
||||
panic(fmt.Sprintf("invalid use of immutable cache tries to unset value of %q", key))
|
||||
}
|
||||
c.lru.Remove(key)
|
||||
}
|
||||
|
||||
func (c *InMemoryLRUCachePartition) Get(key string) (value interface{}, ok bool) {
|
||||
v, ok := c.lru.Get(key)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
entry, ok := v.(*inMemoryLRUCacheEntry)
|
||||
switch {
|
||||
case ok && c.maxAge == CacheNoMaxAge:
|
||||
return entry.value, ok // There's no maximum age policy
|
||||
case ok && time.Since(entry.created) < c.maxAge:
|
||||
return entry.value, ok // The value for the key isn't stale
|
||||
default:
|
||||
// Either the key was found and it was stale, or the key
|
||||
// wasn't found at all
|
||||
c.lru.Remove(key)
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
206
internal/caching/impl_ristretto.go
Normal file
|
|
@ -0,0 +1,206 @@
|
|||
// Copyright 2022 The Matrix.org Foundation C.I.C.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package caching
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/dgraph-io/ristretto"
|
||||
"github.com/dgraph-io/ristretto/z"
|
||||
"github.com/matrix-org/dendrite/roomserver/types"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
const (
|
||||
roomVersionsCache byte = iota + 1
|
||||
serverKeysCache
|
||||
roomNIDsCache
|
||||
roomIDsCache
|
||||
roomEventsCache
|
||||
roomInfosCache
|
||||
federationPDUsCache
|
||||
federationEDUsCache
|
||||
spaceSummaryRoomsCache
|
||||
lazyLoadingCache
|
||||
eventStateKeyCache
|
||||
)
|
||||
|
||||
func NewRistrettoCache(maxCost config.DataUnit, maxAge time.Duration, enablePrometheus bool) *Caches {
|
||||
cache, err := ristretto.NewCache(&ristretto.Config{
|
||||
NumCounters: int64((maxCost / 1024) * 10), // 10 counters per 1KB data, affects bloom filter size
|
||||
BufferItems: 64, // recommended by the ristretto godocs as a sane buffer size value
|
||||
MaxCost: int64(maxCost), // max cost is in bytes, as per the Dendrite config
|
||||
Metrics: true,
|
||||
KeyToHash: func(key interface{}) (uint64, uint64) {
|
||||
return z.KeyToHash(key)
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if enablePrometheus {
|
||||
promauto.NewGaugeFunc(prometheus.GaugeOpts{
|
||||
Namespace: "dendrite",
|
||||
Subsystem: "caching_ristretto",
|
||||
Name: "ratio",
|
||||
}, func() float64 {
|
||||
return float64(cache.Metrics.Ratio())
|
||||
})
|
||||
promauto.NewGaugeFunc(prometheus.GaugeOpts{
|
||||
Namespace: "dendrite",
|
||||
Subsystem: "caching_ristretto",
|
||||
Name: "cost",
|
||||
}, func() float64 {
|
||||
return float64(cache.Metrics.CostAdded() - cache.Metrics.CostEvicted())
|
||||
})
|
||||
}
|
||||
return &Caches{
|
||||
RoomVersions: &RistrettoCachePartition[string, gomatrixserverlib.RoomVersion]{ // room ID -> room version
|
||||
cache: cache,
|
||||
Prefix: roomVersionsCache,
|
||||
MaxAge: maxAge,
|
||||
},
|
||||
ServerKeys: &RistrettoCachePartition[string, gomatrixserverlib.PublicKeyLookupResult]{ // server name -> server keys
|
||||
cache: cache,
|
||||
Prefix: serverKeysCache,
|
||||
Mutable: true,
|
||||
MaxAge: maxAge,
|
||||
},
|
||||
RoomServerRoomNIDs: &RistrettoCachePartition[string, types.RoomNID]{ // room ID -> room NID
|
||||
cache: cache,
|
||||
Prefix: roomNIDsCache,
|
||||
MaxAge: maxAge,
|
||||
},
|
||||
RoomServerRoomIDs: &RistrettoCachePartition[types.RoomNID, string]{ // room NID -> room ID
|
||||
cache: cache,
|
||||
Prefix: roomIDsCache,
|
||||
MaxAge: maxAge,
|
||||
},
|
||||
RoomServerEvents: &RistrettoCostedCachePartition[int64, *gomatrixserverlib.Event]{ // event NID -> event
|
||||
&RistrettoCachePartition[int64, *gomatrixserverlib.Event]{
|
||||
cache: cache,
|
||||
Prefix: roomEventsCache,
|
||||
MaxAge: maxAge,
|
||||
},
|
||||
},
|
||||
RoomServerStateKeys: &RistrettoCachePartition[types.EventStateKeyNID, string]{ // event NID -> event state key
|
||||
cache: cache,
|
||||
Prefix: eventStateKeyCache,
|
||||
MaxAge: maxAge,
|
||||
},
|
||||
RoomInfos: &RistrettoCachePartition[string, *types.RoomInfo]{ // room ID -> room info
|
||||
cache: cache,
|
||||
Prefix: roomInfosCache,
|
||||
Mutable: true,
|
||||
MaxAge: maxAge,
|
||||
},
|
||||
FederationPDUs: &RistrettoCostedCachePartition[int64, *gomatrixserverlib.HeaderedEvent]{ // queue NID -> PDU
|
||||
&RistrettoCachePartition[int64, *gomatrixserverlib.HeaderedEvent]{
|
||||
cache: cache,
|
||||
Prefix: federationPDUsCache,
|
||||
Mutable: true,
|
||||
MaxAge: lesserOf(time.Hour/2, maxAge),
|
||||
},
|
||||
},
|
||||
FederationEDUs: &RistrettoCostedCachePartition[int64, *gomatrixserverlib.EDU]{ // queue NID -> EDU
|
||||
&RistrettoCachePartition[int64, *gomatrixserverlib.EDU]{
|
||||
cache: cache,
|
||||
Prefix: federationEDUsCache,
|
||||
Mutable: true,
|
||||
MaxAge: lesserOf(time.Hour/2, maxAge),
|
||||
},
|
||||
},
|
||||
SpaceSummaryRooms: &RistrettoCachePartition[string, gomatrixserverlib.MSC2946SpacesResponse]{ // room ID -> space response
|
||||
cache: cache,
|
||||
Prefix: spaceSummaryRoomsCache,
|
||||
Mutable: true,
|
||||
MaxAge: maxAge,
|
||||
},
|
||||
LazyLoading: &RistrettoCachePartition[lazyLoadingCacheKey, string]{ // composite key -> event ID
|
||||
cache: cache,
|
||||
Prefix: lazyLoadingCache,
|
||||
Mutable: true,
|
||||
MaxAge: maxAge,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type RistrettoCostedCachePartition[k keyable, v costable] struct {
|
||||
*RistrettoCachePartition[k, v]
|
||||
}
|
||||
|
||||
func (c *RistrettoCostedCachePartition[K, V]) Set(key K, value V) {
|
||||
cost := value.CacheCost()
|
||||
c.setWithCost(key, value, int64(cost))
|
||||
}
|
||||
|
||||
type RistrettoCachePartition[K keyable, V any] struct {
|
||||
cache *ristretto.Cache
|
||||
Prefix byte
|
||||
Mutable bool
|
||||
MaxAge time.Duration
|
||||
}
|
||||
|
||||
func (c *RistrettoCachePartition[K, V]) setWithCost(key K, value V, cost int64) {
|
||||
bkey := fmt.Sprintf("%c%v", c.Prefix, key)
|
||||
if !c.Mutable {
|
||||
if v, ok := c.cache.Get(bkey); ok && v != nil && !reflect.DeepEqual(v, value) {
|
||||
panic(fmt.Sprintf("invalid use of immutable cache tries to change value of %v from %v to %v", key, v, value))
|
||||
}
|
||||
}
|
||||
c.cache.SetWithTTL(bkey, value, int64(len(bkey))+cost, c.MaxAge)
|
||||
}
|
||||
|
||||
func (c *RistrettoCachePartition[K, V]) Set(key K, value V) {
|
||||
var cost int64
|
||||
if cv, ok := any(value).(string); ok {
|
||||
cost = int64(len(cv))
|
||||
} else {
|
||||
cost = int64(unsafe.Sizeof(value))
|
||||
}
|
||||
c.setWithCost(key, value, cost)
|
||||
}
|
||||
|
||||
func (c *RistrettoCachePartition[K, V]) Unset(key K) {
|
||||
bkey := fmt.Sprintf("%c%v", c.Prefix, key)
|
||||
if !c.Mutable {
|
||||
panic(fmt.Sprintf("invalid use of immutable cache tries to unset value of %v", key))
|
||||
}
|
||||
c.cache.Del(bkey)
|
||||
}
|
||||
|
||||
func (c *RistrettoCachePartition[K, V]) Get(key K) (value V, ok bool) {
|
||||
bkey := fmt.Sprintf("%c%v", c.Prefix, key)
|
||||
v, ok := c.cache.Get(bkey)
|
||||
if !ok || v == nil {
|
||||
var empty V
|
||||
return empty, false
|
||||
}
|
||||
value, ok = v.(V)
|
||||
return
|
||||
}
|
||||
|
||||
func lesserOf(a, b time.Duration) time.Duration {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
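All of the partitions above share a single ristretto.Cache: each RistrettoCachePartition folds its one-byte prefix into the stringified key, estimates an entry's cost from the value size (or CacheCost() for costed partitions) and applies the configured TTL. A rough usage sketch follows; the 1 GiB budget and one-hour max age are assumed values, not taken from the diff.

package main

import (
    "fmt"
    "time"

    "github.com/matrix-org/dendrite/internal/caching"
    "github.com/matrix-org/dendrite/setup/config"
)

func main() {
    caches := caching.NewRistrettoCache(
        config.DataUnit(1*1024*1024*1024), // estimated max cache size in bytes (assumed value)
        time.Hour,                         // max age for most partitions (assumed value)
        false,                             // enablePrometheus
    )

    // Each partition is strongly typed; this one maps room NID -> room ID.
    caches.RoomServerRoomIDs.Set(123, "!abc:example.com")

    // Note: Ristretto admits writes asynchronously, so an immediate Get can
    // still miss shortly after a Set.
    if roomID, ok := caches.RoomServerRoomIDs.Get(123); ok {
        fmt.Println("cached room ID:", roomID)
    }
}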
|
||||
|
|
@ -170,20 +170,18 @@ func truncateAuthAndPrevEvents(auth, prev []gomatrixserverlib.EventReference) (

// RedactEvent redacts the given event and sets the unsigned field appropriately. This should be used by
// downstream components to the roomserver when an OutputTypeRedactedEvent occurs.
func RedactEvent(redactionEvent, redactedEvent *gomatrixserverlib.Event) (*gomatrixserverlib.Event, error) {
func RedactEvent(redactionEvent, redactedEvent *gomatrixserverlib.Event) error {
    // sanity check
    if redactionEvent.Type() != gomatrixserverlib.MRoomRedaction {
        return nil, fmt.Errorf("RedactEvent: redactionEvent isn't a redaction event, is '%s'", redactionEvent.Type())
        return fmt.Errorf("RedactEvent: redactionEvent isn't a redaction event, is '%s'", redactionEvent.Type())
    }
    r := redactedEvent.Redact()
    err := r.SetUnsignedField("redacted_because", redactionEvent)
    if err != nil {
        return nil, err
    redactedEvent.Redact()
    if err := redactedEvent.SetUnsignedField("redacted_because", redactionEvent); err != nil {
        return err
    }
    // NOTSPEC: sytest relies on this unspecced field existing :(
    err = r.SetUnsignedField("redacted_by", redactionEvent.EventID())
    if err != nil {
        return nil, err
    if err := redactedEvent.SetUnsignedField("redacted_by", redactionEvent.EventID()); err != nil {
        return err
    }
    return r, nil
    return nil
}
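RedactEvent now mutates the target event in place and returns only an error, rather than handing back a fresh *gomatrixserverlib.Event. A small sketch of the new calling convention; the wrapper function and the eventutil import path are assumptions for illustration.

package example

import (
    "fmt"

    "github.com/matrix-org/dendrite/internal/eventutil" // assumed location of RedactEvent
    "github.com/matrix-org/gomatrixserverlib"
)

// applyRedaction redacts target in place; afterwards its unsigned section
// carries redacted_because and redacted_by.
func applyRedaction(redaction, target *gomatrixserverlib.Event) error {
    if err := eventutil.RedactEvent(redaction, target); err != nil {
        return fmt.Errorf("RedactEvent: %w", err)
    }
    return nil
}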
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@ import (
|
|||
|
||||
"github.com/matrix-org/dendrite/clientapi/jsonerror"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
userapi "github.com/matrix-org/dendrite/userapi/api"
|
||||
"github.com/matrix-org/util"
|
||||
)
|
||||
|
||||
|
|
@ -17,6 +18,7 @@ type RateLimits struct {
|
|||
enabled bool
|
||||
requestThreshold int64
|
||||
cooloffDuration time.Duration
|
||||
exemptUserIDs map[string]struct{}
|
||||
}
|
||||
|
||||
func NewRateLimits(cfg *config.RateLimiting) *RateLimits {
|
||||
|
|
@ -25,6 +27,10 @@ func NewRateLimits(cfg *config.RateLimiting) *RateLimits {
|
|||
enabled: cfg.Enabled,
|
||||
requestThreshold: cfg.Threshold,
|
||||
cooloffDuration: time.Duration(cfg.CooloffMS) * time.Millisecond,
|
||||
exemptUserIDs: map[string]struct{}{},
|
||||
}
|
||||
for _, userID := range cfg.ExemptUserIDs {
|
||||
l.exemptUserIDs[userID] = struct{}{}
|
||||
}
|
||||
if l.enabled {
|
||||
go l.clean()
|
||||
|
|
@ -52,7 +58,7 @@ func (l *RateLimits) clean() {
|
|||
}
|
||||
}
|
||||
|
||||
func (l *RateLimits) Limit(req *http.Request) *util.JSONResponse {
|
||||
func (l *RateLimits) Limit(req *http.Request, device *userapi.Device) *util.JSONResponse {
|
||||
// If rate limiting is disabled then do nothing.
|
||||
if !l.enabled {
|
||||
return nil
|
||||
|
|
@ -67,9 +73,26 @@ func (l *RateLimits) Limit(req *http.Request) *util.JSONResponse {
|
|||
|
||||
// First of all, work out if X-Forwarded-For was sent to us. If not
|
||||
// then we'll just use the IP address of the caller.
|
||||
caller := req.RemoteAddr
|
||||
if forwardedFor := req.Header.Get("X-Forwarded-For"); forwardedFor != "" {
|
||||
caller = forwardedFor
|
||||
var caller string
|
||||
if device != nil {
|
||||
switch device.AccountType {
|
||||
case userapi.AccountTypeAdmin:
|
||||
return nil // don't rate-limit server administrators
|
||||
case userapi.AccountTypeAppService:
|
||||
return nil // don't rate-limit appservice users
|
||||
default:
|
||||
if _, ok := l.exemptUserIDs[device.UserID]; ok {
|
||||
// If the user is exempt from rate limiting then do nothing.
|
||||
return nil
|
||||
}
|
||||
caller = device.UserID + device.ID
|
||||
}
|
||||
} else {
|
||||
if forwardedFor := req.Header.Get("X-Forwarded-For"); forwardedFor != "" {
|
||||
caller = forwardedFor
|
||||
} else {
|
||||
caller = req.RemoteAddr
|
||||
}
|
||||
}
|
||||
|
||||
// Look up the caller's channel, if they have one.
|
||||
|
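Limit now receives the authenticated device, so server admins and appservice users skip rate limiting entirely and other users can be exempted by user ID; unauthenticated requests still fall back to X-Forwarded-For or the remote address. A sketch of a handler using the new signature follows; the import path for RateLimits is an assumption.

package example

import (
    "net/http"

    "github.com/matrix-org/dendrite/internal/httputil" // assumed location of RateLimits
    userapi "github.com/matrix-org/dendrite/userapi/api"
    "github.com/matrix-org/util"
)

// limitedHandler rejects the request early if the caller is over the limit;
// the limiter itself builds the "too many requests" JSON response.
func limitedHandler(rl *httputil.RateLimits, req *http.Request, device *userapi.Device) util.JSONResponse {
    if resp := rl.Limit(req, device); resp != nil {
        return *resp
    }
    return util.JSONResponse{Code: http.StatusOK, JSON: struct{}{}}
}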
|
|
|||
|
|
@ -1,130 +1,142 @@
|
|||
// Copyright 2022 The Matrix.org Foundation C.I.C.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package sqlutil
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
"github.com/pressly/goose"
|
||||
"github.com/matrix-org/dendrite/internal"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type Migrations struct {
|
||||
registeredGoMigrations map[int64]*goose.Migration
|
||||
const createDBMigrationsSQL = "" +
|
||||
"CREATE TABLE IF NOT EXISTS db_migrations (" +
|
||||
" version TEXT PRIMARY KEY NOT NULL," +
|
||||
" time TEXT NOT NULL," +
|
||||
" dendrite_version TEXT NOT NULL" +
|
||||
");"
|
||||
|
||||
const insertVersionSQL = "" +
|
||||
"INSERT INTO db_migrations (version, time, dendrite_version)" +
|
||||
" VALUES ($1, $2, $3)"
|
||||
|
||||
const selectDBMigrationsSQL = "SELECT version FROM db_migrations"
|
||||
|
||||
// Migration defines a migration to be run.
|
||||
type Migration struct {
|
||||
// Version is a simple description/name of this migration.
|
||||
Version string
|
||||
// Up defines the function to execute for an upgrade.
|
||||
Up func(ctx context.Context, txn *sql.Tx) error
|
||||
// Down defines the function to execute for a downgrade (not implemented yet).
|
||||
Down func(ctx context.Context, txn *sql.Tx) error
|
||||
}
|
||||
|
||||
func NewMigrations() *Migrations {
|
||||
return &Migrations{
|
||||
registeredGoMigrations: make(map[int64]*goose.Migration),
|
||||
// Migrator
|
||||
type Migrator struct {
|
||||
db *sql.DB
|
||||
migrations []Migration
|
||||
knownMigrations map[string]struct{}
|
||||
mutex *sync.Mutex
|
||||
}
|
||||
|
||||
// NewMigrator creates a new DB migrator.
|
||||
func NewMigrator(db *sql.DB) *Migrator {
|
||||
return &Migrator{
|
||||
db: db,
|
||||
migrations: []Migration{},
|
||||
knownMigrations: make(map[string]struct{}),
|
||||
mutex: &sync.Mutex{},
|
||||
}
|
||||
}
|
||||
|
||||
// Copy-pasted from goose directly to store migrations into a map we control
|
||||
|
||||
// AddMigration adds a migration.
|
||||
func (m *Migrations) AddMigration(up func(*sql.Tx) error, down func(*sql.Tx) error) {
|
||||
_, filename, _, _ := runtime.Caller(1)
|
||||
m.AddNamedMigration(filename, up, down)
|
||||
}
|
||||
|
||||
// AddNamedMigration : Add a named migration.
|
||||
func (m *Migrations) AddNamedMigration(filename string, up func(*sql.Tx) error, down func(*sql.Tx) error) {
|
||||
v, _ := goose.NumericComponent(filename)
|
||||
migration := &goose.Migration{Version: v, Next: -1, Previous: -1, Registered: true, UpFn: up, DownFn: down, Source: filename}
|
||||
|
||||
if existing, ok := m.registeredGoMigrations[v]; ok {
|
||||
panic(fmt.Sprintf("failed to add migration %q: version conflicts with %q", filename, existing.Source))
|
||||
// AddMigrations appends migrations to the list of migrations. Migrations are executed
|
||||
// in the order they are added to the list. De-duplicates migrations using their Version field.
|
||||
func (m *Migrator) AddMigrations(migrations ...Migration) {
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
for _, mig := range migrations {
|
||||
if _, ok := m.knownMigrations[mig.Version]; !ok {
|
||||
m.migrations = append(m.migrations, mig)
|
||||
m.knownMigrations[mig.Version] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
m.registeredGoMigrations[v] = migration
|
||||
}
|
||||
|
||||
// RunDeltas up to the latest version.
|
||||
func (m *Migrations) RunDeltas(db *sql.DB, props *config.DatabaseOptions) error {
|
||||
maxVer := goose.MaxVersion
|
||||
minVer := int64(0)
|
||||
migrations, err := m.collect(minVer, maxVer)
|
||||
// Up executes all migrations in order they were added.
|
||||
func (m *Migrator) Up(ctx context.Context) error {
|
||||
var (
|
||||
err error
|
||||
dendriteVersion = internal.VersionString()
|
||||
)
|
||||
// ensure there is a table for known migrations
|
||||
executedMigrations, err := m.ExecutedMigrations(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("runDeltas: Failed to collect migrations: %w", err)
|
||||
return fmt.Errorf("unable to create/get migrations: %w", err)
|
||||
}
|
||||
if props.ConnectionString.IsPostgres() {
|
||||
if err = goose.SetDialect("postgres"); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if props.ConnectionString.IsSQLite() {
|
||||
if err = goose.SetDialect("sqlite3"); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("unknown connection string: %s", props.ConnectionString)
|
||||
}
|
||||
for {
|
||||
current, err := goose.EnsureDBVersion(db)
|
||||
if err != nil {
|
||||
return fmt.Errorf("runDeltas: Failed to EnsureDBVersion: %w", err)
|
||||
}
|
||||
|
||||
next, err := migrations.Next(current)
|
||||
if err != nil {
|
||||
if err == goose.ErrNoNextVersion {
|
||||
return nil
|
||||
return WithTransaction(m.db, func(txn *sql.Tx) error {
|
||||
for i := range m.migrations {
|
||||
now := time.Now().UTC().Format(time.RFC3339)
|
||||
migration := m.migrations[i]
|
||||
logrus.Debugf("Executing database migration '%s'", migration.Version)
|
||||
// Skip migration if it was already executed
|
||||
if _, ok := executedMigrations[migration.Version]; ok {
|
||||
continue
|
||||
}
|
||||
err = migration.Up(ctx, txn)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to execute migration '%s': %w", migration.Version, err)
|
||||
}
|
||||
_, err = txn.ExecContext(ctx, insertVersionSQL,
|
||||
migration.Version,
|
||||
now,
|
||||
dendriteVersion,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to insert executed migrations: %w", err)
|
||||
}
|
||||
|
||||
return fmt.Errorf("runDeltas: Failed to load next migration to %+v : %w", next, err)
|
||||
}
|
||||
|
||||
if err = next.Up(db); err != nil {
|
||||
return fmt.Errorf("runDeltas: Failed run migration: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (m *Migrations) collect(current, target int64) (goose.Migrations, error) {
|
||||
var migrations goose.Migrations
|
||||
|
||||
// Go migrations registered via goose.AddMigration().
|
||||
for _, migration := range m.registeredGoMigrations {
|
||||
v, err := goose.NumericComponent(migration.Source)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if versionFilter(v, current, target) {
|
||||
migrations = append(migrations, migration)
|
||||
// ExecutedMigrations returns a map with already executed migrations in addition to creating the
|
||||
// migrations table, if it doesn't exist.
|
||||
func (m *Migrator) ExecutedMigrations(ctx context.Context) (map[string]struct{}, error) {
|
||||
result := make(map[string]struct{})
|
||||
_, err := m.db.ExecContext(ctx, createDBMigrationsSQL)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to create db_migrations: %w", err)
|
||||
}
|
||||
rows, err := m.db.QueryContext(ctx, selectDBMigrationsSQL)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to query db_migrations: %w", err)
|
||||
}
|
||||
defer internal.CloseAndLogIfError(ctx, rows, "ExecutedMigrations: rows.close() failed")
|
||||
var version string
|
||||
for rows.Next() {
|
||||
if err = rows.Scan(&version); err != nil {
|
||||
return nil, fmt.Errorf("unable to scan version: %w", err)
|
||||
}
|
||||
result[version] = struct{}{}
|
||||
}
|
||||
|
||||
migrations = sortAndConnectMigrations(migrations)
|
||||
|
||||
return migrations, nil
|
||||
}
|
||||
|
||||
func sortAndConnectMigrations(migrations goose.Migrations) goose.Migrations {
|
||||
sort.Sort(migrations)
|
||||
|
||||
// now that we're sorted in the appropriate direction,
|
||||
// populate next and previous for each migration
|
||||
for i, m := range migrations {
|
||||
prev := int64(-1)
|
||||
if i > 0 {
|
||||
prev = migrations[i-1].Version
|
||||
migrations[i-1].Next = m.Version
|
||||
}
|
||||
migrations[i].Previous = prev
|
||||
}
|
||||
|
||||
return migrations
|
||||
}
|
||||
|
||||
func versionFilter(v, current, target int64) bool {
|
||||
|
||||
if target > current {
|
||||
return v > current && v <= target
|
||||
}
|
||||
|
||||
if target < current {
|
||||
return v <= current && v > target
|
||||
}
|
||||
|
||||
return false
|
||||
return result, rows.Err()
|
||||
}
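The goose-based migration runner is replaced by a small Migrator that tracks executed migrations by name in a db_migrations table and runs the outstanding ones inside a single transaction. A minimal sketch of how a storage component would register and run a delta; the table and version names below are hypothetical.

package example

import (
    "context"
    "database/sql"

    "github.com/matrix-org/dendrite/internal/sqlutil"
)

func prepareDatabase(ctx context.Context, db *sql.DB) error {
    m := sqlutil.NewMigrator(db)
    m.AddMigrations(sqlutil.Migration{
        Version: "example: add new_col", // hypothetical version name
        Up: func(ctx context.Context, txn *sql.Tx) error {
            _, err := txn.ExecContext(ctx, "ALTER TABLE example ADD COLUMN new_col TEXT;")
            return err
        },
    })
    // Up skips versions already recorded in db_migrations and records each
    // newly executed one alongside the Dendrite version that ran it.
    return m.Up(ctx)
}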
|
||||
|
|
|
|||
112
internal/sqlutil/migrate_test.go
Normal file
|
|
@ -0,0 +1,112 @@
|
|||
package sqlutil_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/matrix-org/dendrite/internal/sqlutil"
|
||||
"github.com/matrix-org/dendrite/test"
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
)
|
||||
|
||||
var dummyMigrations = []sqlutil.Migration{
|
||||
{
|
||||
Version: "init",
|
||||
Up: func(ctx context.Context, txn *sql.Tx) error {
|
||||
_, err := txn.ExecContext(ctx, "CREATE TABLE IF NOT EXISTS dummy ( test TEXT );")
|
||||
return err
|
||||
},
|
||||
},
|
||||
{
|
||||
Version: "v2",
|
||||
Up: func(ctx context.Context, txn *sql.Tx) error {
|
||||
_, err := txn.ExecContext(ctx, "ALTER TABLE dummy ADD COLUMN test2 TEXT;")
|
||||
return err
|
||||
},
|
||||
},
|
||||
{
|
||||
Version: "v2", // duplicate, this migration will be skipped
|
||||
Up: func(ctx context.Context, txn *sql.Tx) error {
|
||||
_, err := txn.ExecContext(ctx, "ALTER TABLE dummy ADD COLUMN test2 TEXT;")
|
||||
return err
|
||||
},
|
||||
},
|
||||
{
|
||||
Version: "multiple execs",
|
||||
Up: func(ctx context.Context, txn *sql.Tx) error {
|
||||
_, err := txn.ExecContext(ctx, "ALTER TABLE dummy ADD COLUMN test3 TEXT;")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = txn.ExecContext(ctx, "ALTER TABLE dummy ADD COLUMN test4 TEXT;")
|
||||
return err
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var failMigration = sqlutil.Migration{
|
||||
Version: "iFail",
|
||||
Up: func(ctx context.Context, txn *sql.Tx) error {
|
||||
return fmt.Errorf("iFail")
|
||||
},
|
||||
Down: nil,
|
||||
}
|
||||
|
||||
func Test_migrations_Up(t *testing.T) {
|
||||
withFail := append(dummyMigrations, failMigration)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
migrations []sqlutil.Migration
|
||||
wantResult map[string]struct{}
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "dummy migration",
|
||||
migrations: dummyMigrations,
|
||||
wantResult: map[string]struct{}{
|
||||
"init": {},
|
||||
"v2": {},
|
||||
"multiple execs": {},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "with fail",
|
||||
migrations: withFail,
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
test.WithAllDatabases(t, func(t *testing.T, dbType test.DBType) {
|
||||
conStr, close := test.PrepareDBConnectionString(t, dbType)
|
||||
defer close()
|
||||
driverName := "sqlite3"
|
||||
if dbType == test.DBTypePostgres {
|
||||
driverName = "postgres"
|
||||
}
|
||||
db, err := sql.Open(driverName, conStr)
|
||||
if err != nil {
|
||||
t.Errorf("unable to open database: %v", err)
|
||||
}
|
||||
m := sqlutil.NewMigrator(db)
|
||||
m.AddMigrations(tt.migrations...)
|
||||
if err = m.Up(ctx); (err != nil) != tt.wantErr {
|
||||
t.Errorf("Up() error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
result, err := m.ExecutedMigrations(ctx)
|
||||
if err != nil {
|
||||
t.Errorf("unable to get executed migrations: %v", err)
|
||||
}
|
||||
if !tt.wantErr && !reflect.DeepEqual(result, tt.wantResult) {
|
||||
t.Errorf("expected: %+v, got %v", tt.wantResult, result)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@ -37,7 +37,7 @@ type traceInterceptor struct {
    sqlmw.NullInterceptor
}

func (in *traceInterceptor) StmtQueryContext(ctx context.Context, stmt driver.StmtQueryContext, query string, args []driver.NamedValue) (driver.Rows, error) {
func (in *traceInterceptor) StmtQueryContext(ctx context.Context, stmt driver.StmtQueryContext, query string, args []driver.NamedValue) (context.Context, driver.Rows, error) {
    startedAt := time.Now()
    rows, err := stmt.QueryContext(ctx, args)

@ -45,7 +45,7 @@ func (in *traceInterceptor) StmtQueryContext(ctx context.Context, stmt driver.St

    logrus.WithField("duration", time.Since(startedAt)).WithField(logrus.ErrorKey, err).Debug("executed sql query ", query, " args: ", args)

    return rows, err
    return ctx, rows, err
}

func (in *traceInterceptor) StmtExecContext(ctx context.Context, stmt driver.StmtExecContext, query string, args []driver.NamedValue) (driver.Result, error) {
|
||||
|
|
|
|||
|
|
@ -16,8 +16,8 @@ var build string

const (
    VersionMajor = 0
    VersionMinor = 8
    VersionPatch = 5
    VersionMinor = 9
    VersionPatch = 0
    VersionTag   = "" // example: "rc1"
)
|
||||
|
||||
|
|
|
|||
|
|
@ -62,8 +62,6 @@ type FederationKeyAPI interface {
|
|||
QueryKeys(ctx context.Context, req *QueryKeysRequest, res *QueryKeysResponse)
|
||||
QuerySignatures(ctx context.Context, req *QuerySignaturesRequest, res *QuerySignaturesResponse)
|
||||
QueryDeviceMessages(ctx context.Context, req *QueryDeviceMessagesRequest, res *QueryDeviceMessagesResponse)
|
||||
// InputDeviceListUpdate from a federated server EDU
|
||||
InputDeviceListUpdate(ctx context.Context, req *InputDeviceListUpdateRequest, res *InputDeviceListUpdateResponse)
|
||||
PerformUploadDeviceKeys(ctx context.Context, req *PerformUploadDeviceKeysRequest, res *PerformUploadDeviceKeysResponse)
|
||||
PerformClaimKeys(ctx context.Context, req *PerformClaimKeysRequest, res *PerformClaimKeysResponse)
|
||||
}
|
||||
|
|
@ -337,11 +335,3 @@ type QuerySignaturesResponse struct {
|
|||
// The request error, if any
|
||||
Error *KeyError
|
||||
}
|
||||
|
||||
type InputDeviceListUpdateRequest struct {
|
||||
Event gomatrixserverlib.DeviceListUpdateEvent
|
||||
}
|
||||
|
||||
type InputDeviceListUpdateResponse struct {
|
||||
Error *KeyError
|
||||
}
|
||||
|
|
|
|||
82
keyserver/consumers/devicelistupdate.go
Normal file
|
|
@ -0,0 +1,82 @@
|
|||
// Copyright 2022 The Matrix.org Foundation C.I.C.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package consumers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/matrix-org/dendrite/keyserver/internal"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
"github.com/matrix-org/dendrite/setup/jetstream"
|
||||
"github.com/matrix-org/dendrite/setup/process"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/nats-io/nats.go"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// DeviceListUpdateConsumer consumes device list updates that came in over federation.
|
||||
type DeviceListUpdateConsumer struct {
|
||||
ctx context.Context
|
||||
jetstream nats.JetStreamContext
|
||||
durable string
|
||||
topic string
|
||||
updater *internal.DeviceListUpdater
|
||||
}
|
||||
|
||||
// NewDeviceListUpdateConsumer creates a new DeviceListConsumer. Call Start() to begin consuming from key servers.
|
||||
func NewDeviceListUpdateConsumer(
|
||||
process *process.ProcessContext,
|
||||
cfg *config.KeyServer,
|
||||
js nats.JetStreamContext,
|
||||
updater *internal.DeviceListUpdater,
|
||||
) *DeviceListUpdateConsumer {
|
||||
return &DeviceListUpdateConsumer{
|
||||
ctx: process.Context(),
|
||||
jetstream: js,
|
||||
durable: cfg.Matrix.JetStream.Prefixed("KeyServerInputDeviceListConsumer"),
|
||||
topic: cfg.Matrix.JetStream.Prefixed(jetstream.InputDeviceListUpdate),
|
||||
updater: updater,
|
||||
}
|
||||
}
|
||||
|
||||
// Start consuming from key servers
|
||||
func (t *DeviceListUpdateConsumer) Start() error {
|
||||
return jetstream.JetStreamConsumer(
|
||||
t.ctx, t.jetstream, t.topic, t.durable, t.onMessage,
|
||||
nats.DeliverAll(), nats.ManualAck(),
|
||||
)
|
||||
}
|
||||
|
||||
// onMessage is called in response to a message received on the
|
||||
// key change events topic from the key server.
|
||||
func (t *DeviceListUpdateConsumer) onMessage(ctx context.Context, msg *nats.Msg) bool {
|
||||
var m gomatrixserverlib.DeviceListUpdateEvent
|
||||
if err := json.Unmarshal(msg.Data, &m); err != nil {
|
||||
logrus.WithError(err).Errorf("Failed to read from device list update input topic")
|
||||
return true
|
||||
}
|
||||
err := t.updater.Update(ctx, m)
|
||||
if err != nil {
|
||||
logrus.WithFields(logrus.Fields{
|
||||
"user_id": m.UserID,
|
||||
"device_id": m.DeviceID,
|
||||
"stream_id": m.StreamID,
|
||||
"prev_id": m.PrevID,
|
||||
}).WithError(err).Errorf("Failed to update device list")
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
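This consumer replaces the removed InputDeviceListUpdate internal API: device list update EDUs now arrive over JetStream rather than an internal HTTP call. A sketch of the producing side, publishing onto the same subject the consumer subscribes to; error handling and wiring are simplified and the function name is illustrative.

package example

import (
    "encoding/json"

    "github.com/matrix-org/dendrite/setup/config"
    "github.com/matrix-org/dendrite/setup/jetstream"
    "github.com/matrix-org/gomatrixserverlib"
    "github.com/nats-io/nats.go"
)

func publishDeviceListUpdate(js nats.JetStreamContext, cfg *config.KeyServer, update gomatrixserverlib.DeviceListUpdateEvent) error {
    payload, err := json.Marshal(update)
    if err != nil {
        return err
    }
    // Same prefixed subject that DeviceListUpdateConsumer consumes from.
    _, err = js.Publish(cfg.Matrix.JetStream.Prefixed(jetstream.InputDeviceListUpdate), payload)
    return err
}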
|
||||
|
|
@ -374,7 +374,7 @@ func (u *DeviceListUpdater) processServer(serverName gomatrixserverlib.ServerNam
|
|||
// fetch stale device lists
|
||||
userIDs, err := u.db.StaleDeviceLists(ctx, []gomatrixserverlib.ServerName{serverName})
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("failed to load stale device lists")
|
||||
logger.WithError(err).Error("Failed to load stale device lists")
|
||||
return waitTime, true
|
||||
}
|
||||
failCount := 0
|
||||
|
|
@ -399,7 +399,7 @@ func (u *DeviceListUpdater) processServer(serverName gomatrixserverlib.ServerNam
|
|||
}
|
||||
} else {
|
||||
waitTime = time.Hour
|
||||
logger.WithError(err).WithField("user_id", userID).Warn("GetUserDevices returned unknown error type")
|
||||
logger.WithError(err).WithField("user_id", userID).Debug("GetUserDevices returned unknown error type")
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
|
@ -422,12 +422,12 @@ func (u *DeviceListUpdater) processServer(serverName gomatrixserverlib.ServerNam
|
|||
}
|
||||
err = u.updateDeviceList(&res)
|
||||
if err != nil {
|
||||
logger.WithError(err).WithField("user_id", userID).Error("fetched device list but failed to store/emit it")
|
||||
logger.WithError(err).WithField("user_id", userID).Error("Fetched device list but failed to store/emit it")
|
||||
failCount += 1
|
||||
}
|
||||
}
|
||||
if failCount > 0 {
|
||||
logger.WithField("total", len(userIDs)).WithField("failed", failCount).WithField("wait", waitTime).Error("failed to query device keys for some users")
|
||||
logger.WithField("total", len(userIDs)).WithField("failed", failCount).WithField("wait", waitTime).Warn("Failed to query device keys for some users")
|
||||
}
|
||||
for _, userID := range userIDs {
|
||||
// always clear the channel to unblock Update calls regardless of success/failure
|
||||
|
|
|
|||
|
|
@ -47,17 +47,6 @@ func (a *KeyInternalAPI) SetUserAPI(i userapi.KeyserverUserAPI) {
|
|||
a.UserAPI = i
|
||||
}
|
||||
|
||||
func (a *KeyInternalAPI) InputDeviceListUpdate(
|
||||
ctx context.Context, req *api.InputDeviceListUpdateRequest, res *api.InputDeviceListUpdateResponse,
|
||||
) {
|
||||
err := a.Updater.Update(ctx, req.Event)
|
||||
if err != nil {
|
||||
res.Error = &api.KeyError{
|
||||
Err: fmt.Sprintf("failed to update device list: %s", err),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (a *KeyInternalAPI) QueryKeyChanges(ctx context.Context, req *api.QueryKeyChangesRequest, res *api.QueryKeyChangesResponse) {
|
||||
userIDs, latest, err := a.DB.KeyChanges(ctx, req.Offset, req.ToOffset)
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -63,20 +63,6 @@ type httpKeyInternalAPI struct {
|
|||
func (h *httpKeyInternalAPI) SetUserAPI(i userapi.KeyserverUserAPI) {
|
||||
// no-op: doesn't need it
|
||||
}
|
||||
func (h *httpKeyInternalAPI) InputDeviceListUpdate(
|
||||
ctx context.Context, req *api.InputDeviceListUpdateRequest, res *api.InputDeviceListUpdateResponse,
|
||||
) {
|
||||
span, ctx := opentracing.StartSpanFromContext(ctx, "InputDeviceListUpdate")
|
||||
defer span.Finish()
|
||||
|
||||
apiURL := h.apiURL + InputDeviceListUpdatePath
|
||||
err := httputil.PostJSON(ctx, span, h.httpClient, apiURL, req, res)
|
||||
if err != nil {
|
||||
res.Error = &api.KeyError{
|
||||
Err: err.Error(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *httpKeyInternalAPI) PerformClaimKeys(
|
||||
ctx context.Context,
|
||||
|
|
|
|||
|
|
@ -25,17 +25,6 @@ import (
|
|||
)
|
||||
|
||||
func AddRoutes(internalAPIMux *mux.Router, s api.KeyInternalAPI) {
|
||||
internalAPIMux.Handle(InputDeviceListUpdatePath,
|
||||
httputil.MakeInternalAPI("inputDeviceListUpdate", func(req *http.Request) util.JSONResponse {
|
||||
request := api.InputDeviceListUpdateRequest{}
|
||||
response := api.InputDeviceListUpdateResponse{}
|
||||
if err := json.NewDecoder(req.Body).Decode(&request); err != nil {
|
||||
return util.MessageResponse(http.StatusBadRequest, err.Error())
|
||||
}
|
||||
s.InputDeviceListUpdate(req.Context(), &request, &response)
|
||||
return util.JSONResponse{Code: http.StatusOK, JSON: &response}
|
||||
}),
|
||||
)
|
||||
internalAPIMux.Handle(PerformClaimKeysPath,
|
||||
httputil.MakeInternalAPI("performClaimKeys", func(req *http.Request) util.JSONResponse {
|
||||
request := api.PerformClaimKeysRequest{}
|
||||
|
|
|
|||
|
|
@ -18,6 +18,7 @@ import (
|
|||
"github.com/gorilla/mux"
|
||||
fedsenderapi "github.com/matrix-org/dendrite/federationapi/api"
|
||||
"github.com/matrix-org/dendrite/keyserver/api"
|
||||
"github.com/matrix-org/dendrite/keyserver/consumers"
|
||||
"github.com/matrix-org/dendrite/keyserver/internal"
|
||||
"github.com/matrix-org/dendrite/keyserver/inthttp"
|
||||
"github.com/matrix-org/dendrite/keyserver/producers"
|
||||
|
|
@ -59,10 +60,17 @@ func NewInternalAPI(
|
|||
updater := internal.NewDeviceListUpdater(db, ap, keyChangeProducer, fedClient, 8) // 8 workers TODO: configurable
|
||||
ap.Updater = updater
|
||||
go func() {
|
||||
if err := updater.Start(); err != nil {
|
||||
if err = updater.Start(); err != nil {
|
||||
logrus.WithError(err).Panicf("failed to start device list updater")
|
||||
}
|
||||
}()
|
||||
|
||||
dlConsumer := consumers.NewDeviceListUpdateConsumer(
|
||||
base.ProcessContext, cfg, js, updater,
|
||||
)
|
||||
if err = dlConsumer.Start(); err != nil {
|
||||
logrus.WithError(err).Panic("failed to start device list consumer")
|
||||
}
|
||||
|
||||
return ap
|
||||
}
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff.