Merge pull request #22 from globekeeper/release/upstream
Upstream release v0.9.1
This commit is contained in: commit 2fd63adf8b

.github/workflows/dendrite.yml (vendored): 14 lines changed

@@ -52,7 +52,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        go: ["1.18"]
+        go: ["1.18", "1.19"]
     steps:
       - uses: actions/checkout@v3
       - name: Setup go
@@ -82,7 +82,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        go: ["1.18"]
+        go: ["1.18", "1.19"]
         goos: ["linux"]
         goarch: ["amd64"]
     steps:
@@ -106,6 +106,7 @@ jobs:
           GOOS: ${{ matrix.goos }}
           GOARCH: ${{ matrix.goarch }}
           CGO_ENABLED: 1
+          CGO_CFLAGS: -fno-stack-protector
         run: go build -trimpath -v -o "bin/" ./cmd/...

   # Dummy step to gate other tests on without repeating the whole list
@@ -305,7 +306,14 @@ jobs:
   integration-tests-done:
     name: Integration tests passed
-    needs: [initial-tests-done, upgrade_test, upgrade_test_direct, sytest, complement]
+    needs:
+      [
+        initial-tests-done,
+        upgrade_test,
+        upgrade_test_direct,
+        sytest,
+        complement,
+      ]
     runs-on: ubuntu-latest
     if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
     steps:
CHANGES.md: 14 lines changed

@@ -1,5 +1,19 @@
 # Changelog

+## Dendrite 0.9.1 (2022-08-03)
+
+### Fixes
+
+* Upgrades a dependency which caused issues building Dendrite with Go 1.19
+* The roomserver will no longer give up prematurely after failing to call `/state_ids`
+* Removes the faulty room info cache, which caused a number of race conditions and occasional bugs (including when creating and joining rooms)
+* The media endpoint now sets the `Cache-Control` header correctly to prevent web-based clients from hitting media endpoints excessively
+* The sync API will now advance the PDU stream position correctly in all cases (contributed by [sergekh2](https://github.com/sergekh2))
+* The sync API will now delete the correct range of send-to-device messages when advancing the stream position
+* The device list `changed` key in the `/sync` response should now return the correct users
+* A data race when looking up missing state has been fixed
+* The `/send_join` API is now applying stronger validation to the received membership event
+
 ## Dendrite 0.9.0 (2022-08-01)

 ### Features
@@ -196,14 +196,14 @@ func fillPublicRoomsReq(httpReq *http.Request, request *PublicRoomReq) *util.JSO

 // sliceInto returns a subslice of `slice` which honours the since/limit values given.
 //
-// 0  1  2  3  4  5  6   index
-// [A, B, C, D, E, F, G] slice
+//	0  1  2  3  4  5  6   index
+//	[A, B, C, D, E, F, G] slice
 //
-// limit=3 => A,B,C (prev='', next='3')
-// limit=3&since=3 => D,E,F (prev='0', next='6')
-// limit=3&since=6 => G (prev='3', next='')
+//	limit=3 => A,B,C (prev='', next='3')
+//	limit=3&since=3 => D,E,F (prev='0', next='6')
+//	limit=3&since=6 => G (prev='3', next='')
 //
-// A value of '-1' for prev/next indicates no position.
+// A value of '-1' for prev/next indicates no position.
 func sliceInto(slice []gomatrixserverlib.PublicRoom, since int64, limit int16) (subset []gomatrixserverlib.PublicRoom, prev, next int) {
 	prev = -1
 	next = -1
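The comment above fully specifies the pagination contract, which makes it easy to illustrate. The following is a minimal sketch of a function with these semantics, using `string` elements for brevity; it is an illustration of the documented behaviour, not dendrite's actual implementation:

```go
package main

import "fmt"

// sliceIntoSketch honours since/limit as documented above: prev/next of -1
// mean "no position", and since <= 0 starts from the beginning.
func sliceIntoSketch(slice []string, since int64, limit int16) (subset []string, prev, next int) {
	prev, next = -1, -1
	start := int64(0)
	if since > 0 {
		start = since
		if p := since - int64(limit); p > 0 {
			prev = int(p) // the previous page starts here
		} else {
			prev = 0
		}
	}
	end := start + int64(limit)
	if end < int64(len(slice)) {
		next = int(end) // another page follows this one
	} else {
		end = int64(len(slice))
	}
	return slice[start:end], prev, next
}

func main() {
	rooms := []string{"A", "B", "C", "D", "E", "F", "G"}
	for _, since := range []int64{0, 3, 6} {
		subset, prev, next := sliceIntoSketch(rooms, since, 3)
		fmt.Printf("since=%d -> %v prev=%d next=%d\n", since, subset, prev, next)
	}
}
```

Running this prints the three cases from the doc comment, with -1 standing in for the empty prev/next tokens.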
@@ -63,9 +63,10 @@ var sendEventDuration = prometheus.NewHistogramVec(
 )

 // SendEvent implements:
-//   /rooms/{roomID}/send/{eventType}
-//   /rooms/{roomID}/send/{eventType}/{txnID}
-//   /rooms/{roomID}/state/{eventType}/{stateKey}
+//
+//	/rooms/{roomID}/send/{eventType}
+//	/rooms/{roomID}/send/{eventType}/{txnID}
+//	/rooms/{roomID}/state/{eventType}/{stateKey}
 func SendEvent(
 	req *http.Request,
 	device *userapi.Device,
@@ -38,8 +38,9 @@ type threePIDsResponse struct {
 }

 // RequestEmailToken implements:
-//   POST /account/3pid/email/requestToken
-//   POST /register/email/requestToken
+//
+//	POST /account/3pid/email/requestToken
+//	POST /register/email/requestToken
 func RequestEmailToken(req *http.Request, threePIDAPI api.ClientUserAPI, cfg *config.ClientAPI) util.JSONResponse {
 	var body threepid.EmailAssociationRequest
 	if reqErr := httputil.UnmarshalJSONRequest(req, &body); reqErr != nil {
@@ -30,7 +30,8 @@ import (
 )

 // RequestTurnServer implements:
-//   GET /voip/turnServer
+//
+//	GET /voip/turnServer
 func RequestTurnServer(req *http.Request, device *api.Device, cfg *config.ClientAPI) util.JSONResponse {
 	turnConfig := cfg.TURN

@@ -47,7 +47,9 @@ const HEAD = "HEAD"
 // We cannot use the dockerfile associated with the repo with each version sadly due to changes in
 // Docker versions. Specifically, earlier Dendrite versions are incompatible with newer Docker clients
 // due to the error:
-//   When using COPY with more than one source file, the destination must be a directory and end with a /
+//
+//	When using COPY with more than one source file, the destination must be a directory and end with a /
+//
 // We need to run a postgres anyway, so use the dockerfile associated with Complement instead.
 const Dockerfile = `FROM golang:1.18-stretch as build
 RUN apt-get update && apt-get install -y postgresql
@@ -95,7 +97,9 @@ CMD /build/run_dendrite.sh `
 const dendriteUpgradeTestLabel = "dendrite_upgrade_test"

 // downloadArchive downloads an arbitrary github archive of the form:
-//   https://github.com/matrix-org/dendrite/archive/v0.3.11.tar.gz
+//
+//	https://github.com/matrix-org/dendrite/archive/v0.3.11.tar.gz
+//
 // and re-tarballs it without the top-level directory which contains branch information. It inserts
 // the contents of `dockerfile` as a root file `Dockerfile` in the re-tarballed directory such that
 // you can directly feed the retarballed archive to `ImageBuild` to have it run said dockerfile.
@@ -18,9 +18,9 @@ type user struct {
 }

 // runTests performs the following operations:
-// - register alice and bob with branch name muxed into the localpart
-// - create a DM room for the 2 users and exchange messages
-// - create/join a public #global room and exchange messages
+//   - register alice and bob with branch name muxed into the localpart
+//   - create a DM room for the 2 users and exchange messages
+//   - create/join a public #global room and exchange messages
 func runTests(baseURL, branchName string) error {
 	// register 2 users
 	users := []user{
@@ -34,13 +34,16 @@ type JSServer struct {

 // OnRequestFromJS is the function that JS will invoke when there is a new request.
 // The JS function signature is:
-//    function(reqString: string): Promise<{result: string, error: string}>
+//
+//	function(reqString: string): Promise<{result: string, error: string}>
+//
 // Usage is like:
-//    const res = await global._go_js_server.fetch(reqString);
-//    if (res.error) {
-//        // handle error: this is a 'network' error, not a non-2xx error.
-//    }
-//    const rawHttpResponse = res.result;
+//
+//	const res = await global._go_js_server.fetch(reqString);
+//	if (res.error) {
+//	    // handle error: this is a 'network' error, not a non-2xx error.
+//	}
+//	const rawHttpResponse = res.result;
 func (h *JSServer) OnRequestFromJS(this js.Value, args []js.Value) interface{} {
 	// we HAVE to spawn a new goroutine and return immediately or else Go will deadlock
 	// if this request blocks at all e.g for /sync calls
@@ -113,6 +113,11 @@ global:
     addresses:
       # - localhost:4222

+    # Disable the validation of TLS certificates of NATS. This is
+    # not recommended in production since it may allow NATS traffic
+    # to be sent to an insecure endpoint.
+    disable_tls_validation: false
+
     # Persistent directory to store JetStream streams in. This directory should be
     # preserved across Dendrite restarts.
     storage_path: ./
@@ -187,7 +192,7 @@ client_api:
   # and appservice users are exempt from rate limiting by default.
   rate_limiting:
     enabled: true
-    threshold: 5
+    threshold: 20
     cooloff_ms: 500
     exempt_user_ids:
       # - "@user:domain.com"
@@ -103,6 +103,11 @@ global:
     addresses:
       - hostname:4222

+    # Disable the validation of TLS certificates of NATS. This is
+    # not recommended in production since it may allow NATS traffic
+    # to be sent to an insecure endpoint.
+    disable_tls_validation: false
+
     # The prefix to use for stream names for this homeserver - really only useful
     # if you are running more than one Dendrite server on the same NATS deployment.
     topic_prefix: Dendrite
@@ -190,7 +195,7 @@ client_api:
   # and appservice users are exempt from rate limiting by default.
   rate_limiting:
     enabled: true
-    threshold: 5
+    threshold: 20
     cooloff_ms: 500
     exempt_user_ids:
       # - "@user:domain.com"
@@ -208,9 +208,10 @@ func (s *OutputRoomEventConsumer) processMessage(ore api.OutputNewRoomEvent, rew
 // joinedHostsAtEvent works out a list of matrix servers that were joined to
 // the room at the event (including peeking ones)
 // It is important to use the state at the event for sending messages because:
-// 1) We shouldn't send messages to servers that weren't in the room.
-// 2) If a server is kicked from the rooms it should still be told about the
-//    kick event,
+//  1. We shouldn't send messages to servers that weren't in the room.
+//  2. If a server is kicked from the rooms it should still be told about the
+//     kick event,
+//
 // Usually the list can be calculated locally, but sometimes it will need fetch
 // events from the room server.
 // Returns an error if there was a problem talking to the room server.
@@ -202,6 +202,14 @@ func SendJoin(
 		}
 	}

+	// Check that the event is from the server sending the request.
+	if event.Origin() != request.Origin() {
+		return util.JSONResponse{
+			Code: http.StatusForbidden,
+			JSON: jsonerror.Forbidden("The join must be sent by the server it originated on"),
+		}
+	}
+
 	// Check that a state key is provided.
 	if event.StateKey() == nil || event.StateKeyEquals("") {
 		return util.JSONResponse{
@@ -216,6 +224,22 @@ func SendJoin(
 		}
 	}

+	// Check that the sender belongs to the server that is sending us
+	// the request. By this point we've already asserted that the sender
+	// and the state key are equal so we don't need to check both.
+	var domain gomatrixserverlib.ServerName
+	if _, domain, err = gomatrixserverlib.SplitID('@', event.Sender()); err != nil {
+		return util.JSONResponse{
+			Code: http.StatusForbidden,
+			JSON: jsonerror.Forbidden("The sender of the join is invalid"),
+		}
+	} else if domain != request.Origin() {
+		return util.JSONResponse{
+			Code: http.StatusForbidden,
+			JSON: jsonerror.Forbidden("The sender of the join must belong to the origin server"),
+		}
+	}
+
 	// Check that the room ID is correct.
 	if event.RoomID() != roomID {
 		return util.JSONResponse{
@@ -242,14 +266,6 @@ func SendJoin(
 		}
 	}

-	// Check that the event is from the server sending the request.
-	if event.Origin() != request.Origin() {
-		return util.JSONResponse{
-			Code: http.StatusForbidden,
-			JSON: jsonerror.Forbidden("The join must be sent by the server it originated on"),
-		}
-	}
-
 	// Check that this is in fact a join event
 	membership, err := event.Membership()
 	if err != nil {
@@ -419,13 +435,13 @@ func SendJoin(
 // a restricted room join. If the room version does not support restricted
 // joins then this function returns with no side effects. This returns three
 // values:
-// * an optional JSON response body (i.e. M_UNABLE_TO_AUTHORISE_JOIN) which
-//   should always be sent back to the client if one is specified
-// * a user ID of an authorising user, typically a user that has power to
-//   issue invites in the room, if one has been found
-// * an error if there was a problem finding out if this was allowable,
-//   like if the room version isn't known or a problem happened talking to
-//   the roomserver
+//   - an optional JSON response body (i.e. M_UNABLE_TO_AUTHORISE_JOIN) which
+//     should always be sent back to the client if one is specified
+//   - a user ID of an authorising user, typically a user that has power to
+//     issue invites in the room, if one has been found
+//   - an error if there was a problem finding out if this was allowable,
+//     like if the room version isn't known or a problem happened talking to
+//     the roomserver
 func checkRestrictedJoin(
 	httpReq *http.Request,
 	rsAPI api.FederationRoomserverAPI,
go.mod: 12 lines changed

@@ -4,7 +4,6 @@ require (
 	github.com/Arceliar/ironwood v0.0.0-20220306165321-319147a02d98
 	github.com/Arceliar/phony v0.0.0-20210209235338-dde1a8dca979
-	github.com/DATA-DOG/go-sqlmock v1.5.0
 	github.com/MFAshby/stdemuxerhook v1.0.0
 	github.com/Masterminds/semver/v3 v3.1.1
 	github.com/codeclysm/extract v2.2.0+incompatible
 	github.com/dgraph-io/ristretto v0.1.1-0.20220403145359-8e850b710d6d
@@ -24,7 +23,7 @@ require (
 	github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91
 	github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16
 	github.com/matrix-org/gomatrixserverlib v0.0.0-20220725104114-b6003e522771
-	github.com/matrix-org/pinecone v0.0.0-20220708135211-1ce778fcde6a
+	github.com/matrix-org/pinecone v0.0.0-20220803093810-b7a830c08fb9
 	github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4
 	github.com/matryer/is v1.4.0
 	github.com/mattn/go-sqlite3 v1.14.13
@@ -48,7 +47,7 @@ require (
 	golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e
 	golang.org/x/image v0.0.0-20220413100746-70e8d0d3baa9
 	golang.org/x/mobile v0.0.0-20220518205345-8578da9835fd
-	golang.org/x/net v0.0.0-20220524220425-1d687d428aca
+	golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e
 	golang.org/x/term v0.0.0-20220526004731-065cf7ba2467
 	gopkg.in/h2non/bimg.v1 v1.1.9
 	gopkg.in/yaml.v2 v2.4.0
@@ -75,10 +74,11 @@ require (
 	github.com/juju/errors v0.0.0-20220203013757-bd733f3c86b9 // indirect
 	github.com/juju/testing v0.0.0-20220203020004-a0ff61f03494 // indirect
 	github.com/klauspost/compress v1.15.9 // indirect
-	github.com/lucas-clemente/quic-go v0.26.0 // indirect
+	github.com/lucas-clemente/quic-go v0.28.1 // indirect
 	github.com/marten-seemann/qtls-go1-16 v0.1.5 // indirect
-	github.com/marten-seemann/qtls-go1-17 v0.1.1 // indirect
-	github.com/marten-seemann/qtls-go1-18 v0.1.1 // indirect
+	github.com/marten-seemann/qtls-go1-17 v0.1.2 // indirect
+	github.com/marten-seemann/qtls-go1-18 v0.1.2 // indirect
+	github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
 	github.com/miekg/dns v1.1.49 // indirect
 	github.com/minio/highwayhash v1.0.2 // indirect
go.sum: 27 lines changed

@@ -52,8 +52,6 @@ github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20O
-github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
 github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM=
 github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
 github.com/MFAshby/stdemuxerhook v1.0.0 h1:1XFGzakrsHMv76AeanPDL26NOgwjPl/OUxbGhJthwMc=
 github.com/MFAshby/stdemuxerhook v1.0.0/go.mod h1:nLMI9FUf9Hz98n+yAXsTMUR4RZQy28uCTLG1Fzvj/uY=
 github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
 github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
 github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY=
@@ -323,8 +321,8 @@ github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
 github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
 github.com/lib/pq v1.10.5 h1:J+gdV2cUmX7ZqL2B0lFcW0m+egaHC2V3lpO8nWxyYiQ=
 github.com/lib/pq v1.10.5/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/lucas-clemente/quic-go v0.26.0 h1:ALBQXr9UJ8A1LyzvceX4jd9QFsHvlI0RR6BkV16o00A=
-github.com/lucas-clemente/quic-go v0.26.0/go.mod h1:AzgQoPda7N+3IqMMMkywBKggIFo2KT6pfnlrQ2QieeI=
+github.com/lucas-clemente/quic-go v0.28.1 h1:Uo0lvVxWg5la9gflIF9lwa39ONq85Xq2D91YNEIslzU=
+github.com/lucas-clemente/quic-go v0.28.1/go.mod h1:oGz5DKK41cJt5+773+BSO9BXDsREY4HLf7+0odGAPO0=
 github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
 github.com/lxn/walk v0.0.0-20210112085537-c389da54e794/go.mod h1:E23UucZGqpuUANJooIbHWCufXvOcT6E7Stq81gU+CSQ=
 github.com/lxn/win v0.0.0-20210218163916-a377121e959e/go.mod h1:KxxjdtRkfNoYDCUP5ryK7XJJNTnpC8atvtmTheChOtk=
@@ -332,10 +330,12 @@ github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN
 github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc=
 github.com/marten-seemann/qtls-go1-16 v0.1.5 h1:o9JrYPPco/Nukd/HpOHMHZoBDXQqoNtUCmny98/1uqQ=
 github.com/marten-seemann/qtls-go1-16 v0.1.5/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk=
-github.com/marten-seemann/qtls-go1-17 v0.1.1 h1:DQjHPq+aOzUeh9/lixAGunn6rIOQyWChPSI4+hgW7jc=
-github.com/marten-seemann/qtls-go1-17 v0.1.1/go.mod h1:C2ekUKcDdz9SDWxec1N/MvcXBpaX9l3Nx67XaR84L5s=
-github.com/marten-seemann/qtls-go1-18 v0.1.1 h1:qp7p7XXUFL7fpBvSS1sWD+uSqPvzNQK43DH+/qEkj0Y=
-github.com/marten-seemann/qtls-go1-18 v0.1.1/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4=
+github.com/marten-seemann/qtls-go1-17 v0.1.2 h1:JADBlm0LYiVbuSySCHeY863dNkcpMmDR7s0bLKJeYlQ=
+github.com/marten-seemann/qtls-go1-17 v0.1.2/go.mod h1:C2ekUKcDdz9SDWxec1N/MvcXBpaX9l3Nx67XaR84L5s=
+github.com/marten-seemann/qtls-go1-18 v0.1.2 h1:JH6jmzbduz0ITVQ7ShevK10Av5+jBEKAHMntXmIV7kM=
+github.com/marten-seemann/qtls-go1-18 v0.1.2/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4=
+github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1 h1:7m/WlWcSROrcK5NxuXaxYD32BZqe/LEEnBrWcH/cOqQ=
+github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI=
 github.com/matrix-org/dugong v0.0.0-20210921133753-66e6b1c67e2e h1:DP5RC0Z3XdyBEW5dKt8YPeN6vZbm6OzVaGVp7f1BQRM=
 github.com/matrix-org/dugong v0.0.0-20210921133753-66e6b1c67e2e/go.mod h1:NgPCr+UavRGH6n5jmdX8DuqFZ4JiCWIJoZiuhTRLSUg=
 github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91 h1:s7fexw2QV3YD/fRrzEDPNGgTlJlvXY0EHHnT87wF3OA=
@@ -345,8 +345,8 @@ github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16 h1:ZtO5uywdd5d
 github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16/go.mod h1:/gBX06Kw0exX1HrwmoBibFA98yBk/jxKpGVeyQbff+s=
 github.com/matrix-org/gomatrixserverlib v0.0.0-20220725104114-b6003e522771 h1:ZIPHFIPNDS9dmEbPEiJbNmyCGJtn9exfpLC7JOcn/bE=
 github.com/matrix-org/gomatrixserverlib v0.0.0-20220725104114-b6003e522771/go.mod h1:jX38yp3SSLJNftBg3PXU1ayd0PCLIiDHQ4xAc9DIixk=
-github.com/matrix-org/pinecone v0.0.0-20220708135211-1ce778fcde6a h1:DdG8vXMlZ65EAtc4V+3t7zHZ2Gqs24pSnyXS+4BRHUs=
-github.com/matrix-org/pinecone v0.0.0-20220708135211-1ce778fcde6a/go.mod h1:ulJzsVOTssIVp1j/m5eI//4VpAGDkMt5NrRuAVX7wpc=
+github.com/matrix-org/pinecone v0.0.0-20220803093810-b7a830c08fb9 h1:ed8yvWhTLk7+sNeK/eOZRTvESFTOHDRevoRoyeqPtvY=
+github.com/matrix-org/pinecone v0.0.0-20220803093810-b7a830c08fb9/go.mod h1:P4MqPf+u83OPulPJ+XTbSDbbWrdFYNY4LZ/B1PIduFE=
 github.com/matrix-org/util v0.0.0-20190711121626-527ce5ddefc7/go.mod h1:vVQlW/emklohkZnOPwD3LrZUBqdfsbiyO3p1lNV8F6U=
 github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4 h1:eCEHXWDv9Rm335MSuB49mFUK44bwZPFSDde3ORE3syk=
 github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4/go.mod h1:vVQlW/emklohkZnOPwD3LrZUBqdfsbiyO3p1lNV8F6U=
@@ -553,7 +553,6 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
 go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
 go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
@@ -663,8 +662,8 @@ golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qx
 golang.org/x/net v0.0.0-20210927181540-4e4d966f7476/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211011170408-caeb26a5c8c0/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211101193420-4a448f8816b3/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220524220425-1d687d428aca h1:xTaFYiPROfpPhqrfTIDXj0ri1SpfueYT951s4bAuDO8=
-golang.org/x/net v0.0.0-20220524220425-1d687d428aca/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e h1:TsQ7F31D3bUCLeqPT0u+yjp1guoArKaNKmCr22PYgTQ=
+golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -754,6 +753,7 @@ golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM=
 golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -762,6 +762,7 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.3.8-0.20211004125949-5bd84dd9b33b h1:NXqSWXSRUSCaFuvitrWtU169I3876zRTalMRbfd6LL0=
 golang.org/x/text v0.3.8-0.20211004125949-5bd84dd9b33b/go.mod h1:EFNZuWvGYxIRUEX+K8UmCFwYmZjqcrnq15ZuVldZkZ0=
 golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1,33 +0,0 @@
-package caching
-
-import (
-	"github.com/matrix-org/dendrite/roomserver/types"
-)
-
-// WARNING: This cache is mutable because it's entirely possible that
-// the IsStub or StateSnaphotNID fields can change, even though the
-// room version and room NID fields will not. This is only safe because
-// the RoomInfoCache is used ONLY within the roomserver and because it
-// will be kept up-to-date by the latest events updater. It MUST NOT be
-// used from other components as we currently have no way to invalidate
-// the cache in downstream components.
-
-// RoomInfosCache contains the subset of functions needed for
-// a room Info cache. It must only be used from the roomserver only
-// It is not safe for use from other components.
-type RoomInfoCache interface {
-	GetRoomInfo(roomID string) (roomInfo *types.RoomInfo, ok bool)
-	StoreRoomInfo(roomID string, roomInfo *types.RoomInfo)
-}
-
-// GetRoomInfo must only be called from the roomserver only. It is not
-// safe for use from other components.
-func (c Caches) GetRoomInfo(roomID string) (*types.RoomInfo, bool) {
-	return c.RoomInfos.Get(roomID)
-}
-
-// StoreRoomInfo must only be called from the roomserver only. It is not
-// safe for use from other components.
-func (c Caches) StoreRoomInfo(roomID string, roomInfo *types.RoomInfo) {
-	c.RoomInfos.Set(roomID, roomInfo)
-}
@@ -7,7 +7,6 @@ import (
 type RoomServerCaches interface {
 	RoomServerNIDsCache
 	RoomVersionCache
-	RoomInfoCache
 	RoomServerEventsCache
 	EventStateKeyCache
 }
@@ -29,7 +29,6 @@ type Caches struct {
 	RoomServerRoomIDs   Cache[types.RoomNID, string]                           // room NID -> room ID
 	RoomServerEvents    Cache[int64, *gomatrixserverlib.Event]                 // event NID -> event
 	RoomServerStateKeys Cache[types.EventStateKeyNID, string]                  // event NID -> event state key
-	RoomInfos           Cache[string, *types.RoomInfo]                         // room ID -> room info
 	FederationPDUs      Cache[int64, *gomatrixserverlib.HeaderedEvent]         // queue NID -> PDU
 	FederationEDUs      Cache[int64, *gomatrixserverlib.EDU]                   // queue NID -> EDU
 	SpaceSummaryRooms   Cache[string, gomatrixserverlib.MSC2946SpacesResponse] // room ID -> space response
@@ -35,7 +35,6 @@ const (
 	roomNIDsCache
 	roomIDsCache
 	roomEventsCache
-	roomInfosCache
 	federationPDUsCache
 	federationEDUsCache
 	spaceSummaryRoomsCache
@@ -106,12 +105,6 @@ func NewRistrettoCache(maxCost config.DataUnit, maxAge time.Duration, enableProm
 			Prefix: eventStateKeyCache,
 			MaxAge: maxAge,
 		},
-		RoomInfos: &RistrettoCachePartition[string, *types.RoomInfo]{ // room ID -> room info
-			cache:   cache,
-			Prefix:  roomInfosCache,
-			Mutable: true,
-			MaxAge:  maxAge,
-		},
 		FederationPDUs: &RistrettoCostedCachePartition[int64, *gomatrixserverlib.HeaderedEvent]{ // queue NID -> PDU
 			&RistrettoCachePartition[int64, *gomatrixserverlib.HeaderedEvent]{
 				cache: cache,
@@ -145,7 +145,7 @@ func setupFileHook(hook config.LogrusHook, level logrus.Level, componentName str
 	})
 }

-//CloseAndLogIfError Closes io.Closer and logs the error if any
+// CloseAndLogIfError Closes io.Closer and logs the error if any
 func CloseAndLogIfError(ctx context.Context, closer io.Closer, message string) {
 	if closer == nil {
 		return
@@ -17,7 +17,7 @@ var build string
 const (
 	VersionMajor = 0
 	VersionMinor = 9
-	VersionPatch = 0
+	VersionPatch = 1
 	VersionTag   = "" // example: "rc1"
 )

@@ -66,12 +66,14 @@ func init() {
 // - We don't have unbounded growth in proportion to the number of servers (this is more important in a P2P world where
 //   we have many many servers)
 // - We can adjust concurrency (at the cost of memory usage) by tuning N, to accommodate mobile devices vs servers.
+//
 // The downsides are that:
 // - Query requests can get queued behind other servers if they hash to the same worker, even if there are other free
 //   workers elsewhere. Whilst suboptimal, provided we cap how long a single request can last (e.g using context timeouts)
 //   we guarantee we will get around to it. Also, more users on a given server does not increase the number of requests
 //   (as /keys/query allows multiple users to be specified) so being stuck behind matrix.org won't materially be any worse
 //   than being stuck behind foo.bar
+//
 // In the event that the query fails, a lock is acquired and the server name along with the time to wait before retrying is
 // set in a map. A restarter goroutine periodically probes this map and injects servers which are ready to be retried.
 type DeviceListUpdater struct {
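The comment above describes hashing remote server names onto a fixed pool of N workers so that updates for one server are serialised while memory and concurrency stay bounded. A rough sketch of just that assignment step (illustrative only; the function name and use of FNV are assumptions, not dendrite's actual code):

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// workerForServer picks a worker index for a server name. Because the hash
// is deterministic, all updates for the same server land on the same worker
// and are processed serially; total concurrency is bounded by numWorkers.
func workerForServer(serverName string, numWorkers int) int {
	h := fnv.New32a()
	_, _ = h.Write([]byte(serverName)) // fnv Write never fails
	return int(h.Sum32()) % numWorkers
}

func main() {
	for _, s := range []string{"matrix.org", "example.com", "foo.bar"} {
		fmt.Printf("%-12s -> worker %d\n", s, workerForServer(s, 4))
	}
}
```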
@@ -18,6 +18,7 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"sync"
 	"time"
@@ -314,6 +315,11 @@ func (a *KeyInternalAPI) QueryKeys(ctx context.Context, req *api.QueryKeysReques
 			for targetKeyID := range masterKey.Keys {
 				sigMap, err := a.DB.CrossSigningSigsForTarget(ctx, req.UserID, targetUserID, targetKeyID)
 				if err != nil {
+					// Stop executing the function if the context was canceled/the deadline was exceeded,
+					// as we can't continue without a valid context.
+					if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+						return
+					}
 					logrus.WithError(err).Errorf("a.DB.CrossSigningSigsForTarget failed")
 					continue
 				}
@@ -335,6 +341,11 @@ func (a *KeyInternalAPI) QueryKeys(ctx context.Context, req *api.QueryKeysReques
 			for targetKeyID, key := range forUserID {
 				sigMap, err := a.DB.CrossSigningSigsForTarget(ctx, req.UserID, targetUserID, gomatrixserverlib.KeyID(targetKeyID))
 				if err != nil {
+					// Stop executing the function if the context was canceled/the deadline was exceeded,
+					// as we can't continue without a valid context.
+					if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+						return
+					}
 					logrus.WithError(err).Errorf("a.DB.CrossSigningSigsForTarget failed")
 					continue
 				}
@@ -149,6 +149,9 @@ func makeDownloadAPI(
 		}
 	}

+	// Cache media for at least one day.
+	w.Header().Set("Cache-Control", "public,max-age=86400,s-maxage=86400")
+
 	Download(
 		w,
 		req,
@@ -60,8 +60,9 @@ import (
 // per-room durable consumers will only progress through the stream
 // as events are processed.
 //
-//   A BC  *  -> positions of each consumer (* = ephemeral)
-//   ⌄ ⌄⌄  ⌄
+//	A BC  *  -> positions of each consumer (* = ephemeral)
+//	⌄ ⌄⌄  ⌄
+//
 // ABAABCAABCAA -> newest (letter = subject for each message)
 //
 // In this example, A is still processing an event but has two
@@ -35,17 +35,17 @@ import (
 // event to the output log.
 // The latest events are the events that aren't referenced by another event in the database:
 //
-// Time goes down the page. 1 is the m.room.create event (root).
+// Time goes down the page. 1 is the m.room.create event (root).
 //
-//   1   After storing 1 the latest events are {1}
-//   |   After storing 2 the latest events are {2}
-//   2   After storing 3 the latest events are {3}
-//  / \  After storing 4 the latest events are {3,4}
-// 3   4 After storing 5 the latest events are {5,4}
-// |   | After storing 6 the latest events are {5,6}
-// 5   6 <--- latest After storing 7 the latest events are {6,7}
-//     |
-//     7 <----- latest
+//	  1   After storing 1 the latest events are {1}
+//	  |   After storing 2 the latest events are {2}
+//	  2   After storing 3 the latest events are {3}
+//	 / \  After storing 4 the latest events are {3,4}
+//	3   4 After storing 5 the latest events are {5,4}
+//	|   | After storing 6 the latest events are {5,6}
+//	5   6 <--- latest After storing 7 the latest events are {6,7}
+//	    |
+//	    7 <----- latest
 //
 // Can only be called once at a time
 func (r *Inputer) updateLatestEvents(
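The diagram above defines the latest events as the events that no other event references via prev_events. A toy illustration of that rule (not dendrite code; `toyEvent` is a stand-in for the real event type):

```go
package main

import "fmt"

// toyEvent stands in for a room event: an ID plus the IDs it lists as
// prev_events.
type toyEvent struct {
	ID         string
	PrevEvents []string
}

// latestEvents returns the IDs of events that no other event references.
func latestEvents(events []toyEvent) []string {
	referenced := make(map[string]bool)
	for _, ev := range events {
		for _, prev := range ev.PrevEvents {
			referenced[prev] = true
		}
	}
	var latest []string
	for _, ev := range events {
		if !referenced[ev.ID] {
			latest = append(latest, ev.ID)
		}
	}
	return latest
}

func main() {
	// The graph from the diagram, with 7 extending 5, so that after
	// storing 7 the latest events are {6, 7}.
	events := []toyEvent{
		{"1", nil}, {"2", []string{"1"}}, {"3", []string{"2"}},
		{"4", []string{"2"}}, {"5", []string{"3"}},
		{"6", []string{"4"}}, {"7", []string{"5"}},
	}
	fmt.Println(latestEvents(events)) // [6 7]
}
```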
@@ -326,8 +326,10 @@ func (t *missingStateReq) lookupStateAfterEvent(ctx context.Context, roomVersion
 		return respState, true, nil
 	}

+	logrus.WithContext(ctx).Warnf("State for event %s not available locally, falling back to federation (via %d servers)", eventID, len(t.servers))
 	respState, err := t.lookupStateBeforeEvent(ctx, roomVersion, roomID, eventID)
 	if err != nil {
+		logrus.WithContext(ctx).WithError(err).Errorf("Failed to look up state before event %s", eventID)
 		return nil, false, fmt.Errorf("t.lookupStateBeforeEvent: %w", err)
 	}

@@ -339,6 +341,7 @@ func (t *missingStateReq) lookupStateAfterEvent(ctx context.Context, roomVersion
 	case nil:
 		// do nothing
 	default:
+		logrus.WithContext(ctx).WithError(err).Errorf("Failed to look up event %s", eventID)
 		return nil, false, fmt.Errorf("t.lookupEvent: %w", err)
 	}
 	h = t.cacheAndReturn(h)
@@ -375,11 +378,7 @@ func (t *missingStateReq) lookupStateAfterEventLocally(ctx context.Context, room
 	defer span.Finish()

 	var res parsedRespState
-	roomInfo, err := t.db.RoomInfo(ctx, roomID)
-	if err != nil {
-		return nil
-	}
-	roomState := state.NewStateResolution(t.db, roomInfo)
+	roomState := state.NewStateResolution(t.db, t.roomInfo)
 	stateAtEvents, err := t.db.StateAtEventIDs(ctx, []string{eventID})
 	if err != nil {
 		util.GetLogger(ctx).WithField("room_id", roomID).WithError(err).Warnf("failed to get state after %s locally", eventID)
@@ -666,9 +665,22 @@ func (t *missingStateReq) lookupMissingStateViaStateIDs(ctx context.Context, roo

 	util.GetLogger(ctx).WithField("room_id", roomID).Infof("lookupMissingStateViaStateIDs %s", eventID)
 	// fetch the state event IDs at the time of the event
-	stateIDs, err := t.federation.LookupStateIDs(ctx, t.origin, roomID, eventID)
+	var stateIDs gomatrixserverlib.RespStateIDs
+	var err error
+	count := 0
+	totalctx, totalcancel := context.WithTimeout(ctx, time.Minute*5)
+	for _, serverName := range t.servers {
+		reqctx, reqcancel := context.WithTimeout(totalctx, time.Second*20)
+		stateIDs, err = t.federation.LookupStateIDs(reqctx, serverName, roomID, eventID)
+		reqcancel()
+		if err == nil {
+			break
+		}
+		count++
+	}
+	totalcancel()
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("t.federation.LookupStateIDs tried %d server(s), last error: %w", count, err)
 	}
 	// work out which auth/state IDs are missing
 	wantIDs := append(stateIDs.StateEventIDs, stateIDs.AuthEventIDs...)
@@ -754,9 +766,8 @@ func (t *missingStateReq) lookupMissingStateViaStateIDs(ctx context.Context, roo

 	// Define what we'll do in order to fetch the missing event ID.
 	fetch := func(missingEventID string) {
-		var h *gomatrixserverlib.Event
-		h, err = t.lookupEvent(ctx, roomVersion, roomID, missingEventID, false)
-		switch err.(type) {
+		h, herr := t.lookupEvent(ctx, roomVersion, roomID, missingEventID, false)
+		switch herr.(type) {
 		case verifySigError:
 			return
 		case nil:
@@ -765,7 +776,7 @@ func (t *missingStateReq) lookupMissingStateViaStateIDs(ctx context.Context, roo
 			util.GetLogger(ctx).WithFields(logrus.Fields{
 				"event_id": missingEventID,
 				"room_id":  roomID,
-			}).Warn("Failed to fetch missing event")
+			}).WithError(herr).Warn("Failed to fetch missing event")
 			return
 		}
 		haveEventsMutex.Lock()
@@ -523,7 +523,8 @@ func (b *backfillRequester) ProvideEvents(roomVer gomatrixserverlib.RoomVersion,

 // joinEventsFromHistoryVisibility returns all CURRENTLY joined members if our server can read the room history
 // TODO: Long term we probably want a history_visibility table which stores eventNID | visibility_enum so we can just
-// pull all events and then filter by that table.
+//
+// pull all events and then filter by that table.
 func joinEventsFromHistoryVisibility(
 	ctx context.Context, db storage.Database, roomID string, stateEntries []types.StateEntry,
 	thisServer gomatrixserverlib.ServerName) ([]types.Event, error) {
@@ -357,8 +357,8 @@ func (r *Queryer) QueryServerAllowedToSeeEvent(
 	if err != nil {
 		return err
 	}
-	if info == nil {
-		return fmt.Errorf("QueryServerAllowedToSeeEvent: no room info for room %s", roomID)
+	if info == nil || info.IsStub() {
+		return nil
 	}
 	response.AllowedToSeeEvent, err = helpers.CheckServerAllowedToSeeEvent(
 		ctx, r.DB, info, request.EventID, request.ServerName, inRoomRes.IsInRoom,
@@ -217,6 +217,14 @@ func (u *RoomUpdater) SetLatestEvents(
 	roomNID types.RoomNID, latest []types.StateAtEventAndReference, lastEventNIDSent types.EventNID,
 	currentStateSnapshotNID types.StateSnapshotNID,
 ) error {
+	switch {
+	case len(latest) == 0:
+		return fmt.Errorf("cannot set latest events with no latest event references")
+	case currentStateSnapshotNID == 0:
+		return fmt.Errorf("cannot set latest events with invalid state snapshot NID")
+	case lastEventNIDSent == 0:
+		return fmt.Errorf("cannot set latest events with invalid latest event NID")
+	}
 	eventNIDs := make([]types.EventNID, len(latest))
 	for i := range latest {
 		eventNIDs[i] = latest[i].EventNID
@@ -229,8 +237,10 @@ func (u *RoomUpdater) SetLatestEvents(
 		// Since it's entirely possible that this types.RoomInfo came from the
 		// cache, we should make sure to update that entry so that the next run
 		// works from live data.
-		u.roomInfo.SetStateSnapshotNID(currentStateSnapshotNID)
-		u.roomInfo.SetIsStub(false)
+		if u.roomInfo != nil {
+			u.roomInfo.SetStateSnapshotNID(currentStateSnapshotNID)
+			u.roomInfo.SetIsStub(false)
+		}
 		return nil
 	})
 }
@@ -156,13 +156,13 @@ func (d *Database) RoomInfo(ctx context.Context, roomID string) (*types.RoomInfo
 }

 func (d *Database) roomInfo(ctx context.Context, txn *sql.Tx, roomID string) (*types.RoomInfo, error) {
-	if roomInfo, ok := d.Cache.GetRoomInfo(roomID); ok && roomInfo != nil {
-		return roomInfo, nil
-	}
 	roomInfo, err := d.RoomsTable.SelectRoomInfo(ctx, txn, roomID)
-	if err == nil && roomInfo != nil {
+	if err != nil {
+		return nil, err
+	}
+	if roomInfo != nil {
 		d.Cache.StoreRoomServerRoomID(roomInfo.RoomNID, roomID)
-		d.Cache.StoreRoomInfo(roomID, roomInfo)
+		d.Cache.StoreRoomVersion(roomID, roomInfo.RoomVersion)
 	}
 	return roomInfo, err
 }
@@ -489,8 +489,8 @@ func (d *Database) events(
 	fetchNIDList := make([]types.RoomNID, 0, len(uniqueRoomNIDs))
 	for n := range uniqueRoomNIDs {
 		if roomID, ok := d.Cache.GetRoomServerRoomID(n); ok {
-			if roomInfo, ok := d.Cache.GetRoomInfo(roomID); ok {
-				roomVersions[n] = roomInfo.RoomVersion
+			if roomVersion, ok := d.Cache.GetRoomVersion(roomID); ok {
+				roomVersions[n] = roomVersion
 				continue
 			}
 		}
@@ -676,7 +676,7 @@ func (d *Database) storeEvent(
 	succeeded := false
 	if updater == nil {
 		var roomInfo *types.RoomInfo
-		roomInfo, err = d.RoomInfo(ctx, event.RoomID())
+		roomInfo, err = d.roomInfo(ctx, txn, event.RoomID())
 		if err != nil {
 			return 0, 0, types.StateAtEvent{}, nil, "", fmt.Errorf("d.RoomInfo: %w", err)
 		}
@@ -747,9 +747,6 @@ func (d *Database) MissingAuthPrevEvents(
 func (d *Database) assignRoomNID(
 	ctx context.Context, roomID string, roomVersion gomatrixserverlib.RoomVersion,
 ) (types.RoomNID, error) {
-	if roomInfo, ok := d.Cache.GetRoomInfo(roomID); ok {
-		return roomInfo.RoomNID, nil
-	}
 	// Check if we already have a numeric ID in the database.
 	roomNID, err := d.RoomsTable.SelectRoomNID(ctx, nil, roomID)
 	if err == sql.ErrNoRows {
@@ -822,8 +819,9 @@ func extractRoomVersionFromCreateEvent(event *gomatrixserverlib.Event) (
 // "servers should not apply or send redactions to clients until both the redaction event and original event have been seen, and are valid."
 // https://matrix.org/docs/spec/rooms/v3#authorization-rules-for-events
 // These cases are:
-// - This is a redaction event, redact the event it references if we know about it.
-// - This is a normal event which may have been previously redacted.
+//   - This is a redaction event, redact the event it references if we know about it.
+//   - This is a normal event which may have been previously redacted.
+//
 // In the first case, check if we have the referenced event then apply the redaction, else store it
 // in the redactions table with validated=FALSE. In the second case, check if there is a redaction for it:
 // if there is then apply the redactions and set validated=TRUE.
|
|
@ -178,7 +178,9 @@ type StrippedEvent struct {
|
|||
}
|
||||
|
||||
// ExtractContentValue from the given state event. For example, given an m.room.name event with:
|
||||
// content: { name: "Foo" }
|
||||
//
|
||||
// content: { name: "Foo" }
|
||||
//
|
||||
// this returns "Foo".
|
||||
func ExtractContentValue(ev *gomatrixserverlib.HeaderedEvent) string {
|
||||
content := ev.Content()
|
||||
|
|
|
|||
|
|
@@ -310,3 +310,16 @@ func (r *RoomInfo) SetIsStub(isStub bool) {
 	defer r.mu.Unlock()
 	r.isStub = isStub
 }
+
+func (r *RoomInfo) CopyFrom(r2 *RoomInfo) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	r2.mu.RLock()
+	defer r2.mu.RUnlock()
+
+	r.RoomNID = r2.RoomNID
+	r.RoomVersion = r2.RoomVersion
+	r.stateSnapshotNID = r2.stateSnapshotNID
+	r.isStub = r2.isStub
+}
@@ -17,6 +17,10 @@ type JetStream struct {
 	TopicPrefix string `yaml:"topic_prefix"`
 	// Keep all storage in memory. This is mostly useful for unit tests.
 	InMemory bool `yaml:"in_memory"`
+	// Disable logging. This is mostly useful for unit tests.
+	NoLog bool `yaml:"-"`
+	// Disables TLS validation. This should NOT be used in production
+	DisableTLSValidation bool `yaml:"disable_tls_validation"`
 }

 func (c *JetStream) Prefixed(name string) string {
@@ -32,6 +36,8 @@ func (c *JetStream) Defaults(generate bool) {
 	c.TopicPrefix = "Dendrite"
 	if generate {
 		c.StoragePath = Path("./")
+		c.NoLog = true
+		c.DisableTLSValidation = true
 	}
 }

@@ -1,6 +1,7 @@
 package jetstream

 import (
+	"crypto/tls"
 	"fmt"
 	"reflect"
 	"strings"
@@ -45,6 +46,7 @@ func (s *NATSInstance) Prepare(process *process.ProcessContext, cfg *config.JetS
 		NoSystemAccount: true,
 		MaxPayload:      16 * 1024 * 1024,
 		NoSigs:          true,
+		NoLog:           cfg.NoLog,
 	})
 	if err != nil {
 		panic(err)
@@ -75,7 +77,13 @@
 func setupNATS(process *process.ProcessContext, cfg *config.JetStream, nc *natsclient.Conn) (natsclient.JetStreamContext, *natsclient.Conn) {
 	if nc == nil {
 		var err error
-		nc, err = natsclient.Connect(strings.Join(cfg.Addresses, ","))
+		opts := []nats.Option{}
+		if cfg.DisableTLSValidation {
+			opts = append(opts, nats.Secure(&tls.Config{
+				InsecureSkipVerify: true,
+			}))
+		}
+		nc, err = natsclient.Connect(strings.Join(cfg.Addresses, ","), opts...)
 		if err != nil {
 			logrus.WithError(err).Panic("Unable to connect to NATS")
 			return nil, nil
@@ -32,15 +32,19 @@ var (
 )

 // Basic sanity check of MSC2836 logic. Injects a thread that looks like:
-//   A
-//   |
-//   B
-//  / \
+//
+//	  A
+//	  |
+//	  B
+//	 / \
+//
 // C   D
-//    /|\
-//   E F G
-//     |
-//     H
+//	   /|\
+//	  E F G
+//	    |
+//	    H
+//
 // And makes sure POST /event_relationships works with various parameters
 func TestMSC2836(t *testing.T) {
 	alice := "@alice:localhost"
@@ -25,10 +25,9 @@ import (
 	"github.com/matrix-org/dendrite/syncapi/types"
 	"github.com/matrix-org/gomatrixserverlib"
 	"github.com/matrix-org/util"
+	"github.com/sirupsen/logrus"
 )

-const DeviceListLogName = "dl"
-
 // DeviceOTKCounts adds one-time key counts to the /sync response
 func DeviceOTKCounts(ctx context.Context, keyAPI keyapi.SyncKeyAPI, userID, deviceID string, res *types.Response) error {
 	var queryRes keyapi.QueryOneTimeKeysResponse
@@ -93,18 +92,13 @@ func DeviceListCatchup(
 	queryRes.UserIDs = append(queryRes.UserIDs, joinUserIDs...)
 	queryRes.UserIDs = append(queryRes.UserIDs, leaveUserIDs...)
 	queryRes.UserIDs = util.UniqueStrings(queryRes.UserIDs)
-	var sharedUsersMap map[string]int
-	sharedUsersMap, queryRes.UserIDs = filterSharedUsers(ctx, db, userID, queryRes.UserIDs)
-	util.GetLogger(ctx).Debugf(
-		"QueryKeyChanges request off=%d,to=%d response off=%d uids=%v",
-		offset, toOffset, queryRes.Offset, queryRes.UserIDs,
-	)
+	sharedUsersMap := filterSharedUsers(ctx, db, userID, queryRes.UserIDs)
 	userSet := make(map[string]bool)
 	for _, userID := range res.DeviceLists.Changed {
 		userSet[userID] = true
 	}
-	for _, userID := range queryRes.UserIDs {
-		if !userSet[userID] {
+	for userID, count := range sharedUsersMap {
+		if !userSet[userID] && count > 0 {
 			res.DeviceLists.Changed = append(res.DeviceLists.Changed, userID)
 			hasNew = true
 			userSet[userID] = true
@@ -113,7 +107,7 @@ func DeviceListCatchup(
 	// Finally, add in users who have joined or left.
 	// TODO: This is sub-optimal because we will add users to `changed` even if we already shared a room with them.
 	for _, userID := range joinUserIDs {
-		if !userSet[userID] {
+		if !userSet[userID] && sharedUsersMap[userID] > 0 {
 			res.DeviceLists.Changed = append(res.DeviceLists.Changed, userID)
 			hasNew = true
 			userSet[userID] = true
@@ -126,6 +120,13 @@ func DeviceListCatchup(
 		}
 	}

+	util.GetLogger(ctx).WithFields(logrus.Fields{
+		"user_id":         userID,
+		"from":            offset,
+		"to":              toOffset,
+		"response_offset": queryRes.Offset,
+	}).Debugf("QueryKeyChanges request result: %+v", res.DeviceLists)
+
 	return types.StreamPosition(queryRes.Offset), hasNew, nil
 }

@@ -220,24 +221,27 @@ func TrackChangedUsers(
 // it down to include only users who the requesting user shares a room with.
 func filterSharedUsers(
 	ctx context.Context, db storage.SharedUsers, userID string, usersWithChangedKeys []string,
-) (map[string]int, []string) {
+) map[string]int {
 	sharedUsersMap := make(map[string]int, len(usersWithChangedKeys))
-	for _, userID := range usersWithChangedKeys {
-		sharedUsersMap[userID] = 0
+	for _, changedUserID := range usersWithChangedKeys {
+		sharedUsersMap[changedUserID] = 0
+		if changedUserID == userID {
+			// We forcibly put ourselves in this list because we should be notified about our own device updates
+			// and if we are in 0 rooms then we don't technically share any room with ourselves so we wouldn't
+			// be notified about key changes.
+			sharedUsersMap[userID] = 1
+		}
 	}
 	sharedUsers, err := db.SharedUsers(ctx, userID, usersWithChangedKeys)
 	if err != nil {
 		util.GetLogger(ctx).WithError(err).Errorf("db.SharedUsers failed: %s", err)
 		// default to all users so we do needless queries rather than miss some important device update
-		return nil, usersWithChangedKeys
+		return sharedUsersMap
 	}
 	for _, userID := range sharedUsers {
 		sharedUsersMap[userID]++
 	}
-	// We forcibly put ourselves in this list because we should be notified about our own device updates
-	// and if we are in 0 rooms then we don't technically share any room with ourselves so we wouldn't
-	// be notified about key changes.
-	sharedUsersMap[userID] = 1
-	return sharedUsersMap, sharedUsers
+	return sharedUsersMap
 }

 func joinedRooms(res *types.Response, userID string) []string {
@@ -129,6 +129,7 @@ type wantCatchup struct {
 }

 func assertCatchup(t *testing.T, hasNew bool, syncResponse *types.Response, want wantCatchup) {
+	t.Helper()
 	if hasNew != want.hasNew {
 		t.Errorf("got hasNew=%v want %v", hasNew, want.hasNew)
 	}
@@ -363,13 +364,14 @@ func TestKeyChangeCatchupChangeAndLeft(t *testing.T) {

 // tests that joining/leaving the SAME room puts users in `left` if the final state is leave.
 // NB: Consider the case:
-// - Alice and Bob are in a room.
-// - Alice goes offline, Charlie joins, sends encrypted messages then leaves the room.
-// - Alice comes back online. Technically nothing has changed in the set of users between those two points in time,
-//   it's still just (Alice,Bob) but then we won't be tracking Charlie -- is this okay though? It's device keys
-//   which are only relevant when actively sending events I think? And if Alice does need the keys she knows
-//   charlie's (user_id, device_id) so can just hit /keys/query - no need to keep updated about it because she
-//   doesn't share any rooms with him.
+//   - Alice and Bob are in a room.
+//   - Alice goes offline, Charlie joins, sends encrypted messages then leaves the room.
+//   - Alice comes back online. Technically nothing has changed in the set of users between those two points in time,
+//     it's still just (Alice,Bob) but then we won't be tracking Charlie -- is this okay though? It's device keys
+//     which are only relevant when actively sending events I think? And if Alice does need the keys she knows
+//     charlie's (user_id, device_id) so can just hit /keys/query - no need to keep updated about it because she
+//     doesn't share any rooms with him.
+//
 // Ergo, we put them in `left` as it is simpler.
 func TestKeyChangeCatchupChangeAndLeftSameRoom(t *testing.T) {
 	newShareUser := "@berta:localhost"
@@ -65,7 +65,7 @@ type filterResponse struct {
 	FilterID string `json:"filter_id"`
 }

-//PutFilter implements POST /_matrix/client/r0/user/{userId}/filter
+// PutFilter implements POST /_matrix/client/r0/user/{userId}/filter
 func PutFilter(
 	req *http.Request, device *api.Device, syncDB storage.Database, userID string,
 ) util.JSONResponse {
@@ -112,7 +112,7 @@ const selectEventsWithEventIDsSQL = "" +
 const selectSharedUsersSQL = "" +
 	"SELECT state_key FROM syncapi_current_room_state WHERE room_id = ANY(" +
 	" SELECT room_id FROM syncapi_current_room_state WHERE state_key = $1 AND membership='join'" +
-	") AND state_key = ANY($2) AND membership='join';"
+	") AND state_key = ANY($2) AND membership IN ('join', 'invite');"

 type currentRoomStateStatements struct {
 	upsertRoomStateStmt *sql.Stmt
@@ -407,7 +407,7 @@ func (s *currentRoomStateStatements) SelectSharedUsers(
 	ctx context.Context, txn *sql.Tx, userID string, otherUserIDs []string,
 ) ([]string, error) {
 	stmt := sqlutil.TxStmt(txn, s.selectSharedUsersStmt)
-	rows, err := stmt.QueryContext(ctx, userID, otherUserIDs)
+	rows, err := stmt.QueryContext(ctx, userID, pq.Array(otherUserIDs))
 	if err != nil {
 		return nil, err
 	}
@@ -58,7 +58,7 @@ const selectSendToDeviceMessagesSQL = `

 const deleteSendToDeviceMessagesSQL = `
 	DELETE FROM syncapi_send_to_device
-	WHERE user_id = $1 AND device_id = $2 AND id < $3
+	WHERE user_id = $1 AND device_id = $2 AND id <= $3
`

 const selectMaxSendToDeviceIDSQL = "" +
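The change from `id < $3` to `id <= $3` matters because the cleanup is invoked with the stream position the client has already synced to, so the message at exactly that position has been delivered and must be deleted as well. A toy model of the intended semantics (an illustration, not dendrite's storage layer):

```go
package main

import "fmt"

// cleanDelivered models the fixed query: delete every message at or below
// the acknowledged stream position. With the old exclusive bound (id < pos)
// the message stored at exactly pos would survive and be re-sent.
func cleanDelivered(messages map[int64]string, pos int64) {
	for id := range messages {
		if id <= pos {
			delete(messages, id)
		}
	}
}

func main() {
	msgs := map[int64]string{1: "a", 2: "b", 3: "c"}
	cleanDelivered(msgs, 2) // the client synced up to position 2
	fmt.Println(msgs)       // map[3:c]: only the undelivered message remains
}
```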
@@ -94,9 +94,9 @@ const selectEventsWithEventIDsSQL = "" +
 	" FROM syncapi_current_room_state WHERE event_id IN ($1)"

 const selectSharedUsersSQL = "" +
-	"SELECT state_key FROM syncapi_current_room_state WHERE room_id = ANY(" +
+	"SELECT state_key FROM syncapi_current_room_state WHERE room_id IN(" +
 	" SELECT room_id FROM syncapi_current_room_state WHERE state_key = $1 AND membership='join'" +
-	") AND state_key IN ($2) AND membership='join';"
+	") AND state_key IN ($2) AND membership IN ('join', 'invite');"

 type currentRoomStateStatements struct {
 	db *sql.DB
@@ -420,25 +420,28 @@ func (s *currentRoomStateStatements) SelectStateEvent(
 func (s *currentRoomStateStatements) SelectSharedUsers(
 	ctx context.Context, txn *sql.Tx, userID string, otherUserIDs []string,
 ) ([]string, error) {
-	query := strings.Replace(selectSharedUsersSQL, "($2)", sqlutil.QueryVariadicOffset(len(otherUserIDs), 1), 1)
-	stmt, err := s.db.Prepare(query)
-	if err != nil {
-		return nil, fmt.Errorf("SelectSharedUsers s.db.Prepare: %w", err)
-	}
-	defer internal.CloseAndLogIfError(ctx, stmt, "SelectSharedUsers: stmt.close() failed")
-	rows, err := sqlutil.TxStmt(txn, stmt).QueryContext(ctx, userID, otherUserIDs)
-	if err != nil {
-		return nil, err
-	}
-	defer internal.CloseAndLogIfError(ctx, rows, "selectSharedUsersStmt: rows.close() failed")
-
-	var stateKey string
-	result := make([]string, 0, len(otherUserIDs))
-	for rows.Next() {
-		if err := rows.Scan(&stateKey); err != nil {
-			return nil, err
-		}
-		result = append(result, stateKey)
+	params := make([]interface{}, len(otherUserIDs)+1)
+	params[0] = userID
+	for k, v := range otherUserIDs {
+		params[k+1] = v
 	}
-	return result, rows.Err()
+
+	result := make([]string, 0, len(otherUserIDs))
+	query := strings.Replace(selectSharedUsersSQL, "($2)", sqlutil.QueryVariadicOffset(len(otherUserIDs), 1), 1)
+	err := sqlutil.RunLimitedVariablesQuery(
+		ctx, query, s.db, params, sqlutil.SQLite3MaxVariables,
+		func(rows *sql.Rows) error {
+			var stateKey string
+			for rows.Next() {
+				if err := rows.Scan(&stateKey); err != nil {
+					return err
+				}
+				result = append(result, stateKey)
+			}
+			return nil
+		},
+	)
+
+	return result, err
 }

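The SQLite rewrite above is needed because SQLite limits the number of bound variables per statement (SQLITE_MAX_VARIABLE_NUMBER, historically 999), so expanding the IN (...) list for a long otherUserIDs slice can make the statement fail outright. Dendrite's sqlutil.RunLimitedVariablesQuery executes the query in chunks that stay under the limit; the sketch below shows the general batching idea with invented names, not the helper's actual implementation:

package sketch

import (
	"context"
	"database/sql"
	"fmt"
	"strings"
)

// maxVariables stands in for sqlutil.SQLite3MaxVariables; 999 is SQLite's
// historical default for SQLITE_MAX_VARIABLE_NUMBER.
const maxVariables = 999

// runChunked executes the IN (...) query repeatedly, never binding more than
// maxVariables parameters per statement, and hands each result set to f.
func runChunked(ctx context.Context, db *sql.DB, ids []string, f func(*sql.Rows) error) error {
	for len(ids) > 0 {
		n := len(ids)
		if n > maxVariables {
			n = maxVariables
		}
		// Build "?,?,...,?" with one placeholder per parameter in this chunk.
		placeholders := strings.TrimSuffix(strings.Repeat("?,", n), ",")
		query := fmt.Sprintf(
			"SELECT state_key FROM syncapi_current_room_state WHERE state_key IN (%s)",
			placeholders,
		)
		args := make([]interface{}, n)
		for i := 0; i < n; i++ {
			args[i] = ids[i]
		}
		rows, err := db.QueryContext(ctx, query, args...)
		if err != nil {
			return err
		}
		if err := f(rows); err != nil {
			rows.Close()
			return err
		}
		rows.Close()
		ids = ids[n:] // advance to the next chunk
	}
	return nil
}
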
@@ -55,7 +55,7 @@ const selectSendToDeviceMessagesSQL = `
 
 const deleteSendToDeviceMessagesSQL = `
 	DELETE FROM syncapi_send_to_device
-	 WHERE user_id = $1 AND device_id = $2 AND id < $3
+	 WHERE user_id = $1 AND device_id = $2 AND id <= $3
 `
 
 const selectMaxSendToDeviceIDSQL = "" +

@@ -416,11 +416,6 @@ func TestSendToDeviceBehaviour(t *testing.T) {
 		t.Fatal("first call should have no updates")
 	}
 
-	err = db.CleanSendToDeviceUpdates(context.Background(), alice.ID, deviceID, 100)
-	if err != nil {
-		return
-	}
-
 	// Try sending a message.
 	streamPos, err := db.StoreNewSendForDeviceMessage(ctx, alice.ID, deviceID, gomatrixserverlib.SendToDeviceEvent{
 		Sender: bob.ID,

@@ -441,43 +436,35 @@ func TestSendToDeviceBehaviour(t *testing.T) {
 	if count := len(events); count != 1 {
 		t.Fatalf("second call should have one update, got %d", count)
 	}
-	err = db.CleanSendToDeviceUpdates(context.Background(), alice.ID, deviceID, streamPos)
-	if err != nil {
-		return
-	}
 
 	// At this point we should still have one message because we haven't progressed the
 	// sync position yet. This is equivalent to the client failing to /sync and retrying
 	// with the same position.
-	streamPos, events, err = db.SendToDeviceUpdatesForSync(ctx, alice.ID, deviceID, 0, 100)
+	streamPos, events, err = db.SendToDeviceUpdatesForSync(ctx, alice.ID, deviceID, 0, streamPos)
 	if err != nil {
 		t.Fatal(err)
 	}
 	if len(events) != 1 {
 		t.Fatal("third call should have one update still")
 	}
-	err = db.CleanSendToDeviceUpdates(context.Background(), alice.ID, deviceID, streamPos+1)
+	err = db.CleanSendToDeviceUpdates(context.Background(), alice.ID, deviceID, streamPos)
 	if err != nil {
 		return
 	}
 
 	// At this point we should now have no updates, because we've progressed the sync
 	// position. Therefore the update from before will not be sent again.
-	_, events, err = db.SendToDeviceUpdatesForSync(ctx, alice.ID, deviceID, streamPos+1, streamPos+2)
+	_, events, err = db.SendToDeviceUpdatesForSync(ctx, alice.ID, deviceID, streamPos, streamPos+10)
 	if err != nil {
 		t.Fatal(err)
 	}
 	if len(events) != 0 {
 		t.Fatal("fourth call should have no updates")
 	}
-	err = db.CleanSendToDeviceUpdates(context.Background(), alice.ID, deviceID, streamPos+1)
-	if err != nil {
-		return
-	}
 
 	// At this point we should still have no updates, because no new updates have been
 	// sent.
-	_, events, err = db.SendToDeviceUpdatesForSync(ctx, alice.ID, deviceID, streamPos, streamPos+2)
+	_, events, err = db.SendToDeviceUpdatesForSync(ctx, alice.ID, deviceID, streamPos, streamPos+10)
 	if err != nil {
 		t.Fatal(err)
 	}

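The reworked assertions above pin down the intended at-least-once semantics: a send-to-device message keeps being returned for any /sync whose since token precedes its position (a failed sync retried with the same token sees it again), and it only drops out once the client presents a token at or beyond that position. A toy model of that rule (plain Go, not Dendrite code):

package main

import "fmt"

type msg struct {
	pos  int64 // stream position assigned when the message was stored
	body string
}

// pending returns what a /sync with the given since token should deliver.
func pending(queue []msg, since int64) []string {
	var out []string
	for _, m := range queue {
		if m.pos > since {
			out = append(out, m.body)
		}
	}
	return out
}

func main() {
	queue := []msg{{pos: 1, body: "message 1"}}
	fmt.Println(pending(queue, 0)) // [message 1]
	fmt.Println(pending(queue, 0)) // [message 1]: failed sync retried with the same token
	fmt.Println(pending(queue, 1)) // []: token advanced, message acknowledged
}
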
@@ -491,7 +478,7 @@ func TestSendToDeviceBehaviour(t *testing.T) {
 	streamPos, err = db.StoreNewSendForDeviceMessage(ctx, alice.ID, deviceID, gomatrixserverlib.SendToDeviceEvent{
 		Sender:  bob.ID,
 		Type:    "m.type",
-		Content: json.RawMessage(fmt.Sprintf(`{ "count": %d }`, i)),
+		Content: json.RawMessage(fmt.Sprintf(`{"count":%d}`, i)),
 	})
 	if err != nil {
 		t.Fatal(err)

@@ -122,12 +122,14 @@ type CurrentRoomState interface {
 //
 // We persist the previous event IDs as well, one per row, so when we do fetch even
 // earlier events we can simply delete rows which referenced it. Consider the graph:
-//     A
-//     |   Event C has 1 prev_event ID: A.
-// B   C
-// |___|   Event D has 2 prev_event IDs: B and C.
-//   |
-//   D
+//
+//	    A
+//	    |   Event C has 1 prev_event ID: A.
+//	B   C
+//	|___|   Event D has 2 prev_event IDs: B and C.
+//	  |
+//	  D
+//
 // The earliest known event we have is D, so this table has 2 rows.
 // A backfill request gives us C but not B. We delete rows where prev_event=C. This
 // still means that D is a backwards extremity as we do not have event B. However, event

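The re-indented comment (a Go 1.19 gofmt doc-comment fix) describes bookkeeping concrete enough to act out: the table holds one row per (event, prev_event) edge at the backwards edge of our timeline, and backfilled events delete the rows that referenced them. A toy walkthrough of the A/B/C/D graph above (plain Go, not Dendrite's schema or code):

package main

import "fmt"

// row models one (event, prev_event) entry in the table described above.
type row struct{ event, prevEvent string }

func main() {
	// The earliest known event is D, with prev_events B and C: two rows.
	table := []row{{"D", "B"}, {"D", "C"}}

	// A backfill response delivers C (but not B): delete rows referencing C.
	var remaining []row
	for _, r := range table {
		if r.prevEvent != "C" {
			remaining = append(remaining, r)
		}
	}

	// One row is left, so D is still a backwards extremity until B arrives.
	fmt.Println(remaining) // [{D B}]
}
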
@@ -261,9 +261,9 @@ func (p *PDUStreamProvider) addRoomDeltaToResponse(
 	var pos types.StreamPosition
 	if _, pos, err = p.DB.PositionInTopology(ctx, mostRecentEventID); err == nil {
 		switch {
-		case r.Backwards && pos > latestPosition:
+		case r.Backwards && pos < latestPosition:
 			fallthrough
-		case !r.Backwards && pos < latestPosition:
+		case !r.Backwards && pos > latestPosition:
 			latestPosition = pos
 		}
 	}

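This is the PDU stream position fix: both comparisons were inverted, so latestPosition could never move in the right direction. When paginating backwards the position of interest is the lowest topological position seen; forwards, the highest. A toy check of the corrected logic (plain Go, not Dendrite code; the fallthrough in the real switch just merges both cases into one assignment, as here):

package main

import "fmt"

// advance folds a list of topological positions into the "latest" one,
// mirroring the corrected switch: lowest wins backwards, highest forwards.
func advance(backwards bool, positions []int64) int64 {
	latest := positions[0]
	for _, pos := range positions[1:] {
		switch {
		case backwards && pos < latest:
			latest = pos
		case !backwards && pos > latest:
			latest = pos
		}
	}
	return latest
}

func main() {
	fmt.Println(advance(true, []int64{7, 3, 9}))  // 3: backwards pagination
	fmt.Println(advance(false, []int64{7, 3, 9})) // 9: forwards pagination
}
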
@@ -3,11 +3,14 @@ package syncapi
 import (
 	"context"
+	"encoding/json"
+	"fmt"
 	"net/http"
 	"net/http/httptest"
+	"reflect"
 	"testing"
 	"time"
 
+	"github.com/matrix-org/dendrite/clientapi/producers"
 	keyapi "github.com/matrix-org/dendrite/keyserver/api"
 	"github.com/matrix-org/dendrite/roomserver/api"
 	rsapi "github.com/matrix-org/dendrite/roomserver/api"

@@ -311,6 +314,139 @@ func testSyncAPIUpdatePresenceImmediately(t *testing.T, dbType test.DBType) {
 
 }
 
+func TestSendToDevice(t *testing.T) {
+	test.WithAllDatabases(t, testSendToDevice)
+}
+
+func testSendToDevice(t *testing.T, dbType test.DBType) {
+	user := test.NewUser(t)
+	alice := userapi.Device{
+		ID:          "ALICEID",
+		UserID:      user.ID,
+		AccessToken: "ALICE_BEARER_TOKEN",
+		DisplayName: "Alice",
+		AccountType: userapi.AccountTypeUser,
+	}
+
+	base, close := testrig.CreateBaseDendrite(t, dbType)
+	defer close()
+
+	jsctx, _ := base.NATS.Prepare(base.ProcessContext, &base.Cfg.Global.JetStream)
+	defer jetstream.DeleteAllStreams(jsctx, &base.Cfg.Global.JetStream)
+
+	AddPublicRoutes(base, &syncUserAPI{accounts: []userapi.Device{alice}}, &syncRoomserverAPI{}, &syncKeyAPI{})
+
+	producer := producers.SyncAPIProducer{
+		TopicSendToDeviceEvent: base.Cfg.Global.JetStream.Prefixed(jetstream.OutputSendToDeviceEvent),
+		JetStream:              jsctx,
+	}
+
+	msgCounter := 0
+
+	testCases := []struct {
+		name              string
+		since             string
+		want              []string
+		sendMessagesCount int
+	}{
+		{
+			name: "initial sync, no messages",
+			want: []string{},
+		},
+		{
+			name:              "initial sync, one new message",
+			sendMessagesCount: 1,
+			want: []string{
+				"message 1",
+			},
+		},
+		{
+			name:              "initial sync, two new messages", // we didn't advance the since token, so we'll receive two messages
+			sendMessagesCount: 1,
+			want: []string{
+				"message 1",
+				"message 2",
+			},
+		},
+		{
+			name:  "incremental sync, one message", // this deletes message 1, as we advanced the since token
+			since: types.StreamingToken{SendToDevicePosition: 1}.String(),
+			want: []string{
+				"message 2",
+			},
+		},
+		{
+			name:  "failed incremental sync, one message", // didn't advance since, so still the same message
+			since: types.StreamingToken{SendToDevicePosition: 1}.String(),
+			want: []string{
+				"message 2",
+			},
+		},
+		{
+			name:  "incremental sync, no message", // this should delete message 2
+			since: types.StreamingToken{SendToDevicePosition: 2}.String(), // next_batch from previous sync
+			want:  []string{},
+		},
+		{
+			name:              "incremental sync, three new messages",
+			since:             types.StreamingToken{SendToDevicePosition: 2}.String(),
+			sendMessagesCount: 3,
+			want: []string{
+				"message 3", // message 2 was deleted in the previous test
+				"message 4",
+				"message 5",
+			},
+		},
+		{
+			name: "initial sync, three messages", // we expect three messages, as we didn't go beyond "2"
+			want: []string{
+				"message 3",
+				"message 4",
+				"message 5",
+			},
+		},
+		{
+			name:  "incremental sync, no messages", // advance the sync token, no new messages
+			since: types.StreamingToken{SendToDevicePosition: 5}.String(),
+			want:  []string{},
+		},
+	}
+
+	ctx := context.Background()
+	for _, tc := range testCases {
+		// Send to-device messages of type "m.dendrite.test" with content `{"dummy":"message $counter"}`
+		for i := 0; i < tc.sendMessagesCount; i++ {
+			msgCounter++
+			msg := map[string]string{
+				"dummy": fmt.Sprintf("message %d", msgCounter),
+			}
+			if err := producer.SendToDevice(ctx, user.ID, user.ID, alice.ID, "m.dendrite.test", msg); err != nil {
+				t.Fatalf("unable to send to device message: %v", err)
+			}
+		}
+		time.Sleep((time.Millisecond * 15) * time.Duration(tc.sendMessagesCount)) // wait a bit, so the messages can be processed
+		// Execute a /sync request, recording the response
+		w := httptest.NewRecorder()
+		base.PublicClientAPIMux.ServeHTTP(w, test.NewRequest(t, "GET", "/_matrix/client/v3/sync", test.WithQueryParams(map[string]string{
+			"access_token": alice.AccessToken,
+			"since":        tc.since,
+		})))
+
+		// Extract the to_device.events, # gets all values of an array, in this case a string slice with "message $counter" entries
+		events := gjson.Get(w.Body.String(), "to_device.events.#.content.dummy").Array()
+		got := make([]string, len(events))
+		for i := range events {
+			got[i] = events[i].String()
+		}
+
+		// Ensure the messages we received are as we expect them to be
+		if !reflect.DeepEqual(got, tc.want) {
+			t.Logf("[%s|since=%s]: Sync: %s", tc.name, tc.since, w.Body.String())
+			t.Fatalf("[%s|since=%s]: got: %+v, want: %+v", tc.name, tc.since, got, tc.want)
+		}
+	}
+}
+
 func toNATSMsgs(t *testing.T, base *base.BaseDendrite, input []*gomatrixserverlib.HeaderedEvent) []*nats.Msg {
 	result := make([]*nats.Msg, len(input))
 	for i, ev := range input {

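A side note on the extraction step in the new test: gjson's "#" path element fans out over an array, so a single query pulls the content.dummy field from every to-device event in the /sync response. In isolation:

package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func main() {
	body := `{"to_device":{"events":[
		{"content":{"dummy":"message 1"}},
		{"content":{"dummy":"message 2"}}
	]}}`
	// "#" iterates the events array; the rest of the path applies per element.
	for _, v := range gjson.Get(body, "to_device.events.#.content.dummy").Array() {
		fmt.Println(v.String()) // message 1, then message 2
	}
}
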
@@ -50,6 +50,10 @@ Notifications can be viewed with GET /notifications
 If remote user leaves room we no longer receive device updates
 Guest users can join guest_access rooms
 
+# You'll be shocked to discover this is flakey too
+
+Inbound /v1/send_join rejects joins from other servers
+
 # For notifications extension on iOS
 
 /event/ does not allow access to events before the user joined

@@ -32,11 +32,11 @@ func CreateBaseDendrite(t *testing.T, dbType test.DBType) (*base.BaseDendrite, f
 	var cfg config.Dendrite
 	cfg.Defaults(false)
 	cfg.Global.JetStream.InMemory = true
-
 	switch dbType {
 	case test.DBTypePostgres:
 		cfg.Global.Defaults(true)   // autogen a signing key
+		cfg.MediaAPI.Defaults(true) // autogen a media path
 		cfg.Global.ServerName = "test"
 		// use a distinct prefix else concurrent postgres/sqlite runs will clash since NATS will use
 		// the file system event with InMemory=true :(
 		cfg.Global.JetStream.TopicPrefix = fmt.Sprintf("Test_%d_", dbType)

@@ -50,6 +50,7 @@ func CreateBaseDendrite(t *testing.T, dbType test.DBType) (*base.BaseDendrite, f
 		return base.NewBaseDendrite(&cfg, "Test", base.DisableMetrics), close
 	case test.DBTypeSQLite:
 		cfg.Defaults(true) // sets a sqlite db per component
 		cfg.Global.ServerName = "test"
 		// use a distinct prefix else concurrent postgres/sqlite runs will clash since NATS will use
 		// the file system event with InMemory=true :(
 		cfg.Global.JetStream.TopicPrefix = fmt.Sprintf("Test_%d_", dbType)

@@ -297,7 +297,9 @@ func (s *statsStatements) monthlyUsers(ctx context.Context, txn *sql.Tx) (result
 	return
 }
 
-/* R30Users counts the number of 30 day retained users, defined as:
+/*
+R30Users counts the number of 30 day retained users, defined as:
+
 - Users who have created their accounts more than 30 days ago
 - Where last seen at most 30 days ago
 - Where account creation and last_seen are > 30 days apart

@@ -334,7 +336,9 @@ func (s *statsStatements) r30Users(ctx context.Context, txn *sql.Tx) (map[string
 	return result, rows.Err()
 }
 
-/* R30UsersV2 counts the number of 30 day retained users, defined as users that:
+/*
+R30UsersV2 counts the number of 30 day retained users, defined as users that:
+
 - Appear more than once in the past 60 days
 - Have more than 30 days between the most and least recent appearances that occurred in the past 60 days.
 */

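These two hunks only reflow the doc comments for Go 1.19's stricter gofmt, but the reflowed text reads as a spec; for concreteness, a hypothetical SQL rendering of the R30UsersV2 definition (table and column names are invented here, not Dendrite's actual schema or query):

package sketch

// r30UsersV2SQL is an illustrative query for the definition above: users who
// appear more than once in the last 60 days, with more than 30 days between
// their first and last appearance in that window. $1 = now - 60 days,
// $2 = 30 days, in the same time unit as last_seen_ts.
const r30UsersV2SQL = `
	SELECT COUNT(*) FROM (
		SELECT user_id
		  FROM device_last_seen -- hypothetical table
		 WHERE last_seen_ts > $1
		 GROUP BY user_id
		HAVING COUNT(*) > 1
		   AND MAX(last_seen_ts) - MIN(last_seen_ts) > $2
	) AS retained
`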