merge master (doh)
This commit is contained in: 42b1adf442
@@ -320,7 +320,7 @@ func (r joinRoomReq) joinRoomUsingServer(roomID string, server gomatrixserverlib
    }

    if err = r.producer.SendEventWithState(
        r.req.Context(), gomatrixserverlib.RespState(respSendJoin), event,
        r.req.Context(), gomatrixserverlib.RespState(respSendJoin.RespState), event,
    ); err != nil {
        res := httputil.LogThenError(r.req, err)
        return &res, nil
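The one-line change above suggests that the send_join response no longer converts directly to a RespState but instead carries one in a `RespState` field. A toy illustration of why the extra field selector is needed; the types below are stand-ins for illustration only, not gomatrixserverlib's real definitions:

```go
package main

import "fmt"

// respState stands in for gomatrixserverlib.RespState.
type respState struct{ StateEventCount int }

// Old shape: the response was directly convertible to the state type.
type respSendJoinOld respState

// New shape implied by the diff: the response wraps the state in a field.
type respSendJoinNew struct {
    RespState respState
    // ... other response fields would live here ...
}

func main() {
    oldResp := respSendJoinOld{StateEventCount: 42}
    newResp := respSendJoinNew{RespState: respState{StateEventCount: 42}}

    // Old call site: a plain type conversion was enough.
    fmt.Println(respState(oldResp).StateEventCount)
    // New call site: select the embedded state, as the hunk now does.
    fmt.Println(newResp.RespState.StateEventCount)
}
```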
@@ -40,9 +40,14 @@ type flow struct {
    Stages []string `json:"stages"`
}

type loginIdentifier struct {
    Type string `json:"type"`
    User string `json:"user"`
}

type passwordRequest struct {
    User string `json:"user"`
    Password string `json:"password"`
    Identifier loginIdentifier `json:"identifier"`
    Password string `json:"password"`
    // Both DeviceID and InitialDisplayName can be omitted, or empty strings ("")
    // Thus a pointer is needed to differentiate between the two
    InitialDisplayName *string `json:"initial_device_display_name"`
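The hunk above adds a `loginIdentifier` so a client can supply the user either as the legacy top-level `user` field or inside an `m.id.user` identifier object. A minimal sketch of how both request shapes decode into the same structs, using only the field names and JSON tags shown in the diff (the `main` harness and sample bodies are illustrative):

```go
package main

import (
    "encoding/json"
    "fmt"
)

type loginIdentifier struct {
    Type string `json:"type"`
    User string `json:"user"`
}

type passwordRequest struct {
    User       string          `json:"user"`
    Password   string          `json:"password"`
    Identifier loginIdentifier `json:"identifier"`
}

func main() {
    // Legacy shape: top-level "user".
    legacy := []byte(`{"user": "@alice:example.com", "password": "s3cret"}`)
    // Newer shape: an "m.id.user" identifier object.
    withIdentifier := []byte(`{"identifier": {"type": "m.id.user", "user": "alice"}, "password": "s3cret"}`)

    for _, body := range [][]byte{legacy, withIdentifier} {
        var r passwordRequest
        if err := json.Unmarshal(body, &r); err != nil {
            panic(err)
        }
        fmt.Printf("user=%q identifier=%+v\n", r.User, r.Identifier)
    }
}
```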
@@ -75,34 +80,43 @@ func Login(
        }
    } else if req.Method == http.MethodPost {
        var r passwordRequest
        var acc *authtypes.Account
        resErr := httputil.UnmarshalJSONRequest(req, &r)
        if resErr != nil {
            return *resErr
        }
        if r.User == "" {
            switch r.Identifier.Type {
            case "m.id.user":
                if r.Identifier.User == "" {
                    return util.JSONResponse{
                        Code: http.StatusBadRequest,
                        JSON: jsonerror.BadJSON("'user' must be supplied."),
                    }
                }

                util.GetLogger(req.Context()).WithField("user", r.Identifier.User).Info("Processing login request")

                localpart, err := userutil.ParseUsernameParam(r.Identifier.User, &cfg.Matrix.ServerName)
                if err != nil {
                    return util.JSONResponse{
                        Code: http.StatusBadRequest,
                        JSON: jsonerror.InvalidUsername(err.Error()),
                    }
                }

                acc, err = accountDB.GetAccountByPassword(req.Context(), localpart, r.Password)
                if err != nil {
                    // Technically we could tell them if the user does not exist by checking if err == sql.ErrNoRows
                    // but that would leak the existence of the user.
                    return util.JSONResponse{
                        Code: http.StatusForbidden,
                        JSON: jsonerror.Forbidden("username or password was incorrect, or the account does not exist"),
                    }
                }
            default:
                return util.JSONResponse{
                    Code: http.StatusBadRequest,
                    JSON: jsonerror.BadJSON("'user' must be supplied."),
                }
            }

        util.GetLogger(req.Context()).WithField("user", r.User).Info("Processing login request")

        localpart, err := userutil.ParseUsernameParam(r.User, &cfg.Matrix.ServerName)
        if err != nil {
            return util.JSONResponse{
                Code: http.StatusBadRequest,
                JSON: jsonerror.InvalidUsername(err.Error()),
            }
        }

        acc, err := accountDB.GetAccountByPassword(req.Context(), localpart, r.Password)
        if err != nil {
            // Technically we could tell them if the user does not exist by checking if err == sql.ErrNoRows
            // but that would leak the existence of the user.
            return util.JSONResponse{
                Code: http.StatusForbidden,
                JSON: jsonerror.Forbidden("username or password was incorrect, or the account does not exist"),
                JSON: jsonerror.BadJSON("login identifier '" + r.Identifier.Type + "' not supported"),
            }
        }

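The control flow above falls back to the legacy `user` field when no identifier is given, and rejects identifier types other than `m.id.user`. A stripped-down, self-contained sketch of that resolution step in isolation (the `resolveLoginUser` helper, its error strings, and the sample values are illustrative, not the handler's actual API):

```go
package main

import (
    "errors"
    "fmt"
)

type loginIdentifier struct {
    Type string `json:"type"`
    User string `json:"user"`
}

type passwordRequest struct {
    User       string          `json:"user"`
    Identifier loginIdentifier `json:"identifier"`
}

// resolveLoginUser mirrors the fallback in the hunk above: prefer the legacy
// top-level "user" field, otherwise accept only an "m.id.user" identifier.
func resolveLoginUser(r passwordRequest) (string, error) {
    if r.User != "" {
        return r.User, nil
    }
    switch r.Identifier.Type {
    case "m.id.user":
        if r.Identifier.User == "" {
            return "", errors.New("'user' must be supplied")
        }
        return r.Identifier.User, nil
    default:
        return "", fmt.Errorf("login identifier %q not supported", r.Identifier.Type)
    }
}

func main() {
    legacy := passwordRequest{User: "@alice:example.com"}
    identified := passwordRequest{Identifier: loginIdentifier{Type: "m.id.user", User: "alice"}}
    for _, r := range []passwordRequest{legacy, identified} {
        user, err := resolveLoginUser(r)
        fmt.Println(user, err)
    }
}
```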
@@ -71,8 +71,8 @@ func main() {
    )
    federationapi.SetupFederationAPIComponent(base, accountDB, deviceDB, federation, &keyRing, alias, input, query, asQuery, fedSenderAPI)
    mediaapi.SetupMediaAPIComponent(base, deviceDB)
    publicroomsapi.SetupPublicRoomsAPIComponent(base, deviceDB)
    syncapi.SetupSyncAPIComponent(base, deviceDB, accountDB, query)
    publicroomsapi.SetupPublicRoomsAPIComponent(base, deviceDB, query)
    syncapi.SetupSyncAPIComponent(base, deviceDB, accountDB, query, federation, cfg)

    httpHandler := common.WrapHandlerInCORS(base.APIMux)
@@ -26,7 +26,9 @@ func main() {

    deviceDB := base.CreateDeviceDB()

    publicroomsapi.SetupPublicRoomsAPIComponent(base, deviceDB)
    _, _, query := base.CreateHTTPRoomserverAPIs()

    publicroomsapi.SetupPublicRoomsAPIComponent(base, deviceDB, query)

    base.SetupAndServeHTTP(string(base.Cfg.Bind.PublicRoomsAPI), string(base.Cfg.Listen.PublicRoomsAPI))
@@ -26,10 +26,11 @@ func main() {

    deviceDB := base.CreateDeviceDB()
    accountDB := base.CreateAccountsDB()
    federation := base.CreateFederationClient()

    _, _, query := base.CreateHTTPRoomserverAPIs()

    syncapi.SetupSyncAPIComponent(base, deviceDB, accountDB, query)
    syncapi.SetupSyncAPIComponent(base, deviceDB, accountDB, query, federation, cfg)

    base.SetupAndServeHTTP(string(base.Cfg.Bind.SyncAPI), string(base.Cfg.Listen.SyncAPI))
@@ -20,6 +20,8 @@ import (
    "fmt"
    "net/http"

    "golang.org/x/crypto/ed25519"

    "github.com/matrix-org/dendrite/common/keydb"
    "github.com/matrix-org/gomatrixserverlib"
    "github.com/matrix-org/naffka"
@@ -151,7 +153,12 @@ func (b *BaseDendrite) CreateAccountsDB() *accounts.Database {
// CreateKeyDB creates a new instance of the key database. Should only be called
// once per component.
func (b *BaseDendrite) CreateKeyDB() keydb.Database {
    db, err := keydb.NewDatabase(string(b.Cfg.Database.ServerKey))
    db, err := keydb.NewDatabase(
        string(b.Cfg.Database.ServerKey),
        b.Cfg.Matrix.ServerName,
        b.Cfg.Matrix.PrivateKey.Public().(ed25519.PublicKey),
        b.Cfg.Matrix.KeyID,
    )
    if err != nil {
        logrus.WithError(err).Panicf("failed to connect to keys db")
    }
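CreateKeyDB now hands the server's public key to the key database. The `.Public().(ed25519.PublicKey)` step is needed because `Public()` on a private key returns the generic `crypto.PublicKey` interface. A small sketch of the same assertion in isolation (the generated throwaway key is illustrative; a real server loads its key from config):

```go
package main

import (
    "bytes"
    "fmt"

    "golang.org/x/crypto/ed25519"
)

func main() {
    pub, priv, err := ed25519.GenerateKey(nil) // nil reader falls back to crypto/rand
    if err != nil {
        panic(err)
    }
    // Public() is declared as returning crypto.PublicKey, so the concrete key
    // is recovered with a type assertion, just as CreateKeyDB does with
    // b.Cfg.Matrix.PrivateKey.Public().(ed25519.PublicKey).
    asserted := priv.Public().(ed25519.PublicKey)
    fmt.Println(bytes.Equal(asserted, pub)) // true: same key, now concretely typed
}
```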
@@ -16,9 +16,10 @@ package keydb

import (
    "context"
    "errors"
    "net/url"

    "golang.org/x/crypto/ed25519"

    "github.com/matrix-org/dendrite/common/keydb/postgres"
    "github.com/matrix-org/gomatrixserverlib"
)
@@ -30,15 +31,20 @@ type Database interface {
}

// NewDatabase opens a database connection.
func NewDatabase(dataSourceName string) (Database, error) {
func NewDatabase(
    dataSourceName string,
    serverName gomatrixserverlib.ServerName,
    serverKey ed25519.PublicKey,
    serverKeyID gomatrixserverlib.KeyID,
) (Database, error) {
    uri, err := url.Parse(dataSourceName)
    if err != nil {
        return nil, err
        return postgres.NewDatabase(dataSourceName, serverName, serverKey, serverKeyID)
    }
    switch uri.Scheme {
    case "postgres":
        return postgres.NewDatabase(dataSourceName)
        return postgres.NewDatabase(dataSourceName, serverName, serverKey, serverKeyID)
    default:
        return nil, errors.New("unknown schema")
        return postgres.NewDatabase(dataSourceName, serverName, serverKey, serverKeyID)
    }
}
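The hunk above threads the server's own name, public key, and key ID through `keydb.NewDatabase`, inside the usual data-source dispatch: parse the connection string and pick a driver by URI scheme. A minimal, self-contained sketch of that dispatch pattern (the `openPostgres` stub stands in for a driver constructor such as `postgres.NewDatabase`; it is an assumption for illustration, not Dendrite's API):

```go
package main

import (
    "errors"
    "fmt"
    "net/url"
)

// openPostgres is a stand-in for a driver-specific constructor.
func openPostgres(dataSourceName string) (string, error) {
    return "postgres connection to " + dataSourceName, nil
}

// open dispatches on the URI scheme of the data source name.
func open(dataSourceName string) (string, error) {
    uri, err := url.Parse(dataSourceName)
    if err != nil {
        return "", err
    }
    switch uri.Scheme {
    case "postgres":
        return openPostgres(dataSourceName)
    default:
        return "", errors.New("unknown schema " + uri.Scheme)
    }
}

func main() {
    conn, err := open("postgres://dendrite:itsasecret@localhost/dendrite_keys")
    if err != nil {
        panic(err)
    }
    fmt.Println(conn)
}
```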
@@ -18,6 +18,9 @@ package postgres
import (
    "context"
    "database/sql"
    "math"

    "golang.org/x/crypto/ed25519"

    "github.com/matrix-org/gomatrixserverlib"
)
@@ -32,7 +35,12 @@ type Database struct {
// It creates the necessary tables if they don't already exist.
// It prepares all the SQL statements that it will use.
// Returns an error if there was a problem talking to the database.
func NewDatabase(dataSourceName string) (*Database, error) {
func NewDatabase(
    dataSourceName string,
    serverName gomatrixserverlib.ServerName,
    serverKey ed25519.PublicKey,
    serverKeyID gomatrixserverlib.KeyID,
) (*Database, error) {
    db, err := sql.Open("postgres", dataSourceName)
    if err != nil {
        return nil, err
@@ -42,6 +50,28 @@ func NewDatabase(dataSourceName string) (*Database, error) {
    if err != nil {
        return nil, err
    }
    // Store our own keys so that we don't end up making HTTP requests to find our
    // own keys
    index := gomatrixserverlib.PublicKeyLookupRequest{
        ServerName: serverName,
        KeyID:      serverKeyID,
    }
    value := gomatrixserverlib.PublicKeyLookupResult{
        VerifyKey: gomatrixserverlib.VerifyKey{
            Key: gomatrixserverlib.Base64String(serverKey),
        },
        ValidUntilTS: math.MaxUint64 >> 1,
        ExpiredTS:    gomatrixserverlib.PublicKeyNotExpired,
    }
    err = d.StoreKeys(
        context.Background(),
        map[gomatrixserverlib.PublicKeyLookupRequest]gomatrixserverlib.PublicKeyLookupResult{
            index: value,
        },
    )
    if err != nil {
        return nil, err
    }
    return d, nil
}

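The block above primes the key database with the server's own signing key so that key lookups for ourselves never need to go out over federation. A small sketch of building that same request/result pair from a freshly generated ed25519 key; only the gomatrixserverlib types and fields that appear in the hunk are relied on, while the key generation, server name, and key ID are illustrative values:

```go
package main

import (
    "fmt"
    "math"

    "golang.org/x/crypto/ed25519"

    "github.com/matrix-org/gomatrixserverlib"
)

func main() {
    // Generate a throwaway signing key; a real server loads this from config.
    pub, _, err := ed25519.GenerateKey(nil)
    if err != nil {
        panic(err)
    }

    // The lookup request identifies our own server and key ID...
    index := gomatrixserverlib.PublicKeyLookupRequest{
        ServerName: gomatrixserverlib.ServerName("localhost"),
        KeyID:      gomatrixserverlib.KeyID("ed25519:auto"),
    }
    // ...and the result marks the key as valid far into the future and not
    // expired, matching the values written by NewDatabase in the hunk above.
    value := gomatrixserverlib.PublicKeyLookupResult{
        VerifyKey: gomatrixserverlib.VerifyKey{
            Key: gomatrixserverlib.Base64String(pub),
        },
        ValidUntilTS: math.MaxUint64 >> 1,
        ExpiredTS:    gomatrixserverlib.PublicKeyNotExpired,
    }

    fmt.Printf("would store %+v -> %+v\n", index, value)
}
```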
@@ -2,16 +2,17 @@

Dendrite uses [SyTest](https://github.com/matrix-org/sytest) for its
integration testing. When creating a new PR, add the test IDs (see below) that
your PR should allow to pass to `testfile` in dendrite's root directory. Not all
PRs need to make new tests pass. If we find your PR should be making a test pass
we may ask you to add to that file, as generally Dendrite's progress can be
tracked through the amount of SyTest tests it passes.
your PR should allow to pass to `sytest-whitelist` in dendrite's root
directory. Not all PRs need to make new tests pass. If we find your PR should
be making a test pass we may ask you to add to that file, as generally
Dendrite's progress can be tracked through the amount of SyTest tests it
passes.

## Finding out which tests to add

We recommend you run the tests locally by manually setting up SyTest or using a
SyTest docker image. After running the tests, a script will print the tests you
need to add to `testfile` for you.
need to add to `sytest-whitelist`.

You should proceed after you see no build problems for dendrite after running:

@@ -50,16 +51,16 @@ EOF
Run the tests:

```sh
./run-tests.pl -I Dendrite::Monolith -d ../dendrite/bin -W ../dendrite/testfile -O tap --all | tee results.tap
./run-tests.pl -I Dendrite::Monolith -d ../dendrite/bin -W ../dendrite/sytest-whitelist -O tap --all | tee results.tap
```

where `tee` lets you see the results while they're being piped to the file.

Once the tests are complete, run the helper script to see if you need to add
any newly passing test names to `testfile` in the project's root directory:
any newly passing test names to `sytest-whitelist` in the project's root directory:

```sh
../dendrite/show-expected-fail-tests.sh results.tap ../dendrite/testfile
../dendrite/show-expected-fail-tests.sh results.tap ../dendrite/sytest-whitelist ../dendrite/sytest-blacklist
```

If the script prints nothing/exits with 0, then you're good to go.
@@ -75,4 +76,4 @@ docker run --rm -v /path/to/dendrite/:/src/ matrixdotorg/sytest-dendrite

where `/path/to/dendrite/` should be replaced with the actual path to your
dendrite source code. The output should tell you if you need to add any tests to
`testfile`.
`sytest-whitelist`.
@@ -16,7 +16,6 @@ package storage

import (
    "context"
    "errors"
    "net/url"

    "github.com/matrix-org/dendrite/common"

@@ -34,12 +33,12 @@ type Database interface {
func NewDatabase(dataSourceName string) (Database, error) {
    uri, err := url.Parse(dataSourceName)
    if err != nil {
        return nil, err
        return postgres.NewDatabase(dataSourceName)
    }
    switch uri.Scheme {
    case "postgres":
        return postgres.NewDatabase(dataSourceName)
    default:
        return nil, errors.New("unknown schema")
        return postgres.NewDatabase(dataSourceName)
    }
}
go.mod (65 lines changed)
@ -2,68 +2,41 @@ module github.com/matrix-org/dendrite
|
|||
|
||||
require (
|
||||
github.com/Shopify/sarama v0.0.0-20170127151855-574d3147eee3
|
||||
github.com/alecthomas/gometalinter v2.0.2+incompatible
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4
|
||||
github.com/apache/thrift v0.0.0-20161221203622-b2a4d4ae21c7
|
||||
github.com/beorn7/perks v1.0.1
|
||||
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd
|
||||
github.com/crossdock/crossdock-go v0.0.0-20160816171116-049aabb0122b
|
||||
github.com/davecgh/go-spew v1.1.1
|
||||
github.com/eapache/go-resiliency v0.0.0-20160104191539-b86b1ec0dd42
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20160609142408-bb955e01b934
|
||||
github.com/eapache/queue v1.1.0
|
||||
github.com/golang/protobuf v1.3.2
|
||||
github.com/golang/snappy v0.0.0-20170119014723-7db9049039a0
|
||||
github.com/google/shlex v0.0.0-20150127133951-6f45313302b9
|
||||
github.com/gorilla/context v1.1.1
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible // indirect
|
||||
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect
|
||||
github.com/eapache/go-resiliency v0.0.0-20160104191539-b86b1ec0dd42 // indirect
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20160609142408-bb955e01b934 // indirect
|
||||
github.com/eapache/queue v1.1.0 // indirect
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect
|
||||
github.com/gorilla/mux v1.7.3
|
||||
github.com/jaegertracing/jaeger-client-go v0.0.0-20170921145708-3ad49a1d839b
|
||||
github.com/jaegertracing/jaeger-lib v0.0.0-20170920222118-21a3da6d66fe
|
||||
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6
|
||||
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 // indirect
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
|
||||
github.com/lib/pq v0.0.0-20170918175043-23da1db4f16d
|
||||
github.com/lib/pq v1.2.0
|
||||
github.com/matrix-org/dugong v0.0.0-20171220115018-ea0a4690a0d5
|
||||
github.com/matrix-org/go-http-js-libp2p v0.0.0-20200125063821-6eb06b102bda
|
||||
github.com/matrix-org/gomatrix v0.0.0-20190528120928-7df988a63f26
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20200124100636-0c2ec91d1df5
|
||||
github.com/matrix-org/naffka v0.0.0-20171115094957-662bfd0841d0
|
||||
github.com/matrix-org/util v0.0.0-20171127121716-2e2df66af2f5
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1
|
||||
github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5
|
||||
github.com/nicksnyder/go-i18n v1.8.1
|
||||
github.com/opentracing/opentracing-go v0.0.0-20170806192116-8ebe5d4e236e
|
||||
github.com/pelletier/go-toml v0.0.0-20170904195809-1d6b12b7cb29
|
||||
github.com/pierrec/lz4 v0.0.0-20161206202305-5c9560bfa9ac
|
||||
github.com/pierrec/xxHash v0.0.0-20160112165351-5a004441f897
|
||||
github.com/opentracing/opentracing-go v1.0.2
|
||||
github.com/pierrec/lz4 v0.0.0-20161206202305-5c9560bfa9ac // indirect
|
||||
github.com/pierrec/xxHash v0.0.0-20160112165351-5a004441f897 // indirect
|
||||
github.com/pkg/errors v0.8.1
|
||||
github.com/pmezard/go-difflib v1.0.0
|
||||
github.com/prometheus/client_golang v1.2.1
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4
|
||||
github.com/prometheus/common v0.7.0
|
||||
github.com/prometheus/procfs v0.0.5
|
||||
github.com/rcrowley/go-metrics v0.0.0-20161128210544-1f30fe9094a5
|
||||
github.com/rcrowley/go-metrics v0.0.0-20161128210544-1f30fe9094a5 // indirect
|
||||
github.com/sirupsen/logrus v1.4.2
|
||||
github.com/stretchr/objx v0.2.0 // indirect
|
||||
github.com/stretchr/testify v1.3.0
|
||||
github.com/tidwall/gjson v1.1.5
|
||||
github.com/tidwall/match v1.0.1
|
||||
github.com/tidwall/sjson v1.0.3
|
||||
github.com/uber-go/atomic v1.3.0
|
||||
github.com/stretchr/testify v1.4.0 // indirect
|
||||
github.com/uber-go/atomic v1.3.0 // indirect
|
||||
github.com/uber/jaeger-client-go v2.15.0+incompatible
|
||||
github.com/uber/jaeger-lib v1.5.0
|
||||
github.com/uber/tchannel-go v0.0.0-20170927010734-b3e26487e291
|
||||
go.uber.org/atomic v1.3.0
|
||||
go.uber.org/multierr v0.0.0-20170829224307-fb7d312c2c04
|
||||
go.uber.org/zap v1.7.1
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980
|
||||
golang.org/x/sys v0.0.0-20191010194322-b09406accb47
|
||||
go.uber.org/atomic v1.3.0 // indirect
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 // indirect
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 // indirect
|
||||
gopkg.in/Shopify/sarama.v1 v1.11.0
|
||||
gopkg.in/airbrake/gobrake.v2 v2.0.9
|
||||
gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20170727041045-23bcc3c4eae3
|
||||
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
|
||||
gopkg.in/h2non/bimg.v1 v1.0.18
|
||||
gopkg.in/macaroon.v2 v2.1.0
|
||||
gopkg.in/yaml.v2 v2.2.2
|
||||
)
|
||||
|
||||
|
|
go.sum (108 lines changed)
@ -1,13 +1,11 @@
|
|||
github.com/Shopify/sarama v0.0.0-20170127151855-574d3147eee3 h1:j6BAEHYn1kUyW2j7kY0mOJ/R8A0qWwXpvUAEHGemm/g=
|
||||
github.com/Shopify/sarama v0.0.0-20170127151855-574d3147eee3/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/alecthomas/gometalinter v2.0.2+incompatible/go.mod h1:qfIpQGGz3d+NmgyPBqv+LSh50emm1pt72EtcX2vKYQk=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/apache/thrift v0.0.0-20161221203622-b2a4d4ae21c7/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a h1:BtpsbiV638WQZwhA98cEZw2BsbnQJrbd0BI7tsy0W1c=
|
||||
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
|
@ -16,7 +14,6 @@ github.com/cespare/xxhash/v2 v2.1.0 h1:yTUvW7Vhb89inJ+8irsUqiWjh8iT6sQPZiQzI6ReG
|
|||
github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
|
||||
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w=
|
||||
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
|
||||
github.com/crossdock/crossdock-go v0.0.0-20160816171116-049aabb0122b/go.mod h1:v9FBN7gdVTpiD/+LZ7Po0UKvROyT87uLVxTHVky/dlQ=
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
|
@ -34,121 +31,90 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
|
|||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/golang/protobuf v0.0.0-20161117033126-8ee79997227b h1:fE/yi9pibxGEc0gSJuEShcsBXE2d5FW3OudsjE9tKzQ=
|
||||
github.com/golang/protobuf v0.0.0-20161117033126-8ee79997227b/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/snappy v0.0.0-20170119014723-7db9049039a0 h1:FMElzTwkd/2jQ2QzLEzt97JRgvFhYhnYiaQSwZ7tuyU=
|
||||
github.com/golang/snappy v0.0.0-20170119014723-7db9049039a0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/shlex v0.0.0-20150127133951-6f45313302b9/go.mod h1:RpwtwJQFrIEPstU94h88MWPXP2ektJZ8cZ0YntAmXiE=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/mux v1.3.0 h1:HwSEKGN6U5T2aAQTfu5pW8fiwjSp3IgwdRbkICydk/c=
|
||||
github.com/gorilla/mux v1.3.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
|
||||
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=
|
||||
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
|
||||
github.com/jaegertracing/jaeger-client-go v0.0.0-20170921145708-3ad49a1d839b/go.mod h1:HWG7INeOG1ZE17I/S8eeb+svquXmBS/hf1Obi6hJUyQ=
|
||||
github.com/jaegertracing/jaeger-lib v0.0.0-20170920222118-21a3da6d66fe/go.mod h1:VqeqQrZmZr9G4WdLw4ei9tAHU54iJRkfoFHvTTQn4jQ=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 h1:KAZ1BW2TCmT6PRihDPpocIy1QTtsAsrx6TneU/4+CMg=
|
||||
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/lib/pq v0.0.0-20170918175043-23da1db4f16d h1:Hdtccv31GWxWoCzWsIhZXy5NxEktzAkA8lywhTKu8O4=
|
||||
github.com/lib/pq v0.0.0-20170918175043-23da1db4f16d/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
|
||||
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/matrix-org/dugong v0.0.0-20171220115018-ea0a4690a0d5 h1:nMX2t7hbGF0NYDYySx0pCqEKGKAeZIiSqlWSspetlhY=
|
||||
github.com/matrix-org/dugong v0.0.0-20171220115018-ea0a4690a0d5/go.mod h1:NgPCr+UavRGH6n5jmdX8DuqFZ4JiCWIJoZiuhTRLSUg=
|
||||
github.com/matrix-org/go-http-js-libp2p v0.0.0-20200125063821-6eb06b102bda h1:e8QGrSxjAaQ2W5ak33taR2yhQAVBQzjrKJ2zW9/jOIw=
|
||||
github.com/matrix-org/go-http-js-libp2p v0.0.0-20200125063821-6eb06b102bda/go.mod h1:3WluEZ9QXSwU30tWYqktnpC1x9mwZKx1r8uAv8Iq+a4=
|
||||
github.com/matrix-org/gomatrix v0.0.0-20171003113848-a7fc80c8060c h1:aZap604NyBGhAUE0CyNHz6+Pryye5A5mHnYyO4KPPW8=
|
||||
github.com/matrix-org/gomatrix v0.0.0-20171003113848-a7fc80c8060c/go.mod h1:3fxX6gUjWyI/2Bt7J1OLhpCzOfO/bB3AiX0cJtEKud0=
|
||||
github.com/matrix-org/gomatrix v0.0.0-20190130130140-385f072fe9af h1:piaIBNQGIHnni27xRB7VKkEwoWCgAmeuYf8pxAyG0bI=
|
||||
github.com/matrix-org/gomatrix v0.0.0-20190130130140-385f072fe9af/go.mod h1:3fxX6gUjWyI/2Bt7J1OLhpCzOfO/bB3AiX0cJtEKud0=
|
||||
github.com/matrix-org/gomatrix v0.0.0-20190528120928-7df988a63f26 h1:Hr3zjRsq2bhrnp3Ky1qgx/fzCtCALOoGYylh2tpS9K4=
|
||||
github.com/matrix-org/gomatrix v0.0.0-20190528120928-7df988a63f26/go.mod h1:3fxX6gUjWyI/2Bt7J1OLhpCzOfO/bB3AiX0cJtEKud0=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20181109104322-1c2cbc0872f0 h1:3UzhmERBbis4ZaB3imEbZwtDjGz/oVRC2cLLEajCzJA=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20181109104322-1c2cbc0872f0/go.mod h1:YHyhIQUmuXyKtoVfDUMk/DyU93Taamlu6nPZkij/JtA=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20190619132215-178ed5e3b8e2 h1:pYajAEdi3sowj4iSunqctchhcMNW3rDjeeH0T4uDkMY=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20190619132215-178ed5e3b8e2/go.mod h1:sf0RcKOdiwJeTti7A313xsaejNUGYDq02MQZ4JD4w/E=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20190724145009-a6df10ef35d6 h1:B8n1H5Wb1B5jwLzTylBpY0kJCMRqrofT7PmOw4aJFJA=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20190724145009-a6df10ef35d6/go.mod h1:sf0RcKOdiwJeTti7A313xsaejNUGYDq02MQZ4JD4w/E=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20190805173246-3a2199d5ecd6 h1:xr69Hk6QM3RIN6JSvx3RpDowBGpHpDDqhqXCeySwYow=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20190805173246-3a2199d5ecd6/go.mod h1:sf0RcKOdiwJeTti7A313xsaejNUGYDq02MQZ4JD4w/E=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20190814163046-d6285a18401f h1:20CZL7ApB7xgR7sZF9yD/qpsP51Sfx0TTgUJ3vKgnZQ=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20190814163046-d6285a18401f/go.mod h1:sf0RcKOdiwJeTti7A313xsaejNUGYDq02MQZ4JD4w/E=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20200124100636-0c2ec91d1df5 h1:kmRjpmFOenVpOaV/DRlo9p6z/IbOKlUC+hhKsAAh8Qg=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20200124100636-0c2ec91d1df5/go.mod h1:FsKa2pWE/bpQql9H7U4boOPXFoJX/QcqaZZ6ijLkaZI=
|
||||
github.com/matrix-org/naffka v0.0.0-20171115094957-662bfd0841d0 h1:p7WTwG+aXM86+yVrYAiCMW3ZHSmotVvuRbjtt3jC+4A=
|
||||
github.com/matrix-org/naffka v0.0.0-20171115094957-662bfd0841d0/go.mod h1:cXoYQIENbdWIQHt1SyCo6Bl3C3raHwJ0wgVrXHSqf+A=
|
||||
github.com/matrix-org/util v0.0.0-20171013132526-8b1c8ab81986 h1:TiWl4hLvezAhRPM8tPcPDFTysZ7k4T/1J4GPp/iqlZo=
|
||||
github.com/matrix-org/util v0.0.0-20171013132526-8b1c8ab81986/go.mod h1:lePuOiXLNDott7NZfnQvJk0lAZ5HgvIuWGhel6J+RLA=
|
||||
github.com/matrix-org/util v0.0.0-20171127121716-2e2df66af2f5 h1:W7l5CP4V7wPyPb4tYE11dbmeAOwtFQBTW0rf4OonOS8=
|
||||
github.com/matrix-org/util v0.0.0-20171127121716-2e2df66af2f5/go.mod h1:lePuOiXLNDott7NZfnQvJk0lAZ5HgvIuWGhel6J+RLA=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/miekg/dns v1.1.4 h1:rCMZsU2ScVSYcAsOXgmC6+AKOK+6pmQTOcw03nfwYV0=
|
||||
github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/miekg/dns v1.1.12 h1:WMhc1ik4LNkTg8U9l3hI1LvxKmIL+f1+WV/SZtCbDDA=
|
||||
github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy/FJl/rCYT0+EuS8+Z0z4=
|
||||
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
|
||||
github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5 h1:BvoENQQU+fZ9uukda/RzCAL/191HHwJA5b13R6diVlY=
|
||||
github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8=
|
||||
github.com/nicksnyder/go-i18n v1.8.1/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q=
|
||||
github.com/opentracing/opentracing-go v0.0.0-20170806192116-8ebe5d4e236e h1:4cOVGAdR+woaUwhk6bgWI9ESJQDTaJMr8U4OJlT3J0Q=
|
||||
github.com/opentracing/opentracing-go v0.0.0-20170806192116-8ebe5d4e236e/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/pelletier/go-toml v0.0.0-20170904195809-1d6b12b7cb29/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg=
|
||||
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/pierrec/lz4 v0.0.0-20161206202305-5c9560bfa9ac h1:tKcxwAA5OHUQjL6sWsuCIcP9OnzN+RwKfvomtIOsfy8=
|
||||
github.com/pierrec/lz4 v0.0.0-20161206202305-5c9560bfa9ac/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pierrec/xxHash v0.0.0-20160112165351-5a004441f897 h1:jp3jc/PyyTrTKjJJ6rWnhTbmo7tGgBFyG9AL5FIrO1I=
|
||||
github.com/pierrec/xxHash v0.0.0-20160112165351-5a004441f897/go.mod h1:w2waW5Zoa/Wc4Yqe0wgrIYAGKqRMf7czn2HNKXmuL+I=
|
||||
github.com/pkg/errors v0.0.0-20170505043639-c605e284fe17 h1:chPfVn+gpAM5CTpTyVU9j8J+xgRGwmoDlNDLjKnJiYo=
|
||||
github.com/pkg/errors v0.0.0-20170505043639-c605e284fe17/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.0.0-20180519192340-c51dc758d4bb h1:ghXIh3jvLRo/h3y2O7wBgcmH1th5NXQ4XHwK5CgI//I=
|
||||
github.com/prometheus/client_golang v0.0.0-20180519192340-c51dc758d4bb/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.2.1 h1:JnMpQc6ppsNgw9QPAGF6Dod479itz7lvlsMzzNayLOI=
|
||||
github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U=
|
||||
github.com/prometheus/client_model v0.0.0-20150212101744-fa8ad6fec335 h1:0E/5GnGmzoDCtmzTycjGDWW33H0UBmAhR0h+FC8hWLs=
|
||||
github.com/prometheus/client_model v0.0.0-20150212101744-fa8ad6fec335/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.0.0-20170108231212-dd2f054febf4 h1:bZG2YNnM/Fjd3kiqaVt13Apkhzz24wBKlxQ+URiggXk=
|
||||
github.com/prometheus/common v0.0.0-20170108231212-dd2f054febf4/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY=
|
||||
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
|
||||
github.com/prometheus/procfs v0.0.0-20170128160123-1878d9fbb537 h1:Lq69k27tHOmljEqDOHDy3b6kQyEie2yWeAiF0OKFMJ8=
|
||||
github.com/prometheus/procfs v0.0.0-20170128160123-1878d9fbb537/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8=
|
||||
github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20161128210544-1f30fe9094a5 h1:gwcdIpH6NU2iF8CmcqD+CP6+1CkRBOhHaPR+iu6raBY=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20161128210544-1f30fe9094a5/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/sirupsen/logrus v0.0.0-20170822132746-89742aefa4b2 h1:+8J/sCAVv2Y9Ct1BKszDFJEVWv6Aynr2O4FYGUg6+Mc=
|
||||
github.com/sirupsen/logrus v0.0.0-20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.3.0 h1:hI/7Q+DtNZ2kINb6qt/lS+IyXnHQe9e90POfeewL/ME=
|
||||
github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
|
@ -156,75 +122,67 @@ github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4
|
|||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/testify v0.0.0-20170809224252-890a5c3458b4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/tidwall/gjson v1.0.2 h1:5BsM7kyEAHAUGEGDkEKO9Mdyiuw6QQ6TSDdarP0Nnmk=
|
||||
github.com/tidwall/gjson v1.0.2/go.mod h1:c/nTNbUr0E0OrXEhq1pwa8iEgc2DOt4ZZqAt1HtCkPA=
|
||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/tidwall/gjson v1.1.5 h1:QysILxBeUEY3GTLA0fQVgkQG1zme8NxGvhh2SSqWNwI=
|
||||
github.com/tidwall/gjson v1.1.5/go.mod h1:c/nTNbUr0E0OrXEhq1pwa8iEgc2DOt4ZZqAt1HtCkPA=
|
||||
github.com/tidwall/match v0.0.0-20171002075945-1731857f09b1 h1:pWIN9LOlFRCJFqWIOEbHLvY0WWJddsjH2FQ6N0HKZdU=
|
||||
github.com/tidwall/match v0.0.0-20171002075945-1731857f09b1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
|
||||
github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc=
|
||||
github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
|
||||
github.com/tidwall/sjson v1.0.0 h1:hOrzQPtGKlKAudQVmU43GkxEgG8TOgKyiKUyb7sE0rs=
|
||||
github.com/tidwall/sjson v1.0.0/go.mod h1:bURseu1nuBkFpIES5cz6zBtjmYeOQmEESshn7VpF15Y=
|
||||
github.com/tidwall/sjson v1.0.3 h1:DeF+0LZqvIt4fKYw41aPB29ZGlvwVkHKktoXJ1YW9Y8=
|
||||
github.com/tidwall/sjson v1.0.3/go.mod h1:bURseu1nuBkFpIES5cz6zBtjmYeOQmEESshn7VpF15Y=
|
||||
github.com/uber-go/atomic v1.3.0 h1:ylWoWcs+jXihgo3Us1Sdsatf2R6+OlBGm8fexR3oFG4=
|
||||
github.com/uber-go/atomic v1.3.0/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g=
|
||||
github.com/uber/jaeger-client-go v2.15.0+incompatible h1:NP3qsSqNxh8VYr956ur1N/1C1PjvOJnJykCzcD5QHbk=
|
||||
github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
|
||||
github.com/uber/jaeger-lib v1.5.0 h1:OHbgr8l656Ub3Fw5k9SWnBfIEwvoHQ+W2y+Aa9D1Uyo=
|
||||
github.com/uber/jaeger-lib v1.5.0/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
|
||||
github.com/uber/tchannel-go v0.0.0-20170927010734-b3e26487e291/go.mod h1:Rrgz1eL8kMjW/nEzZos0t+Heq0O4LhnUJVA32OvWKHo=
|
||||
go.uber.org/atomic v1.3.0 h1:vs7fgriifsPbGdK3bNuMWapNn3qnZhCRXc19NRdq010=
|
||||
go.uber.org/atomic v1.3.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/multierr v0.0.0-20170829224307-fb7d312c2c04/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/zap v1.7.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/crypto v0.0.0-20180723164146-c126467f60eb/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181106171534-e4dc69e5b2fd h1:VtIkGDhk0ph3t+THbvXHfMZ8QHgsBO39Nh52+74pq7w=
|
||||
golang.org/x/crypto v0.0.0-20181106171534-e4dc69e5b2fd/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190131182504-b8fe1690c613 h1:MQ/ZZiDsUapFFiMS+vzwXkCTeEKaum+Do5rINYJDmxc=
|
||||
golang.org/x/crypto v0.0.0-20190131182504-b8fe1690c613/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/net v0.0.0-20170927055102-0a9397675ba3 h1:tTDpczhDVjW6WN3DinzKcw5juwkDTVn22I7MNlfxSXM=
|
||||
golang.org/x/net v0.0.0-20170927055102-0a9397675ba3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190301231341-16b79f2e4e95 h1:fY7Dsw114eJN4boqzVSbpVHO6rTdhq6/GnXeu+PKnzU=
|
||||
golang.org/x/net v0.0.0-20190301231341-16b79f2e4e95/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20171012164349-43eea11bc926 h1:PY6OU86NqbyZiOzaPnDw6oOjAGtYQqIua16z6y9QkwE=
|
||||
golang.org/x/sys v0.0.0-20171012164349-43eea11bc926/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7 h1:LepdCS8Gf/MVejFIt8lsiexZATdoGVyp5bcyS+rYoUI=
|
||||
golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191010194322-b09406accb47 h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY=
|
||||
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
gopkg.in/Shopify/sarama.v1 v1.11.0 h1:/3kaCyeYaPbr59IBjeqhIcUOB1vXlIVqXAYa5g5C5F0=
|
||||
gopkg.in/Shopify/sarama.v1 v1.11.0/go.mod h1:AxnvoaevB2nBjNK17cG61A3LleFcWFwVBHBt+cot4Oc=
|
||||
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20170727041045-23bcc3c4eae3/go.mod h1:3HH7i1SgMqlzxCcBmUHW657sD4Kvv9sC3HpL3YukzwA=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/h2non/bimg.v1 v1.0.18 h1:qn6/RpBHt+7WQqoBcK+aF2puc6nC78eZj5LexxoalT4=
|
||||
gopkg.in/h2non/bimg.v1 v1.0.18/go.mod h1:PgsZL7dLwUbsGm1NYps320GxGgvQNTnecMCZqxV11So=
|
||||
gopkg.in/h2non/gock.v1 v1.0.14 h1:fTeu9fcUvSnLNacYvYI54h+1/XEteDyHvrVCZEEEYNM=
|
||||
gopkg.in/h2non/gock.v1 v1.0.14/go.mod h1:sX4zAkdYX1TRGJ2JY156cFspQn4yRWn6p9EMdODlynE=
|
||||
gopkg.in/macaroon.v2 v2.0.0/go.mod h1:+I6LnTMkm/uV5ew/0nsulNjL16SK4+C8yDmRUzHR17I=
|
||||
gopkg.in/macaroon.v2 v2.1.0/go.mod h1:OUb+TQP/OP0WOerC2Jp/3CwhIKyIa9kQjuc7H24e6/o=
|
||||
gopkg.in/yaml.v2 v2.0.0-20171116090243-287cf08546ab h1:yZ6iByf7GKeJ3gsd1Dr/xaj1DyJ//wxKX1Cdh8LhoAw=
|
||||
gopkg.in/yaml.v2 v2.0.0-20171116090243-287cf08546ab/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
|
|
|
@@ -16,7 +16,6 @@ package storage

import (
    "context"
    "errors"
    "net/url"

    "github.com/matrix-org/dendrite/mediaapi/storage/postgres"

@@ -36,12 +35,12 @@ type Database interface {
func Open(dataSourceName string) (Database, error) {
    uri, err := url.Parse(dataSourceName)
    if err != nil {
        return nil, err
        return postgres.Open(dataSourceName)
    }
    switch uri.Scheme {
    case "postgres":
        return postgres.Open(dataSourceName)
    default:
        return nil, errors.New("unknown schema")
        return postgres.Open(dataSourceName)
    }
}
@@ -17,8 +17,10 @@ package publicroomsapi
import (
    "github.com/matrix-org/dendrite/clientapi/auth/storage/devices"
    "github.com/matrix-org/dendrite/common/basecomponent"
    "github.com/matrix-org/dendrite/publicroomsapi/consumers"
    "github.com/matrix-org/dendrite/publicroomsapi/routing"
    "github.com/matrix-org/dendrite/publicroomsapi/storage"
    roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
    "github.com/sirupsen/logrus"
)

@@ -27,11 +29,19 @@ import (
func SetupPublicRoomsAPIComponent(
    base *basecomponent.BaseDendrite,
    deviceDB *devices.Database,
    rsQueryAPI roomserverAPI.RoomserverQueryAPI,
) {
    publicRoomsDB, err := storage.NewPublicRoomsServerDatabase(string(base.Cfg.Database.PublicRoomsAPI))
    if err != nil {
        logrus.WithError(err).Panicf("failed to connect to public rooms db")
    }

    rsConsumer := consumers.NewOutputRoomEventConsumer(
        base.Cfg, base.KafkaConsumer, publicRoomsDB, rsQueryAPI,
    )
    if err = rsConsumer.Start(); err != nil {
        logrus.WithError(err).Panic("failed to start public rooms server consumer")
    }

    routing.Setup(base.APIMux, deviceDB, publicRoomsDB)
}
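The component setup above now requires a roomserver query API, which is why the cmd/ hunks earlier obtain one from `base.CreateHTTPRoomserverAPIs()`. A rough sketch of how a standalone component binary wires this together, based on the calls that appear in this diff; the `basecomponent.ParseFlags` and `NewBaseDendrite` boilerplate is an assumption about code not shown here, not a confirmed API:

```go
package main

import (
    "github.com/matrix-org/dendrite/common/basecomponent"
    "github.com/matrix-org/dendrite/publicroomsapi"
)

func main() {
    // Assumed boilerplate: load config and build the shared base component.
    cfg := basecomponent.ParseFlags()
    base := basecomponent.NewBaseDendrite(cfg, "PublicRoomsAPI")
    defer base.Close() // nolint: errcheck

    deviceDB := base.CreateDeviceDB()
    // Only the query API is needed here; the alias and input APIs are discarded.
    _, _, query := base.CreateHTTPRoomserverAPIs()

    publicroomsapi.SetupPublicRoomsAPIComponent(base, deviceDB, query)

    base.SetupAndServeHTTP(string(base.Cfg.Bind.PublicRoomsAPI), string(base.Cfg.Listen.PublicRoomsAPI))
}
```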
@@ -42,15 +42,16 @@ func NewPublicRoomsServerDatabase(dataSourceName string) (*PublicRoomsServerData
    if db, err = sql.Open("postgres", dataSourceName); err != nil {
        return nil, err
    }
    partitions := common.PartitionOffsetStatements{}
    if err = partitions.Prepare(db, "publicroomsapi"); err != nil {
    storage := PublicRoomsServerDatabase{
        db: db,
    }
    if err = storage.PartitionOffsetStatements.Prepare(db, "publicroomsapi"); err != nil {
        return nil, err
    }
    statements := publicRoomsStatements{}
    if err = statements.prepare(db); err != nil {
    if err = storage.statements.prepare(db); err != nil {
        return nil, err
    }
    return &PublicRoomsServerDatabase{db, partitions, statements}, nil
    return &storage, nil
}

// GetRoomVisibility returns the room visibility as a boolean: true if the room
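The rewrite above stops preparing the statements into local variables and instead prepares them directly on fields of the `PublicRoomsServerDatabase` value, which works because the struct embeds `common.PartitionOffsetStatements`. A tiny sketch of why preparing through an embedded field like this mutates the final value in place; the types and method here are stand-ins, not Dendrite's:

```go
package main

import "fmt"

// offsetStatements stands in for an embedded helper such as
// common.PartitionOffsetStatements in the diff above.
type offsetStatements struct {
    topic string
}

func (s *offsetStatements) Prepare(topic string) error {
    s.topic = topic
    return nil
}

// database embeds the helper, so Prepare can be called on the database value
// (promoted method) or via the field name, and it updates the field in place.
type database struct {
    offsetStatements
}

func main() {
    var d database
    if err := d.offsetStatements.Prepare("publicroomsapi"); err != nil {
        panic(err)
    }
    fmt.Println(d.topic) // "publicroomsapi": the embedded field was prepared in place
}
```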
@@ -16,7 +16,6 @@ package storage

import (
    "context"
    "errors"
    "net/url"

    "github.com/matrix-org/dendrite/common"

@@ -39,12 +38,12 @@ type Database interface {
func NewPublicRoomsServerDatabase(dataSourceName string) (Database, error) {
    uri, err := url.Parse(dataSourceName)
    if err != nil {
        return nil, err
        return postgres.NewPublicRoomsServerDatabase(dataSourceName)
    }
    switch uri.Scheme {
    case "postgres":
        return postgres.NewPublicRoomsServerDatabase(dataSourceName)
    default:
        return nil, errors.New("unknown schema")
        return postgres.NewPublicRoomsServerDatabase(dataSourceName)
    }
}
@@ -230,6 +230,20 @@ type QueryBackfillResponse struct {
    Events []gomatrixserverlib.Event `json:"events"`
}

// QueryServersInRoomAtEventRequest is a request to QueryServersInRoomAtEvent
type QueryServersInRoomAtEventRequest struct {
    // ID of the room to retrieve member servers for.
    RoomID string `json:"room_id"`
    // ID of the event for which to retrieve member servers.
    EventID string `json:"event_id"`
}

// QueryServersInRoomAtEventResponse is a response to QueryServersInRoomAtEvent
type QueryServersInRoomAtEventResponse struct {
    // Servers present in the room for these events.
    Servers []gomatrixserverlib.ServerName `json:"servers"`
}

// RoomserverQueryAPI is used to query information from the room server.
type RoomserverQueryAPI interface {
    // Query the latest events and state for a room from the room server.
@@ -303,6 +317,12 @@ type RoomserverQueryAPI interface {
        request *QueryBackfillRequest,
        response *QueryBackfillResponse,
    ) error

    QueryServersInRoomAtEvent(
        ctx context.Context,
        request *QueryServersInRoomAtEventRequest,
        response *QueryServersInRoomAtEventResponse,
    ) error
}

// RoomserverQueryLatestEventsAndStatePath is the HTTP path for the QueryLatestEventsAndState API.
@@ -332,8 +352,11 @@ const RoomserverQueryMissingEventsPath = "/api/roomserver/queryMissingEvents"
// RoomserverQueryStateAndAuthChainPath is the HTTP path for the QueryStateAndAuthChain API
const RoomserverQueryStateAndAuthChainPath = "/api/roomserver/queryStateAndAuthChain"

// RoomserverQueryBackfillPath is the HTTP path for the QueryBackfill API
const RoomserverQueryBackfillPath = "/api/roomserver/QueryBackfill"
// RoomserverQueryBackfillPath is the HTTP path for the QueryBackfillPath API
const RoomserverQueryBackfillPath = "/api/roomserver/queryBackfill"

// RoomserverQueryServersInRoomAtEventPath is the HTTP path for the QueryServersInRoomAtEvent API
const RoomserverQueryServersInRoomAtEventPath = "/api/roomserver/queryServersInRoomAtEvents"

// NewRoomserverQueryAPIHTTP creates a RoomserverQueryAPI implemented by talking to a HTTP POST API.
// If httpClient is nil then it uses the http.DefaultClient
@@ -478,3 +501,16 @@ func (h *httpRoomserverQueryAPI) QueryBackfill(
    apiURL := h.roomserverURL + RoomserverQueryBackfillPath
    return commonHTTP.PostJSON(ctx, span, h.httpClient, apiURL, request, response)
}

// QueryServersInRoomAtEvent implements RoomServerQueryAPI
func (h *httpRoomserverQueryAPI) QueryServersInRoomAtEvent(
    ctx context.Context,
    request *QueryServersInRoomAtEventRequest,
    response *QueryServersInRoomAtEventResponse,
) error {
    span, ctx := opentracing.StartSpanFromContext(ctx, "QueryServersInRoomAtEvent")
    defer span.Finish()

    apiURL := h.roomserverURL + RoomserverQueryServersInRoomAtEventPath
    return commonHTTP.PostJSON(ctx, span, h.httpClient, apiURL, request, response)
}
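With the request/response types, the interface method, the HTTP path, and the client stub above in place, any component holding a `RoomserverQueryAPI` can ask which servers were in a room at a given event. A hedged sketch of a caller, using only the names introduced in this diff and assuming the usual `context`, roomserver `api`, and `gomatrixserverlib` imports (the helper name and its arguments are placeholders):

```go
// serversAtEvent asks the roomserver which servers were joined to the room
// at the time of the given event, via the query API added in this diff.
func serversAtEvent(
    ctx context.Context,
    queryAPI api.RoomserverQueryAPI,
    roomID, eventID string,
) ([]gomatrixserverlib.ServerName, error) {
    req := api.QueryServersInRoomAtEventRequest{
        RoomID:  roomID,
        EventID: eventID,
    }
    var res api.QueryServersInRoomAtEventResponse
    if err := queryAPI.QueryServersInRoomAtEvent(ctx, &req, &res); err != nil {
        return nil, err
    }
    return res.Servers, nil
}
```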
@@ -609,53 +609,92 @@ func (r *RoomserverQueryAPI) QueryStateAndAuthChain(
    return err
}

// getAuthChain fetches the auth chain for the given auth events.
// An auth chain is the list of all events that are referenced in the
// auth_events section, and all their auth_events, recursively.
// The returned set of events contain the given events.
// Will *not* error if we don't have all auth events.
// getAuthChain fetches the auth chain for the given auth events. An auth chain
// is the list of all events that are referenced in the auth_events section, and
// all their auth_events, recursively. The returned set of events contain the
// given events. Will *not* error if we don't have all auth events.
func getAuthChain(
    ctx context.Context, dB RoomserverQueryAPIEventDB, authEventIDs []string,
) ([]gomatrixserverlib.Event, error) {
    var authEvents []gomatrixserverlib.Event

    // List of event ids to fetch. These will be added to the result and
    // their auth events will be fetched (if they haven't been previously)
    // List of event IDs to fetch. On each pass, these events will be requested
    // from the database and the `eventsToFetch` will be updated with any new
    // events that we have learned about and need to find. When `eventsToFetch`
    // is eventually empty, we should have reached the end of the chain.
    eventsToFetch := authEventIDs
    authEventsMap := make(map[string]gomatrixserverlib.Event)

    // Set of events we've already fetched.
    fetchedEventMap := make(map[string]bool)

    // Check if there's anything left to do
    for len(eventsToFetch) > 0 {
        // Convert eventIDs to events. First need to fetch NIDs
        // Try to retrieve the events from the database.
        events, err := dB.EventsFromIDs(ctx, eventsToFetch)
        if err != nil {
            return nil, err
        }

        // Work out a) which events we should add to the returned list of
        // events and b) which of the auth events we haven't seen yet and
        // add them to the list of events to fetch.
        // We've now fetched these events so clear out `eventsToFetch`. Soon we may
        // add newly discovered events to this for the next pass.
        eventsToFetch = eventsToFetch[:0]
        for _, event := range events {
            fetchedEventMap[event.EventID()] = true
            authEvents = append(authEvents, event.Event)

            // Now we need to fetch any auth events that we haven't
            // previously seen.
            for _, authEventID := range event.AuthEventIDs() {
                if !fetchedEventMap[authEventID] {
                    fetchedEventMap[authEventID] = true
                    eventsToFetch = append(eventsToFetch, authEventID)
        for _, event := range events {
            // Store the event in the event map - this prevents us from requesting it
            // from the database again.
            authEventsMap[event.EventID()] = event.Event

            // Extract all of the auth events from the newly obtained event. If we
            // don't already have a record of the event, record it in the list of
            // events we want to request for the next pass.
            for _, authEvent := range event.AuthEvents() {
                if _, ok := authEventsMap[authEvent.EventID]; !ok {
                    eventsToFetch = append(eventsToFetch, authEvent.EventID)
                }
            }
        }
    }

    // We've now retrieved all of the events we can. Flatten them down into an
    // array and return them.
    var authEvents []gomatrixserverlib.Event
    for _, event := range authEventsMap {
        authEvents = append(authEvents, event)
    }

    return authEvents, nil
}

// QueryServersInRoomAtEvent implements api.RoomserverQueryAPI
func (r *RoomserverQueryAPI) QueryServersInRoomAtEvent(
    ctx context.Context,
    request *api.QueryServersInRoomAtEventRequest,
    response *api.QueryServersInRoomAtEventResponse,
) error {
    // getMembershipsBeforeEventNID requires a NID, so retrieving the NID for
    // the event is necessary.
    NIDs, err := r.DB.EventNIDs(ctx, []string{request.EventID})
    if err != nil {
        return err
    }

    // Retrieve all "m.room.member" state events of "join" membership, which
    // contains the list of users in the room before the event, therefore all
    // the servers in it at that moment.
    events, err := r.getMembershipsBeforeEventNID(ctx, NIDs[request.EventID], true)
    if err != nil {
        return err
    }

    // Store the server names in a temporary map to avoid duplicates.
    servers := make(map[gomatrixserverlib.ServerName]bool)
    for _, event := range events {
        servers[event.Origin()] = true
    }

    // Populate the response.
    for server := range servers {
        response.Servers = append(response.Servers, server)
    }

    return nil
}

// SetupHTTP adds the RoomserverQueryAPI handlers to the http.ServeMux.
// nolint: gocyclo
func (r *RoomserverQueryAPI) SetupHTTP(servMux *http.ServeMux) {
@ -799,4 +838,18 @@ func (r *RoomserverQueryAPI) SetupHTTP(servMux *http.ServeMux) {
|
|||
return util.JSONResponse{Code: http.StatusOK, JSON: &response}
|
||||
}),
|
||||
)
|
||||
servMux.Handle(
|
||||
api.RoomserverQueryServersInRoomAtEventPath,
|
||||
common.MakeInternalAPI("QueryServersInRoomAtEvent", func(req *http.Request) util.JSONResponse {
|
||||
var request api.QueryServersInRoomAtEventRequest
|
||||
var response api.QueryServersInRoomAtEventResponse
|
||||
if err := json.NewDecoder(req.Body).Decode(&request); err != nil {
|
||||
return util.ErrorResponse(err)
|
||||
}
|
||||
if err := r.QueryServersInRoomAtEvent(req.Context(), &request, &response); err != nil {
|
||||
return util.ErrorResponse(err)
|
||||
}
|
||||
return util.JSONResponse{Code: http.StatusOK, JSON: &response}
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
|
|
@ -16,7 +16,6 @@ package storage
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net/url"
|
||||
|
||||
"github.com/matrix-org/dendrite/roomserver/api"
|
||||
|
@ -61,12 +60,12 @@ type Database interface {
|
|||
func Open(dataSourceName string) (Database, error) {
|
||||
uri, err := url.Parse(dataSourceName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return postgres.Open(dataSourceName)
|
||||
}
|
||||
switch uri.Scheme {
|
||||
case "postgres":
|
||||
return postgres.Open(dataSourceName)
|
||||
default:
|
||||
return nil, errors.New("unknown schema")
|
||||
return postgres.Open(dataSourceName)
|
||||
}
|
||||
}
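For reference, the scheme dispatch in Open can be exercised on its own with nothing but the standard library. The connection string below is a placeholder and the postgres branch is stubbed out rather than calling the real postgres.Open:

package main

import (
	"errors"
	"fmt"
	"net/url"
)

// openByScheme mirrors the dispatch above: parse the data source name and pick
// a backend based on the URI scheme. Only postgres is recognised here.
func openByScheme(dataSourceName string) (string, error) {
	uri, err := url.Parse(dataSourceName)
	if err != nil {
		return "", err
	}
	switch uri.Scheme {
	case "postgres":
		return "postgres backend selected", nil // real code: postgres.Open(dataSourceName)
	default:
		return "", errors.New("unknown scheme: " + uri.Scheme)
	}
}

func main() {
	// Placeholder DSN for illustration only.
	res, err := openByScheme("postgres://dendrite:itsasecret@localhost/syncapi?sslmode=disable")
	fmt.Println(res, err)
}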
|
||||
|
|
|
@ -1,45 +1,89 @@
|
|||
#! /bin/bash
|
||||
#
|
||||
# Parses a results.tap file from SyTest output and a file containing test names (a test whitelist)
|
||||
# and checks whether a test name that exists in the whitelist (that should pass), failed or not.
|
||||
#
|
||||
# An optional blacklist file, also containing test names, can be provided. If a test name is
# present in it, the script will not error even if the test is in the whitelist file and failed.
|
||||
#
|
||||
# For each of these files, lines starting with '#' are ignored.
|
||||
#
|
||||
# Usage ./show-expected-fail-tests.sh results.tap whitelist [blacklist]
|
||||
|
||||
results_file=$1
|
||||
testfile=$2
|
||||
whitelist_file=$2
|
||||
blacklist_file=$3
|
||||
|
||||
fail_build=0
|
||||
|
||||
if [ $# -lt 2 ]; then
|
||||
echo "Usage: $0 results.tap whitelist [blacklist]"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ! -f "$results_file" ]; then
|
||||
echo "ERROR: Specified results file ${results_file} doesn't exist."
|
||||
echo "ERROR: Specified results file '${results_file}' doesn't exist."
|
||||
fail_build=1
|
||||
fi
|
||||
|
||||
if [ ! -f "$testfile" ]; then
|
||||
echo "ERROR: Specified testfile ${testfile} doesn't exist."
|
||||
if [ ! -f "$whitelist_file" ]; then
|
||||
echo "ERROR: Specified test whitelist '${whitelist_file}' doesn't exist."
|
||||
fail_build=1
|
||||
fi
|
||||
|
||||
blacklisted_tests=()
|
||||
|
||||
# Check if a blacklist file was provided
|
||||
if [ $# -eq 3 ]; then
|
||||
# Read test blacklist file
|
||||
if [ ! -f "$blacklist_file" ]; then
|
||||
echo "ERROR: Specified test blacklist file '${blacklist_file}' doesn't exist."
|
||||
fail_build=1
|
||||
fi
|
||||
|
||||
# Read each line, ignoring those that start with '#'
|
||||
blacklisted_tests=""
|
||||
search_non_comments=$(grep -v '^#' ${blacklist_file})
|
||||
while read -r line ; do
|
||||
# Record the blacklisted test name
|
||||
blacklisted_tests+=("${line}")
|
||||
done <<< "${search_non_comments}" # This allows us to edit blacklisted_tests in the while loop
|
||||
fi
|
||||
|
||||
[ "$fail_build" = 0 ] || exit 1
|
||||
|
||||
passed_but_expected_fail=$(grep ' # TODO passed but expected fail' ${results_file} | sed -E 's/^ok [0-9]+ (\(expected fail\) )?//' | sed -E 's/( \([0-9]+ subtests\))? # TODO passed but expected fail$//')
|
||||
tests_to_add=""
|
||||
already_in_testfile=""
|
||||
already_in_whitelist=""
|
||||
|
||||
while read -r test_id; do
|
||||
[ "${test_id}" = "" ] && continue
|
||||
grep "${test_id}" "${testfile}" > /dev/null 2>&1
|
||||
while read -r test_name; do
|
||||
# Ignore empty lines
|
||||
[ "${test_name}" = "" ] && continue
|
||||
|
||||
grep "${test_name}" "${whitelist_file}" > /dev/null 2>&1
|
||||
if [ "$?" != "0" ]; then
|
||||
tests_to_add="${tests_to_add}${test_id}\n"
|
||||
# Check if this test name is blacklisted
|
||||
if printf '%s\n' "${blacklisted_tests[@]}" | grep -q -P "^${test_name}$"; then
|
||||
# Don't notify about this test
|
||||
continue
|
||||
fi
|
||||
|
||||
# Append this test_name to the existing list
|
||||
tests_to_add="${tests_to_add}${test_name}\n"
|
||||
fail_build=1
|
||||
else
|
||||
already_in_testfile="${already_in_testfile}${test_id}\n"
|
||||
already_in_whitelist="${already_in_whitelist}${test_name}\n"
|
||||
fi
|
||||
done <<< "${passed_but_expected_fail}"
|
||||
|
||||
if [ -n "${tests_to_add}" ]; then
|
||||
echo "ERROR: The following passed tests are not present in testfile. Please append them to the file:"
|
||||
echo "ERROR: The following passed tests are not present in $2. Please append them to the file:"
|
||||
echo -e "${tests_to_add}"
|
||||
fi
|
||||
|
||||
if [ -n "${already_in_testfile}" ]; then
|
||||
echo "WARN: Tests in testfile still marked as expected fail:"
|
||||
echo -e "${already_in_testfile}"
|
||||
if [ -n "${already_in_whitelist}" ]; then
|
||||
echo "WARN: Tests in the whitelist still marked as expected fail:"
|
||||
echo -e "${already_in_whitelist}"
|
||||
fi
|
||||
|
||||
exit ${fail_build}
|
||||
|
|
|
@ -90,7 +90,7 @@ func (s *OutputClientDataConsumer) onMessage(msg *sarama.ConsumerMessage) error
|
|||
}).Panicf("could not save account data")
|
||||
}
|
||||
|
||||
s.notifier.OnNewEvent(nil, "", []string{string(msg.Key)}, types.SyncPosition{PDUPosition: pduPos})
|
||||
s.notifier.OnNewEvent(nil, "", []string{string(msg.Key)}, types.PaginationToken{PDUPosition: pduPos})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -133,6 +133,7 @@ func (s *OutputRoomEventConsumer) onNewRoomEvent(
|
|||
msg.AddsStateEventIDs,
|
||||
msg.RemovesStateEventIDs,
|
||||
msg.TransactionID,
|
||||
false,
|
||||
)
|
||||
if err != nil {
|
||||
// panic rather than continue with an inconsistent database
|
||||
|
@ -144,7 +145,7 @@ func (s *OutputRoomEventConsumer) onNewRoomEvent(
|
|||
}).Panicf("roomserver output log: write event failure")
|
||||
return nil
|
||||
}
|
||||
s.notifier.OnNewEvent(&ev, "", nil, types.SyncPosition{PDUPosition: pduPos})
|
||||
s.notifier.OnNewEvent(&ev, "", nil, types.PaginationToken{PDUPosition: pduPos})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -161,7 +162,7 @@ func (s *OutputRoomEventConsumer) onNewInviteEvent(
|
|||
}).Panicf("roomserver output log: write invite failure")
|
||||
return nil
|
||||
}
|
||||
s.notifier.OnNewEvent(&msg.Event, "", nil, types.SyncPosition{PDUPosition: pduPos})
|
||||
s.notifier.OnNewEvent(&msg.Event, "", nil, types.PaginationToken{PDUPosition: pduPos})
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -63,7 +63,12 @@ func NewOutputTypingEventConsumer(
|
|||
// Start consuming from typing api
|
||||
func (s *OutputTypingEventConsumer) Start() error {
|
||||
s.db.SetTypingTimeoutCallback(func(userID, roomID string, latestSyncPosition int64) {
|
||||
s.notifier.OnNewEvent(nil, roomID, nil, types.SyncPosition{TypingPosition: latestSyncPosition})
|
||||
s.notifier.OnNewEvent(
|
||||
nil, roomID, nil,
|
||||
types.PaginationToken{
|
||||
EDUTypingPosition: types.StreamPosition(latestSyncPosition),
|
||||
},
|
||||
)
|
||||
})
|
||||
|
||||
return s.typingConsumer.Start()
|
||||
|
@ -83,7 +88,7 @@ func (s *OutputTypingEventConsumer) onMessage(msg *sarama.ConsumerMessage) error
|
|||
"typing": output.Event.Typing,
|
||||
}).Debug("received data from typing server")
|
||||
|
||||
var typingPos int64
|
||||
var typingPos types.StreamPosition
|
||||
typingEvent := output.Event
|
||||
if typingEvent.Typing {
|
||||
typingPos = s.db.AddTypingUser(typingEvent.UserID, typingEvent.RoomID, output.ExpireTime)
|
||||
|
@ -91,6 +96,6 @@ func (s *OutputTypingEventConsumer) onMessage(msg *sarama.ConsumerMessage) error
|
|||
typingPos = s.db.RemoveTypingUser(typingEvent.UserID, typingEvent.RoomID)
|
||||
}
|
||||
|
||||
s.notifier.OnNewEvent(nil, output.Event.RoomID, nil, types.SyncPosition{TypingPosition: typingPos})
|
||||
s.notifier.OnNewEvent(nil, output.Event.RoomID, nil, types.PaginationToken{EDUTypingPosition: typingPos})
|
||||
return nil
|
||||
}
|
||||
|
|
487 syncapi/routing/messages.go Normal file
|
@ -0,0 +1,487 @@
|
|||
// Copyright 2018 New Vector Ltd
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package routing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strconv"
|
||||
|
||||
"github.com/matrix-org/dendrite/clientapi/httputil"
|
||||
"github.com/matrix-org/dendrite/clientapi/jsonerror"
|
||||
"github.com/matrix-org/dendrite/common/config"
|
||||
"github.com/matrix-org/dendrite/roomserver/api"
|
||||
"github.com/matrix-org/dendrite/syncapi/storage"
|
||||
"github.com/matrix-org/dendrite/syncapi/types"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/matrix-org/util"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type messagesReq struct {
|
||||
ctx context.Context
|
||||
db storage.Database
|
||||
queryAPI api.RoomserverQueryAPI
|
||||
federation *gomatrixserverlib.FederationClient
|
||||
cfg *config.Dendrite
|
||||
roomID string
|
||||
from *types.PaginationToken
|
||||
to *types.PaginationToken
|
||||
wasToProvided bool
|
||||
limit int
|
||||
backwardOrdering bool
|
||||
}
|
||||
|
||||
type messagesResp struct {
|
||||
Start string `json:"start"`
|
||||
End string `json:"end"`
|
||||
Chunk []gomatrixserverlib.ClientEvent `json:"chunk"`
|
||||
}
|
||||
|
||||
const defaultMessagesLimit = 10
|
||||
|
||||
// OnIncomingMessagesRequest implements the /messages endpoint from the
|
||||
// client-server API.
|
||||
// See: https://matrix.org/docs/spec/client_server/latest.html#get-matrix-client-r0-rooms-roomid-messages
|
||||
func OnIncomingMessagesRequest(
|
||||
req *http.Request, db storage.Database, roomID string,
|
||||
federation *gomatrixserverlib.FederationClient,
|
||||
queryAPI api.RoomserverQueryAPI,
|
||||
cfg *config.Dendrite,
|
||||
) util.JSONResponse {
|
||||
var err error
|
||||
|
||||
// Extract parameters from the request's URL.
|
||||
// Pagination tokens.
|
||||
from, err := types.NewPaginationTokenFromString(req.URL.Query().Get("from"))
|
||||
if err != nil {
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
JSON: jsonerror.InvalidArgumentValue("Invalid from parameter: " + err.Error()),
|
||||
}
|
||||
}
|
||||
|
||||
// Direction to return events from.
|
||||
dir := req.URL.Query().Get("dir")
|
||||
if dir != "b" && dir != "f" {
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
JSON: jsonerror.MissingArgument("Bad or missing dir query parameter (should be either 'b' or 'f')"),
|
||||
}
|
||||
}
|
||||
// A boolean is easier to handle in this case, especially since dir is sure
|
||||
// to have one of the two accepted values (so dir == "f" <=> !backwardOrdering).
|
||||
backwardOrdering := (dir == "b")
|
||||
|
||||
// Pagination tokens. To is optional, and its default value depends on the
|
||||
// direction ("b" or "f").
|
||||
var to *types.PaginationToken
|
||||
wasToProvided := true
|
||||
if s := req.URL.Query().Get("to"); len(s) > 0 {
|
||||
to, err = types.NewPaginationTokenFromString(s)
|
||||
if err != nil {
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
JSON: jsonerror.InvalidArgumentValue("Invalid to parameter: " + err.Error()),
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// If "to" isn't provided, it defaults to either the earliest stream
|
||||
// position (if we're going backward) or to the latest one (if we're
|
||||
// going forward).
|
||||
to, err = setToDefault(req.Context(), db, backwardOrdering, roomID)
|
||||
if err != nil {
|
||||
return httputil.LogThenError(req, err)
|
||||
}
|
||||
wasToProvided = false
|
||||
}
|
||||
|
||||
// Maximum number of events to return; defaults to 10.
|
||||
limit := defaultMessagesLimit
|
||||
if len(req.URL.Query().Get("limit")) > 0 {
|
||||
limit, err = strconv.Atoi(req.URL.Query().Get("limit"))
|
||||
|
||||
if err != nil {
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
JSON: jsonerror.InvalidArgumentValue("limit could not be parsed into an integer: " + err.Error()),
|
||||
}
|
||||
}
|
||||
}
|
||||
// TODO: Implement filtering (#587)
|
||||
|
||||
// Check the room ID's format.
|
||||
if _, _, err = gomatrixserverlib.SplitID('!', roomID); err != nil {
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
JSON: jsonerror.MissingArgument("Bad room ID: " + err.Error()),
|
||||
}
|
||||
}
|
||||
|
||||
mReq := messagesReq{
|
||||
ctx: req.Context(),
|
||||
db: db,
|
||||
queryAPI: queryAPI,
|
||||
federation: federation,
|
||||
cfg: cfg,
|
||||
roomID: roomID,
|
||||
from: from,
|
||||
to: to,
|
||||
wasToProvided: wasToProvided,
|
||||
limit: limit,
|
||||
backwardOrdering: backwardOrdering,
|
||||
}
|
||||
|
||||
clientEvents, start, end, err := mReq.retrieveEvents()
|
||||
if err != nil {
|
||||
return httputil.LogThenError(req, err)
|
||||
}
|
||||
|
||||
// Respond with the events.
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusOK,
|
||||
JSON: messagesResp{
|
||||
Chunk: clientEvents,
|
||||
Start: start.String(),
|
||||
End: end.String(),
|
||||
},
|
||||
}
|
||||
}
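The query parameters parsed above correspond to a client request along the following lines; the homeserver URL, room ID, access token, and pagination token are placeholders (the token would normally be the "end" value from a previous /messages or /sync response):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Placeholder values for illustration only.
	base := "https://localhost:8448/_matrix/client/r0/rooms/" +
		url.PathEscape("!someroom:example.com") + "/messages"

	params := url.Values{}
	params.Set("from", "PREVIOUS_END_TOKEN") // pagination token from an earlier response
	params.Set("dir", "b")                   // paginate backwards through the room history
	params.Set("limit", "10")                // matches defaultMessagesLimit above
	params.Set("access_token", "ACCESS_TOKEN")

	fmt.Println(base + "?" + params.Encode())
}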
|
||||
|
||||
// retrieveEvents retrieves events from the local database for a request on
// /messages. If there aren't enough events to retrieve, it asks another
// homeserver in the room for older events.
|
||||
// Returns an error if there was an issue talking to the database or with the
|
||||
// remote homeserver.
|
||||
func (r *messagesReq) retrieveEvents() (
|
||||
clientEvents []gomatrixserverlib.ClientEvent, start,
|
||||
end *types.PaginationToken, err error,
|
||||
) {
|
||||
// Retrieve the events from the local database.
|
||||
streamEvents, err := r.db.GetEventsInRange(
|
||||
r.ctx, r.from, r.to, r.roomID, r.limit, r.backwardOrdering,
|
||||
)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var events []gomatrixserverlib.Event
|
||||
|
||||
// There can be two reasons for streamEvents to be empty: either we've
|
||||
// reached the oldest event in the room (or the most recent one, depending
|
||||
// on the ordering), or we've reached a backward extremity.
|
||||
if len(streamEvents) == 0 {
|
||||
if events, err = r.handleEmptyEventsSlice(); err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
if events, err = r.handleNonEmptyEventsSlice(streamEvents); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// If we didn't get any event, we don't need to proceed any further.
|
||||
if len(events) == 0 {
|
||||
return []gomatrixserverlib.ClientEvent{}, r.from, r.to, nil
|
||||
}
|
||||
|
||||
// Sort the events to ensure we send them in the right order. We currently
|
||||
// do that based on the event's timestamp.
|
||||
if r.backwardOrdering {
|
||||
sort.SliceStable(events, func(i int, j int) bool {
|
||||
// Backward ordering is antichronological (latest event to oldest
|
||||
// one).
|
||||
return sortEvents(&(events[j]), &(events[i]))
|
||||
})
|
||||
} else {
|
||||
sort.SliceStable(events, func(i int, j int) bool {
|
||||
// Forward ordering is chronological (oldest event to latest one).
|
||||
return sortEvents(&(events[i]), &(events[j]))
|
||||
})
|
||||
}
|
||||
|
||||
// Convert all of the events into client events.
|
||||
clientEvents = gomatrixserverlib.ToClientEvents(events, gomatrixserverlib.FormatAll)
|
||||
// Get the position of the first and the last event in the room's topology.
|
||||
// This position is currently determined by the event's depth, so we could
|
||||
// also use it instead of retrieving from the database. However, if we ever
|
||||
// change the way topological positions are defined (as depth isn't the most
|
||||
// reliable way to define it), it would be easier and less troublesome to
|
||||
// only have to change it in one place, i.e. the database.
|
||||
startPos, err := r.db.EventPositionInTopology(
|
||||
r.ctx, events[0].EventID(),
|
||||
)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
endPos, err := r.db.EventPositionInTopology(
|
||||
r.ctx, events[len(events)-1].EventID(),
|
||||
)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// Generate pagination tokens to send to the client using the positions
|
||||
// retrieved previously.
|
||||
start = types.NewPaginationTokenFromTypeAndPosition(
|
||||
types.PaginationTokenTypeTopology, startPos, 0,
|
||||
)
|
||||
end = types.NewPaginationTokenFromTypeAndPosition(
|
||||
types.PaginationTokenTypeTopology, endPos, 0,
|
||||
)
|
||||
|
||||
if r.backwardOrdering {
|
||||
// A stream/topological position is a cursor located between two events.
|
||||
// While they are identified in the code by the event on their right (if
|
||||
// we consider a left to right chronological order), tokens need to refer
|
||||
// to them by the event on their left, therefore we need to decrement the
|
||||
// end position we send in the response if we're going backward.
|
||||
end.PDUPosition--
|
||||
}
|
||||
|
||||
// The lowest token value is 1, therefore we need to manually set it to that
|
||||
// value if we're below it.
|
||||
if end.PDUPosition < types.StreamPosition(1) {
|
||||
end.PDUPosition = types.StreamPosition(1)
|
||||
}
|
||||
|
||||
return clientEvents, start, end, err
|
||||
}
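The end-of-range adjustment above can be read in isolation as follows; StreamPosition stands in for types.StreamPosition, and the clamp reflects the comment that the lowest valid token value is 1:

package main

import "fmt"

type StreamPosition int64

// adjustEndPosition reproduces the fix-up above: when paginating backwards the
// end token must refer to the event on the left of the cursor, so the position
// is decremented, and it is clamped so it never drops below 1.
func adjustEndPosition(end StreamPosition, backwardOrdering bool) StreamPosition {
	if backwardOrdering {
		end--
	}
	if end < 1 {
		end = 1
	}
	return end
}

func main() {
	fmt.Println(adjustEndPosition(5, true))  // 4
	fmt.Println(adjustEndPosition(1, true))  // clamped to 1
	fmt.Println(adjustEndPosition(7, false)) // unchanged: 7
}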
|
||||
|
||||
// handleEmptyEventsSlice handles the case where the initial request to the
|
||||
// database returned an empty slice of events. It does so by checking whether
|
||||
// the set is empty because we've reached a backward extremity, and if that is
|
||||
// the case, by retrieving as many events as requested by backfilling from
|
||||
// another homeserver.
|
||||
// Returns an error if there was an issue talking with the database or
|
||||
// backfilling.
|
||||
func (r *messagesReq) handleEmptyEventsSlice() (
|
||||
events []gomatrixserverlib.Event, err error,
|
||||
) {
|
||||
backwardExtremities, err := r.db.BackwardExtremitiesForRoom(r.ctx, r.roomID)
|
||||
|
||||
// Check if we have backward extremities for this room.
|
||||
if len(backwardExtremities) > 0 {
|
||||
// If so, retrieve as many events as needed through backfilling.
|
||||
events, err = r.backfill(backwardExtremities, r.limit)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
// If not, it means the slice was empty because we reached the room's
|
||||
// creation, so return an empty slice.
|
||||
events = []gomatrixserverlib.Event{}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// handleNonEmptyEventsSlice handles the case where the initial request to the
|
||||
// database returned a non-empty slice of events. It does so by checking whether
|
||||
// events are missing from the expected result, and retrieves missing events
|
||||
// through backfilling if needed.
|
||||
// Returns an error if there was an issue while backfilling.
|
||||
func (r *messagesReq) handleNonEmptyEventsSlice(streamEvents []types.StreamEvent) (
|
||||
events []gomatrixserverlib.Event, err error,
|
||||
) {
|
||||
// Check if we have enough events.
|
||||
isSetLargeEnough := true
|
||||
if len(streamEvents) < r.limit {
|
||||
if r.backwardOrdering {
|
||||
if r.wasToProvided {
|
||||
// The condition in the SQL query is a strict "greater than" so
|
||||
// we need to check against to-1.
|
||||
streamPos := types.StreamPosition(streamEvents[len(streamEvents)-1].StreamPosition)
|
||||
isSetLargeEnough = (r.to.PDUPosition-1 == streamPos)
|
||||
}
|
||||
} else {
|
||||
streamPos := types.StreamPosition(streamEvents[0].StreamPosition)
|
||||
isSetLargeEnough = (r.from.PDUPosition-1 == streamPos)
|
||||
}
|
||||
}
|
||||
|
||||
// Check if the slice contains a backward extremity.
|
||||
backwardExtremities, err := r.db.BackwardExtremitiesForRoom(r.ctx, r.roomID)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Backfill is needed if we've reached a backward extremity and need more
|
||||
// events. It's only needed if the direction is backward.
|
||||
if len(backwardExtremities) > 0 && !isSetLargeEnough && r.backwardOrdering {
|
||||
var pdus []gomatrixserverlib.Event
|
||||
// Only ask the remote server for enough events to reach the limit.
|
||||
pdus, err = r.backfill(backwardExtremities, r.limit-len(streamEvents))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Append the PDUs to the list to send back to the client.
|
||||
events = append(events, pdus...)
|
||||
}
|
||||
|
||||
// Append the events we previously retrieved locally.
|
||||
events = append(events, r.db.StreamEventsToEvents(nil, streamEvents)...)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// containsBackwardExtremity checks if a slice of StreamEvent contains a
|
||||
// backward extremity. It does so by selecting the earliest event in the slice
|
||||
// and by checking the presence in the database of all of its parent events, and
|
||||
// considers the event itself a backward extremity if at least one of the parent
|
||||
// events doesn't exist in the database.
|
||||
// Returns an error if there was an issue with talking to the database.
|
||||
//
|
||||
// This function is unused but currently set to nolint for now until we are
|
||||
// absolutely sure that the changes in matrix-org/dendrite#847 are behaving
|
||||
// properly.
|
||||
// nolint:unused
|
||||
func (r *messagesReq) containsBackwardExtremity(events []types.StreamEvent) (bool, error) {
|
||||
// Select the earliest retrieved event.
|
||||
var ev *types.StreamEvent
|
||||
if r.backwardOrdering {
|
||||
ev = &(events[len(events)-1])
|
||||
} else {
|
||||
ev = &(events[0])
|
||||
}
|
||||
// Get the earliest retrieved event's parents.
|
||||
prevIDs := ev.PrevEventIDs()
|
||||
prevs, err := r.db.Events(r.ctx, prevIDs)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
// Check if we have all of the events we requested. If not, it means we've
|
||||
// reached a backward extremity.
|
||||
var eventInDB bool
|
||||
var id string
|
||||
// Iterate over the IDs we used in the request.
|
||||
for _, id = range prevIDs {
|
||||
eventInDB = false
|
||||
// Iterate over the events we got in response.
|
||||
for _, ev := range prevs {
|
||||
if ev.EventID() == id {
|
||||
eventInDB = true
|
||||
}
|
||||
}
|
||||
// One occurrence of one of the event's parents not being present in the
|
||||
// database is enough to say that the event is a backward extremity.
|
||||
if !eventInDB {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// backfill performs a backfill request over the federation on another
|
||||
// homeserver in the room.
|
||||
// See: https://matrix.org/docs/spec/server_server/latest#get-matrix-federation-v1-backfill-roomid
|
||||
// It also stores the PDUs retrieved from the remote homeserver's response in
// the database.
// Returns an empty slice if the remote homeserver didn't return any events,
// or if there is no remote homeserver to contact.
|
||||
// Returns an error if there was an issue with retrieving the list of servers in
|
||||
// the room or sending the request.
|
||||
func (r *messagesReq) backfill(fromEventIDs []string, limit int) ([]gomatrixserverlib.Event, error) {
|
||||
// Query the list of servers in the room when one of the backward extremities
|
||||
// was sent.
|
||||
var serversResponse api.QueryServersInRoomAtEventResponse
|
||||
serversRequest := api.QueryServersInRoomAtEventRequest{
|
||||
RoomID: r.roomID,
|
||||
EventID: fromEventIDs[0],
|
||||
}
|
||||
if err := r.queryAPI.QueryServersInRoomAtEvent(r.ctx, &serversRequest, &serversResponse); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Use the first server from the response, except if that server is us.
|
||||
// In that case, use the second one if the roomserver responded with
|
||||
// enough servers. If not, use an empty string to prevent the backfill
|
||||
// from happening as there's no server to direct the request towards.
|
||||
// TODO: Be smarter at selecting the server to direct the request
|
||||
// towards.
|
||||
srvToBackfillFrom := serversResponse.Servers[0]
|
||||
if srvToBackfillFrom == r.cfg.Matrix.ServerName {
|
||||
if len(serversResponse.Servers) > 1 {
|
||||
srvToBackfillFrom = serversResponse.Servers[1]
|
||||
} else {
|
||||
srvToBackfillFrom = gomatrixserverlib.ServerName("")
|
||||
log.Warn("Not enough servers to backfill from")
|
||||
}
|
||||
}
|
||||
|
||||
pdus := make([]gomatrixserverlib.Event, 0)
|
||||
|
||||
// If the roomserver responded with at least one server that isn't us,
|
||||
// send it a request for backfill.
|
||||
if len(srvToBackfillFrom) > 0 {
|
||||
txn, err := r.federation.Backfill(
|
||||
r.ctx, srvToBackfillFrom, r.roomID, limit, fromEventIDs,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pdus = txn.PDUs
|
||||
|
||||
// Store the events in the database, while marking them as unfit to show
|
||||
// up in responses to sync requests.
|
||||
for _, pdu := range pdus {
|
||||
if _, err = r.db.WriteEvent(
|
||||
r.ctx, &pdu, []gomatrixserverlib.Event{}, []string{}, []string{},
|
||||
nil, true,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return pdus, nil
|
||||
}
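The server-selection rule described above (use the first server the roomserver returned, fall back to the second if the first one is us, give up otherwise) can be written as a small standalone helper; ServerName here is a stand-in for gomatrixserverlib.ServerName:

package main

import "fmt"

// ServerName is a stand-in for gomatrixserverlib.ServerName.
type ServerName string

// pickBackfillServer mirrors the selection above: prefer the first server the
// roomserver returned, fall back to the second if the first is ourselves, and
// return "" if there is nobody else to ask.
func pickBackfillServer(servers []ServerName, ourName ServerName) ServerName {
	if len(servers) == 0 {
		return ""
	}
	if servers[0] != ourName {
		return servers[0]
	}
	if len(servers) > 1 {
		return servers[1]
	}
	return ""
}

func main() {
	fmt.Println(pickBackfillServer([]ServerName{"example.com", "other.org"}, "example.com")) // other.org
	fmt.Println(pickBackfillServer([]ServerName{"example.com"}, "example.com"))              // "" (nobody to ask)
	fmt.Println(pickBackfillServer([]ServerName{"remote.net"}, "example.com"))               // remote.net
}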
|
||||
|
||||
// setToDefault returns the default value for the "to" query parameter of a
|
||||
// request to /messages if not provided. It defaults to either the earliest
|
||||
// topological position (if we're going backward) or to the latest one (if we're
|
||||
// going forward).
|
||||
// Returns an error if there was an issue with retrieving the latest position
|
||||
// from the database
|
||||
func setToDefault(
|
||||
ctx context.Context, db storage.Database, backwardOrdering bool,
|
||||
roomID string,
|
||||
) (to *types.PaginationToken, err error) {
|
||||
if backwardOrdering {
|
||||
to = types.NewPaginationTokenFromTypeAndPosition(types.PaginationTokenTypeTopology, 1, 0)
|
||||
} else {
|
||||
var pos types.StreamPosition
|
||||
pos, err = db.MaxTopologicalPosition(ctx, roomID)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
to = types.NewPaginationTokenFromTypeAndPosition(types.PaginationTokenTypeTopology, pos, 0)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// sortEvents is a function to give to sort.SliceStable, and compares the
|
||||
// timestamp of two Matrix events.
|
||||
// Returns true if the first event happened before the second one, false
|
||||
// otherwise.
|
||||
func sortEvents(e1 *gomatrixserverlib.Event, e2 *gomatrixserverlib.Event) bool {
|
||||
t := e1.OriginServerTS().Time()
|
||||
return e2.OriginServerTS().Time().After(t)
|
||||
}
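In isolation, the comparator used with sort.SliceStable above is just an ordering on origin_server_ts; a rough sketch with plain timestamps in place of gomatrixserverlib events:

package main

import (
	"fmt"
	"sort"
	"time"
)

func main() {
	// Stand-ins for events: just their origin_server_ts values.
	ts := []time.Time{
		time.Unix(300, 0),
		time.Unix(100, 0),
		time.Unix(200, 0),
	}

	// Chronological (forward) ordering: earlier timestamps first, mirroring
	// sortEvents(&events[i], &events[j]).
	sort.SliceStable(ts, func(i, j int) bool {
		return ts[j].After(ts[i])
	})

	fmt.Println(ts)
}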
|
|
@ -22,8 +22,11 @@ import (
|
|||
"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
|
||||
"github.com/matrix-org/dendrite/clientapi/auth/storage/devices"
|
||||
"github.com/matrix-org/dendrite/common"
|
||||
"github.com/matrix-org/dendrite/common/config"
|
||||
"github.com/matrix-org/dendrite/roomserver/api"
|
||||
"github.com/matrix-org/dendrite/syncapi/storage"
|
||||
"github.com/matrix-org/dendrite/syncapi/sync"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/matrix-org/util"
|
||||
)
|
||||
|
||||
|
@ -34,7 +37,12 @@ const pathPrefixR0 = "/_matrix/client/r0"
|
|||
// Due to Setup being used to call many other functions, a gocyclo nolint is
|
||||
// applied:
|
||||
// nolint: gocyclo
|
||||
func Setup(apiMux *mux.Router, srp *sync.RequestPool, syncDB storage.Database, deviceDB *devices.Database) {
|
||||
func Setup(
|
||||
apiMux *mux.Router, srp *sync.RequestPool, syncDB storage.Database,
|
||||
deviceDB *devices.Database, federation *gomatrixserverlib.FederationClient,
|
||||
queryAPI api.RoomserverQueryAPI,
|
||||
cfg *config.Dendrite,
|
||||
) {
|
||||
r0mux := apiMux.PathPrefix(pathPrefixR0).Subrouter()
|
||||
|
||||
authData := auth.Data{
|
||||
|
@ -71,4 +79,12 @@ func Setup(apiMux *mux.Router, srp *sync.RequestPool, syncDB storage.Database, d
|
|||
}
|
||||
return OnIncomingStateTypeRequest(req, syncDB, vars["roomID"], vars["type"], vars["stateKey"])
|
||||
})).Methods(http.MethodGet, http.MethodOptions)
|
||||
|
||||
r0mux.Handle("/rooms/{roomID}/messages", common.MakeAuthAPI("room_messages", authData, func(req *http.Request, device *authtypes.Device) util.JSONResponse {
|
||||
vars, err := common.URLDecodeMapValues(mux.Vars(req))
|
||||
if err != nil {
|
||||
return util.ErrorResponse(err)
|
||||
}
|
||||
return OnIncomingMessagesRequest(req, syncDB, vars["roomID"], federation, queryAPI, cfg)
|
||||
})).Methods(http.MethodGet, http.MethodOptions)
|
||||
}
|
||||
|
|
|
@ -22,6 +22,7 @@ import (
|
|||
"github.com/matrix-org/dendrite/clientapi/jsonerror"
|
||||
"github.com/matrix-org/dendrite/syncapi/storage"
|
||||
"github.com/matrix-org/dendrite/syncapi/types"
|
||||
"github.com/matrix-org/gomatrix"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/matrix-org/util"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
@ -44,7 +45,7 @@ func OnIncomingStateRequest(req *http.Request, db storage.Database, roomID strin
|
|||
// TODO(#287): Auth request and handle the case where the user has left (where
|
||||
// we should return the state at the point they left)
|
||||
|
||||
stateFilterPart := gomatrixserverlib.DefaultFilterPart()
|
||||
stateFilterPart := gomatrix.DefaultFilterPart()
|
||||
// TODO: stateFilterPart should not limit the number of state events (or only limits abusive number of events)
|
||||
|
||||
stateEvents, err := db.GetStateEventsForRoom(req.Context(), roomID, &stateFilterPart)
|
||||
|
|
|
@ -21,7 +21,8 @@ import (
|
|||
|
||||
"github.com/lib/pq"
|
||||
"github.com/matrix-org/dendrite/common"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/matrix-org/dendrite/syncapi/types"
|
||||
"github.com/matrix-org/gomatrix"
|
||||
)
|
||||
|
||||
const accountDataSchema = `
|
||||
|
@ -89,7 +90,7 @@ func (s *accountDataStatements) prepare(db *sql.DB) (err error) {
|
|||
func (s *accountDataStatements) insertAccountData(
|
||||
ctx context.Context,
|
||||
userID, roomID, dataType string,
|
||||
) (pos int64, err error) {
|
||||
) (pos types.StreamPosition, err error) {
|
||||
err = s.insertAccountDataStmt.QueryRowContext(ctx, userID, roomID, dataType).Scan(&pos)
|
||||
return
|
||||
}
|
||||
|
@ -97,8 +98,8 @@ func (s *accountDataStatements) insertAccountData(
|
|||
func (s *accountDataStatements) selectAccountDataInRange(
|
||||
ctx context.Context,
|
||||
userID string,
|
||||
oldPos, newPos int64,
|
||||
accountDataFilterPart *gomatrixserverlib.FilterPart,
|
||||
oldPos, newPos types.StreamPosition,
|
||||
accountDataFilterPart *gomatrix.FilterPart,
|
||||
) (data map[string][]string, err error) {
|
||||
data = make(map[string][]string)
|
||||
|
||||
|
|
119 syncapi/storage/postgres/backward_extremities_table.go Normal file
|
@ -0,0 +1,119 @@
|
|||
// Copyright 2018 New Vector Ltd
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
)
|
||||
|
||||
const backwardExtremitiesSchema = `
|
||||
-- Stores the backward extremities for each room, i.e. events whose previous events we have not yet retrieved.
|
||||
CREATE TABLE IF NOT EXISTS syncapi_backward_extremities (
|
||||
-- The 'room_id' key for the event.
|
||||
room_id TEXT NOT NULL,
|
||||
-- The event ID for the event.
|
||||
event_id TEXT NOT NULL,
|
||||
|
||||
PRIMARY KEY(room_id, event_id)
|
||||
);
|
||||
`
|
||||
|
||||
const insertBackwardExtremitySQL = "" +
|
||||
"INSERT INTO syncapi_backward_extremities (room_id, event_id)" +
|
||||
" VALUES ($1, $2)" +
|
||||
" ON CONFLICT DO NOTHING"
|
||||
|
||||
const selectBackwardExtremitiesForRoomSQL = "" +
|
||||
"SELECT event_id FROM syncapi_backward_extremities WHERE room_id = $1"
|
||||
|
||||
const isBackwardExtremitySQL = "" +
|
||||
"SELECT EXISTS (" +
|
||||
" SELECT TRUE FROM syncapi_backward_extremities" +
|
||||
" WHERE room_id = $1 AND event_id = $2" +
|
||||
")"
|
||||
|
||||
const deleteBackwardExtremitySQL = "" +
|
||||
"DELETE FROM syncapi_backward_extremities WHERE room_id = $1 AND event_id = $2"
|
||||
|
||||
type backwardExtremitiesStatements struct {
|
||||
insertBackwardExtremityStmt *sql.Stmt
|
||||
selectBackwardExtremitiesForRoomStmt *sql.Stmt
|
||||
isBackwardExtremityStmt *sql.Stmt
|
||||
deleteBackwardExtremityStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func (s *backwardExtremitiesStatements) prepare(db *sql.DB) (err error) {
|
||||
_, err = db.Exec(backwardExtremitiesSchema)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if s.insertBackwardExtremityStmt, err = db.Prepare(insertBackwardExtremitySQL); err != nil {
|
||||
return
|
||||
}
|
||||
if s.selectBackwardExtremitiesForRoomStmt, err = db.Prepare(selectBackwardExtremitiesForRoomSQL); err != nil {
|
||||
return
|
||||
}
|
||||
if s.isBackwardExtremityStmt, err = db.Prepare(isBackwardExtremitySQL); err != nil {
|
||||
return
|
||||
}
|
||||
if s.deleteBackwardExtremityStmt, err = db.Prepare(deleteBackwardExtremitySQL); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (s *backwardExtremitiesStatements) insertsBackwardExtremity(
|
||||
ctx context.Context, roomID, eventID string,
|
||||
) (err error) {
|
||||
_, err = s.insertBackwardExtremityStmt.ExecContext(ctx, roomID, eventID)
|
||||
return
|
||||
}
|
||||
|
||||
func (s *backwardExtremitiesStatements) selectBackwardExtremitiesForRoom(
|
||||
ctx context.Context, roomID string,
|
||||
) (eventIDs []string, err error) {
|
||||
eventIDs = make([]string, 0)
|
||||
|
||||
rows, err := s.selectBackwardExtremitiesForRoomStmt.QueryContext(ctx, roomID)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for rows.Next() {
|
||||
var eID string
|
||||
if err = rows.Scan(&eID); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
eventIDs = append(eventIDs, eID)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (s *backwardExtremitiesStatements) isBackwardExtremity(
|
||||
ctx context.Context, roomID, eventID string,
|
||||
) (isBE bool, err error) {
|
||||
err = s.isBackwardExtremityStmt.QueryRowContext(ctx, roomID, eventID).Scan(&isBE)
|
||||
return
|
||||
}
|
||||
|
||||
func (s *backwardExtremitiesStatements) deleteBackwardExtremity(
|
||||
ctx context.Context, roomID, eventID string,
|
||||
) (err error) {
|
||||
_, err = s.deleteBackwardExtremityStmt.ExecContext(ctx, roomID, eventID)
|
||||
return
|
||||
}
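The table and queries above can be exercised directly through database/sql; a rough sketch, assuming a reachable sync API database (the connection string, room ID, and event ID below are placeholders):

package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // postgres driver, as used elsewhere in the sync API
)

func main() {
	// Placeholder DSN; point this at a real sync API database to try it.
	db, err := sql.Open("postgres", "postgres://dendrite:itsasecret@localhost/syncapi?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Record that we know about an event whose parents we haven't seen yet.
	_, err = db.ExecContext(ctx,
		"INSERT INTO syncapi_backward_extremities (room_id, event_id) VALUES ($1, $2) ON CONFLICT DO NOTHING",
		"!someroom:example.com", "$someevent:example.com")
	if err != nil {
		log.Fatal(err)
	}

	// Later, /messages can ask which events to backfill from for this room.
	rows, err := db.QueryContext(ctx,
		"SELECT event_id FROM syncapi_backward_extremities WHERE room_id = $1",
		"!someroom:example.com")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var eventID string
		if err := rows.Scan(&eventID); err != nil {
			log.Fatal(err)
		}
		fmt.Println("backward extremity:", eventID)
	}
}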
|
|
@ -22,6 +22,8 @@ import (
|
|||
|
||||
"github.com/lib/pq"
|
||||
"github.com/matrix-org/dendrite/common"
|
||||
"github.com/matrix-org/dendrite/syncapi/types"
|
||||
"github.com/matrix-org/gomatrix"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
)
|
||||
|
||||
|
@ -86,10 +88,10 @@ const selectStateEventSQL = "" +
|
|||
|
||||
const selectEventsWithEventIDsSQL = "" +
|
||||
// TODO: The session_id and transaction_id blanks are here because otherwise
|
||||
// the rowsToStreamEvents expects there to be exactly four columns. We need to
|
||||
// the rowsToStreamEvents expects there to be exactly five columns. We need to
|
||||
// figure out if these really need to be in the DB, and if so, we need a
|
||||
// better permanent fix for this. - neilalexander, 2 Jan 2020
|
||||
"SELECT added_at, event_json, 0 AS session_id, '' AS transaction_id" +
|
||||
"SELECT added_at, event_json, 0 AS session_id, false AS exclude_from_sync, '' AS transaction_id" +
|
||||
" FROM syncapi_current_room_state WHERE event_id = ANY($1)"
|
||||
|
||||
type currentRoomStateStatements struct {
|
||||
|
@ -183,7 +185,7 @@ func (s *currentRoomStateStatements) selectRoomIDsWithMembership(
|
|||
// CurrentState returns all the current state events for the given room.
|
||||
func (s *currentRoomStateStatements) selectCurrentState(
|
||||
ctx context.Context, txn *sql.Tx, roomID string,
|
||||
stateFilterPart *gomatrixserverlib.FilterPart,
|
||||
stateFilterPart *gomatrix.FilterPart,
|
||||
) ([]gomatrixserverlib.Event, error) {
|
||||
stmt := common.TxStmt(txn, s.selectCurrentStateStmt)
|
||||
rows, err := stmt.QueryContext(ctx, roomID,
|
||||
|
@ -212,7 +214,7 @@ func (s *currentRoomStateStatements) deleteRoomStateByEventID(
|
|||
|
||||
func (s *currentRoomStateStatements) upsertRoomState(
|
||||
ctx context.Context, txn *sql.Tx,
|
||||
event gomatrixserverlib.Event, membership *string, addedAt int64,
|
||||
event gomatrixserverlib.Event, membership *string, addedAt types.StreamPosition,
|
||||
) error {
|
||||
// Parse content as JSON and search for an "url" key
|
||||
containsURL := false
|
||||
|
@ -241,7 +243,7 @@ func (s *currentRoomStateStatements) upsertRoomState(
|
|||
|
||||
func (s *currentRoomStateStatements) selectEventsWithEventIDs(
|
||||
ctx context.Context, txn *sql.Tx, eventIDs []string,
|
||||
) ([]streamEvent, error) {
|
||||
) ([]types.StreamEvent, error) {
|
||||
stmt := common.TxStmt(txn, s.selectEventsWithEventIDsStmt)
|
||||
rows, err := stmt.QueryContext(ctx, pq.StringArray(eventIDs))
|
||||
if err != nil {
|
||||
|
|
|
@ -20,6 +20,7 @@ import (
|
|||
"database/sql"
|
||||
|
||||
"github.com/matrix-org/dendrite/common"
|
||||
"github.com/matrix-org/dendrite/syncapi/types"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
)
|
||||
|
||||
|
@ -86,7 +87,7 @@ func (s *inviteEventsStatements) prepare(db *sql.DB) (err error) {
|
|||
|
||||
func (s *inviteEventsStatements) insertInviteEvent(
|
||||
ctx context.Context, inviteEvent gomatrixserverlib.Event,
|
||||
) (streamPos int64, err error) {
|
||||
) (streamPos types.StreamPosition, err error) {
|
||||
err = s.insertInviteEventStmt.QueryRowContext(
|
||||
ctx,
|
||||
inviteEvent.RoomID(),
|
||||
|
@ -107,7 +108,7 @@ func (s *inviteEventsStatements) deleteInviteEvent(
|
|||
// selectInviteEventsInRange returns a map of room ID to invite event for the
|
||||
// active invites for the target user ID in the supplied range.
|
||||
func (s *inviteEventsStatements) selectInviteEventsInRange(
|
||||
ctx context.Context, txn *sql.Tx, targetUserID string, startPos, endPos int64,
|
||||
ctx context.Context, txn *sql.Tx, targetUserID string, startPos, endPos types.StreamPosition,
|
||||
) (map[string]gomatrixserverlib.Event, error) {
|
||||
stmt := common.TxStmt(txn, s.selectInviteEventsInRangeStmt)
|
||||
rows, err := stmt.QueryContext(ctx, targetUserID, startPos, endPos)
|
||||
|
|
|
@ -22,6 +22,8 @@ import (
|
|||
"sort"
|
||||
|
||||
"github.com/matrix-org/dendrite/roomserver/api"
|
||||
"github.com/matrix-org/dendrite/syncapi/types"
|
||||
"github.com/matrix-org/gomatrix"
|
||||
|
||||
"github.com/lib/pq"
|
||||
"github.com/matrix-org/dendrite/common"
|
||||
|
@ -35,52 +37,69 @@ CREATE SEQUENCE IF NOT EXISTS syncapi_stream_id;
|
|||
|
||||
-- Stores output room events received from the roomserver.
|
||||
CREATE TABLE IF NOT EXISTS syncapi_output_room_events (
|
||||
-- An incrementing ID which denotes the position in the log that this event resides at.
|
||||
-- NB: 'serial' makes no guarantees to increment by 1 every time, only that it increments.
|
||||
-- This isn't a problem for us since we just want to order by this field.
|
||||
id BIGINT PRIMARY KEY DEFAULT nextval('syncapi_stream_id'),
|
||||
-- The event ID for the event
|
||||
event_id TEXT NOT NULL,
|
||||
-- The 'room_id' key for the event.
|
||||
room_id TEXT NOT NULL,
|
||||
-- The JSON for the event. Stored as TEXT because this should be valid UTF-8.
|
||||
event_json TEXT NOT NULL,
|
||||
-- The event type e.g 'm.room.member'.
|
||||
type TEXT NOT NULL,
|
||||
-- The 'sender' property of the event.
|
||||
sender TEXT NOT NULL,
|
||||
-- true if the event content contains a url key.
|
||||
contains_url BOOL NOT NULL,
|
||||
-- A list of event IDs which represent a delta of added/removed room state. This can be NULL
|
||||
-- if there is no delta.
|
||||
add_state_ids TEXT[],
|
||||
remove_state_ids TEXT[],
|
||||
session_id BIGINT, -- The client session that sent the event, if any
|
||||
transaction_id TEXT -- The transaction id used to send the event, if any
|
||||
-- An incrementing ID which denotes the position in the log that this event resides at.
|
||||
-- NB: 'serial' makes no guarantees to increment by 1 every time, only that it increments.
|
||||
-- This isn't a problem for us since we just want to order by this field.
|
||||
id BIGINT PRIMARY KEY DEFAULT nextval('syncapi_stream_id'),
|
||||
-- The event ID for the event
|
||||
event_id TEXT NOT NULL CONSTRAINT syncapi_event_id_idx UNIQUE,
|
||||
-- The 'room_id' key for the event.
|
||||
room_id TEXT NOT NULL,
|
||||
-- The JSON for the event. Stored as TEXT because this should be valid UTF-8.
|
||||
event_json TEXT NOT NULL,
|
||||
-- The event type e.g 'm.room.member'.
|
||||
type TEXT NOT NULL,
|
||||
-- The 'sender' property of the event.
|
||||
sender TEXT NOT NULL,
|
||||
-- true if the event content contains a url key.
|
||||
contains_url BOOL NOT NULL,
|
||||
-- A list of event IDs which represent a delta of added/removed room state. This can be NULL
|
||||
-- if there is no delta.
|
||||
add_state_ids TEXT[],
|
||||
remove_state_ids TEXT[],
|
||||
-- The client session that sent the event, if any
|
||||
session_id BIGINT,
|
||||
-- The transaction id used to send the event, if any
|
||||
transaction_id TEXT,
|
||||
-- Should the event be excluded from responses to /sync requests. Useful for
|
||||
-- events retrieved through backfilling that have a position in the stream
|
||||
-- that relates to the moment these were retrieved rather than the moment these
|
||||
-- were emitted.
|
||||
exclude_from_sync BOOL DEFAULT FALSE
|
||||
);
|
||||
-- for event selection
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS syncapi_event_id_idx ON syncapi_output_room_events(event_id);
|
||||
`
|
||||
|
||||
const insertEventSQL = "" +
|
||||
"INSERT INTO syncapi_output_room_events (" +
|
||||
"room_id, event_id, event_json, type, sender, contains_url, add_state_ids, remove_state_ids, session_id, transaction_id" +
|
||||
") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) RETURNING id"
|
||||
"room_id, event_id, event_json, type, sender, contains_url, add_state_ids, remove_state_ids, session_id, transaction_id, exclude_from_sync" +
|
||||
") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) " +
|
||||
"ON CONFLICT ON CONSTRAINT syncapi_event_id_idx DO UPDATE SET exclude_from_sync = $11 " +
|
||||
"RETURNING id"
|
||||
|
||||
const selectEventsSQL = "" +
|
||||
"SELECT id, event_json, session_id, transaction_id FROM syncapi_output_room_events WHERE event_id = ANY($1)"
|
||||
"SELECT id, event_json, session_id, exclude_from_sync, transaction_id FROM syncapi_output_room_events WHERE event_id = ANY($1)"
|
||||
|
||||
const selectRecentEventsSQL = "" +
|
||||
"SELECT id, event_json, session_id, transaction_id FROM syncapi_output_room_events" +
|
||||
"SELECT id, event_json, session_id, exclude_from_sync, transaction_id FROM syncapi_output_room_events" +
|
||||
" WHERE room_id = $1 AND id > $2 AND id <= $3" +
|
||||
" ORDER BY id DESC LIMIT $4"
|
||||
|
||||
const selectRecentEventsForSyncSQL = "" +
|
||||
"SELECT id, event_json, session_id, exclude_from_sync, transaction_id FROM syncapi_output_room_events" +
|
||||
" WHERE room_id = $1 AND id > $2 AND id <= $3 AND exclude_from_sync = FALSE" +
|
||||
" ORDER BY id DESC LIMIT $4"
|
||||
|
||||
const selectEarlyEventsSQL = "" +
|
||||
"SELECT id, event_json, session_id, exclude_from_sync, transaction_id FROM syncapi_output_room_events" +
|
||||
" WHERE room_id = $1 AND id > $2 AND id <= $3" +
|
||||
" ORDER BY id ASC LIMIT $4"
|
||||
|
||||
const selectMaxEventIDSQL = "" +
|
||||
"SELECT MAX(id) FROM syncapi_output_room_events"
|
||||
|
||||
// In order for us to apply the state updates correctly, rows need to be ordered in the order they were received (id).
|
||||
const selectStateInRangeSQL = "" +
|
||||
"SELECT id, event_json, add_state_ids, remove_state_ids" +
|
||||
"SELECT id, event_json, exclude_from_sync, add_state_ids, remove_state_ids" +
|
||||
" FROM syncapi_output_room_events" +
|
||||
" WHERE (id > $1 AND id <= $2) AND (add_state_ids IS NOT NULL OR remove_state_ids IS NOT NULL)" +
|
||||
" AND ( $3::text[] IS NULL OR sender = ANY($3) )" +
|
||||
|
@ -92,11 +111,13 @@ const selectStateInRangeSQL = "" +
|
|||
" LIMIT $8"
|
||||
|
||||
type outputRoomEventsStatements struct {
|
||||
insertEventStmt *sql.Stmt
|
||||
selectEventsStmt *sql.Stmt
|
||||
selectMaxEventIDStmt *sql.Stmt
|
||||
selectRecentEventsStmt *sql.Stmt
|
||||
selectStateInRangeStmt *sql.Stmt
|
||||
insertEventStmt *sql.Stmt
|
||||
selectEventsStmt *sql.Stmt
|
||||
selectMaxEventIDStmt *sql.Stmt
|
||||
selectRecentEventsStmt *sql.Stmt
|
||||
selectRecentEventsForSyncStmt *sql.Stmt
|
||||
selectEarlyEventsStmt *sql.Stmt
|
||||
selectStateInRangeStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func (s *outputRoomEventsStatements) prepare(db *sql.DB) (err error) {
|
||||
|
@ -116,6 +137,12 @@ func (s *outputRoomEventsStatements) prepare(db *sql.DB) (err error) {
|
|||
if s.selectRecentEventsStmt, err = db.Prepare(selectRecentEventsSQL); err != nil {
|
||||
return
|
||||
}
|
||||
if s.selectRecentEventsForSyncStmt, err = db.Prepare(selectRecentEventsForSyncSQL); err != nil {
|
||||
return
|
||||
}
|
||||
if s.selectEarlyEventsStmt, err = db.Prepare(selectEarlyEventsSQL); err != nil {
|
||||
return
|
||||
}
|
||||
if s.selectStateInRangeStmt, err = db.Prepare(selectStateInRangeSQL); err != nil {
|
||||
return
|
||||
}
|
||||
|
@ -126,9 +153,9 @@ func (s *outputRoomEventsStatements) prepare(db *sql.DB) (err error) {
|
|||
// Results are bucketed based on the room ID. If the same state is overwritten multiple times between the
|
||||
// two positions, only the most recent state is returned.
|
||||
func (s *outputRoomEventsStatements) selectStateInRange(
|
||||
ctx context.Context, txn *sql.Tx, oldPos, newPos int64,
|
||||
stateFilterPart *gomatrixserverlib.FilterPart,
|
||||
) (map[string]map[string]bool, map[string]streamEvent, error) {
|
||||
ctx context.Context, txn *sql.Tx, oldPos, newPos types.StreamPosition,
|
||||
stateFilterPart *gomatrix.FilterPart,
|
||||
) (map[string]map[string]bool, map[string]types.StreamEvent, error) {
|
||||
stmt := common.TxStmt(txn, s.selectStateInRangeStmt)
|
||||
|
||||
rows, err := stmt.QueryContext(
|
||||
|
@ -148,19 +175,20 @@ func (s *outputRoomEventsStatements) selectStateInRange(
|
|||
// - For each room ID, build up an array of event IDs which represents cumulative adds/removes
|
||||
// For each room, map cumulative event IDs to events and return. This may need to be a batch SELECT based on event ID
|
||||
// if they aren't in the event ID cache. We don't handle state deletion yet.
|
||||
eventIDToEvent := make(map[string]streamEvent)
|
||||
eventIDToEvent := make(map[string]types.StreamEvent)
|
||||
|
||||
// RoomID => A set (map[string]bool) of state event IDs which are between the two positions
|
||||
stateNeeded := make(map[string]map[string]bool)
|
||||
|
||||
for rows.Next() {
|
||||
var (
|
||||
streamPos int64
|
||||
eventBytes []byte
|
||||
addIDs pq.StringArray
|
||||
delIDs pq.StringArray
|
||||
streamPos types.StreamPosition
|
||||
eventBytes []byte
|
||||
excludeFromSync bool
|
||||
addIDs pq.StringArray
|
||||
delIDs pq.StringArray
|
||||
)
|
||||
if err := rows.Scan(&streamPos, &eventBytes, &addIDs, &delIDs); err != nil {
|
||||
if err := rows.Scan(&streamPos, &eventBytes, &excludeFromSync, &addIDs, &delIDs); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
// Sanity check for deleted state and whine if we see it. We don't need to do anything
|
||||
|
@ -191,9 +219,10 @@ func (s *outputRoomEventsStatements) selectStateInRange(
|
|||
}
|
||||
stateNeeded[ev.RoomID()] = needSet
|
||||
|
||||
eventIDToEvent[ev.EventID()] = streamEvent{
|
||||
Event: ev,
|
||||
streamPosition: streamPos,
|
||||
eventIDToEvent[ev.EventID()] = types.StreamEvent{
|
||||
Event: ev,
|
||||
StreamPosition: streamPos,
|
||||
ExcludeFromSync: excludeFromSync,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -220,8 +249,8 @@ func (s *outputRoomEventsStatements) selectMaxEventID(
|
|||
func (s *outputRoomEventsStatements) insertEvent(
|
||||
ctx context.Context, txn *sql.Tx,
|
||||
event *gomatrixserverlib.Event, addState, removeState []string,
|
||||
transactionID *api.TransactionID,
|
||||
) (streamPos int64, err error) {
|
||||
transactionID *api.TransactionID, excludeFromSync bool,
|
||||
) (streamPos types.StreamPosition, err error) {
|
||||
var txnID *string
|
||||
var sessionID *int64
|
||||
if transactionID != nil {
|
||||
|
@ -250,16 +279,53 @@ func (s *outputRoomEventsStatements) insertEvent(
|
|||
pq.StringArray(removeState),
|
||||
sessionID,
|
||||
txnID,
|
||||
excludeFromSync,
|
||||
).Scan(&streamPos)
|
||||
return
|
||||
}
|
||||
|
||||
// RecentEventsInRoom returns the most recent events in the given room, up to a maximum of 'limit'.
|
||||
// selectRecentEvents returns the most recent events in the given room, up to a maximum of 'limit'.
|
||||
// If onlySyncEvents is true, it only returns the events that aren't marked to
// be excluded from sync.
|
||||
func (s *outputRoomEventsStatements) selectRecentEvents(
|
||||
ctx context.Context, txn *sql.Tx,
|
||||
roomID string, fromPos, toPos int64, limit int,
|
||||
) ([]streamEvent, error) {
|
||||
stmt := common.TxStmt(txn, s.selectRecentEventsStmt)
|
||||
roomID string, fromPos, toPos types.StreamPosition, limit int,
|
||||
chronologicalOrder bool, onlySyncEvents bool,
|
||||
) ([]types.StreamEvent, error) {
|
||||
var stmt *sql.Stmt
|
||||
if onlySyncEvents {
|
||||
stmt = common.TxStmt(txn, s.selectRecentEventsForSyncStmt)
|
||||
} else {
|
||||
stmt = common.TxStmt(txn, s.selectRecentEventsStmt)
|
||||
}
|
||||
|
||||
rows, err := stmt.QueryContext(ctx, roomID, fromPos, toPos, limit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close() // nolint: errcheck
|
||||
events, err := rowsToStreamEvents(rows)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if chronologicalOrder {
|
||||
// The events need to be returned from oldest to latest, which isn't
|
||||
// necessarily the way the SQL query returns them, so a sort is necessary to
|
||||
// ensure the events are in the right order in the slice.
|
||||
sort.SliceStable(events, func(i int, j int) bool {
|
||||
return events[i].StreamPosition < events[j].StreamPosition
|
||||
})
|
||||
}
|
||||
return events, nil
|
||||
}
|
||||
|
||||
// selectEarlyEvents returns the earliest events in the given room, starting
|
||||
// from a given position, up to a maximum of 'limit'.
|
||||
func (s *outputRoomEventsStatements) selectEarlyEvents(
|
||||
ctx context.Context, txn *sql.Tx,
|
||||
roomID string, fromPos, toPos types.StreamPosition, limit int,
|
||||
) ([]types.StreamEvent, error) {
|
||||
stmt := common.TxStmt(txn, s.selectEarlyEventsStmt)
|
||||
rows, err := stmt.QueryContext(ctx, roomID, fromPos, toPos, limit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -273,16 +339,16 @@ func (s *outputRoomEventsStatements) selectRecentEvents(
|
|||
// necessarily the way the SQL query returns them, so a sort is necessary to
|
||||
// ensure the events are in the right order in the slice.
|
||||
sort.SliceStable(events, func(i int, j int) bool {
|
||||
return events[i].streamPosition < events[j].streamPosition
|
||||
return events[i].StreamPosition < events[j].StreamPosition
|
||||
})
|
||||
return events, nil
|
||||
}
|
||||
|
||||
// Events returns the events for the given event IDs. Returns an error if any one of the event IDs given are missing
|
||||
// from the database.
|
||||
// selectEvents returns the events for the given event IDs. If an event is
|
||||
// missing from the database, it will be omitted.
|
||||
func (s *outputRoomEventsStatements) selectEvents(
|
||||
ctx context.Context, txn *sql.Tx, eventIDs []string,
|
||||
) ([]streamEvent, error) {
|
||||
) ([]types.StreamEvent, error) {
|
||||
stmt := common.TxStmt(txn, s.selectEventsStmt)
|
||||
rows, err := stmt.QueryContext(ctx, pq.StringArray(eventIDs))
|
||||
if err != nil {
|
||||
|
@ -292,17 +358,18 @@ func (s *outputRoomEventsStatements) selectEvents(
|
|||
return rowsToStreamEvents(rows)
|
||||
}
|
||||
|
||||
func rowsToStreamEvents(rows *sql.Rows) ([]streamEvent, error) {
|
||||
var result []streamEvent
|
||||
func rowsToStreamEvents(rows *sql.Rows) ([]types.StreamEvent, error) {
|
||||
var result []types.StreamEvent
|
||||
for rows.Next() {
|
||||
var (
|
||||
streamPos int64
|
||||
eventBytes []byte
|
||||
sessionID *int64
|
||||
txnID *string
|
||||
transactionID *api.TransactionID
|
||||
streamPos types.StreamPosition
|
||||
eventBytes []byte
|
||||
excludeFromSync bool
|
||||
sessionID *int64
|
||||
txnID *string
|
||||
transactionID *api.TransactionID
|
||||
)
|
||||
if err := rows.Scan(&streamPos, &eventBytes, &sessionID, &txnID); err != nil {
|
||||
if err := rows.Scan(&streamPos, &eventBytes, &sessionID, &excludeFromSync, &txnID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// TODO: Handle redacted events
|
||||
|
@ -318,10 +385,11 @@ func rowsToStreamEvents(rows *sql.Rows) ([]streamEvent, error) {
|
|||
}
|
||||
}
|
||||
|
||||
result = append(result, streamEvent{
|
||||
Event: ev,
|
||||
streamPosition: streamPos,
|
||||
transactionID: transactionID,
|
||||
result = append(result, types.StreamEvent{
|
||||
Event: ev,
|
||||
StreamPosition: streamPos,
|
||||
TransactionID: transactionID,
|
||||
ExcludeFromSync: excludeFromSync,
|
||||
})
|
||||
}
|
||||
return result, nil
|
||||
|
|
188 syncapi/storage/postgres/output_room_events_topology_table.go Normal file
|
@ -0,0 +1,188 @@
|
|||
// Copyright 2018 New Vector Ltd
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
|
||||
"github.com/matrix-org/dendrite/syncapi/types"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
)
|
||||
|
||||
const outputRoomEventsTopologySchema = `
|
||||
-- Stores the topological position of each output room event received from the roomserver.
|
||||
CREATE TABLE IF NOT EXISTS syncapi_output_room_events_topology (
|
||||
-- The event ID for the event.
|
||||
event_id TEXT PRIMARY KEY,
|
||||
-- The place of the event in the room's topology. This can usually be determined
|
||||
-- from the event's depth.
|
||||
topological_position BIGINT NOT NULL,
|
||||
-- The 'room_id' key for the event.
|
||||
room_id TEXT NOT NULL
|
||||
);
|
||||
-- The topological order will be used in event selection and ordering
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS syncapi_event_topological_position_idx ON syncapi_output_room_events_topology(topological_position, room_id);
|
||||
`
|
||||
|
||||
const insertEventInTopologySQL = "" +
|
||||
"INSERT INTO syncapi_output_room_events_topology (event_id, topological_position, room_id)" +
|
||||
" VALUES ($1, $2, $3)" +
|
||||
" ON CONFLICT (topological_position, room_id) DO UPDATE SET event_id = $1"
|
||||
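The ON CONFLICT clause above turns the insert into an upsert keyed on (topological_position, room_id): a later event written at the same slot replaces the earlier event ID. A purely illustrative Go analogy of that replace-on-conflict behaviour, using a map keyed on the same pair (not the actual storage code):

package main

import "fmt"

type key struct {
    Position int64
    RoomID   string
}

func main() {
    topology := map[key]string{}
    // upsert mirrors "ON CONFLICT ... DO UPDATE SET event_id = $1":
    // writing to an occupied slot overwrites the stored event ID.
    upsert := func(pos int64, roomID, eventID string) {
        topology[key{pos, roomID}] = eventID
    }
    upsert(3, "!room:server", "$event1")
    upsert(3, "!room:server", "$event2")
    fmt.Println(topology[key{3, "!room:server"}]) // $event2
}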
|
||||
const selectEventIDsInRangeASCSQL = "" +
|
||||
"SELECT event_id FROM syncapi_output_room_events_topology" +
|
||||
" WHERE room_id = $1 AND topological_position > $2 AND topological_position <= $3" +
|
||||
" ORDER BY topological_position ASC LIMIT $4"
|
||||
|
||||
const selectEventIDsInRangeDESCSQL = "" +
|
||||
"SELECT event_id FROM syncapi_output_room_events_topology" +
|
||||
" WHERE room_id = $1 AND topological_position > $2 AND topological_position <= $3" +
|
||||
" ORDER BY topological_position DESC LIMIT $4"
|
||||
|
||||
const selectPositionInTopologySQL = "" +
|
||||
"SELECT topological_position FROM syncapi_output_room_events_topology" +
|
||||
" WHERE event_id = $1"
|
||||
|
||||
const selectMaxPositionInTopologySQL = "" +
|
||||
"SELECT MAX(topological_position) FROM syncapi_output_room_events_topology" +
|
||||
" WHERE room_id = $1"
|
||||
|
||||
const selectEventIDsFromPositionSQL = "" +
|
||||
"SELECT event_id FROM syncapi_output_room_events_topology" +
|
||||
" WHERE room_id = $1 AND topological_position = $2"
|
||||
|
||||
type outputRoomEventsTopologyStatements struct {
|
||||
insertEventInTopologyStmt *sql.Stmt
|
||||
selectEventIDsInRangeASCStmt *sql.Stmt
|
||||
selectEventIDsInRangeDESCStmt *sql.Stmt
|
||||
selectPositionInTopologyStmt *sql.Stmt
|
||||
selectMaxPositionInTopologyStmt *sql.Stmt
|
||||
selectEventIDsFromPositionStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func (s *outputRoomEventsTopologyStatements) prepare(db *sql.DB) (err error) {
|
||||
_, err = db.Exec(outputRoomEventsTopologySchema)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if s.insertEventInTopologyStmt, err = db.Prepare(insertEventInTopologySQL); err != nil {
|
||||
return
|
||||
}
|
||||
if s.selectEventIDsInRangeASCStmt, err = db.Prepare(selectEventIDsInRangeASCSQL); err != nil {
|
||||
return
|
||||
}
|
||||
if s.selectEventIDsInRangeDESCStmt, err = db.Prepare(selectEventIDsInRangeDESCSQL); err != nil {
|
||||
return
|
||||
}
|
||||
if s.selectPositionInTopologyStmt, err = db.Prepare(selectPositionInTopologySQL); err != nil {
|
||||
return
|
||||
}
|
||||
if s.selectMaxPositionInTopologyStmt, err = db.Prepare(selectMaxPositionInTopologySQL); err != nil {
|
||||
return
|
||||
}
|
||||
if s.selectEventIDsFromPositionStmt, err = db.Prepare(selectEventIDsFromPositionSQL); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// insertEventInTopology inserts the given event in the room's topology, based
|
||||
// on the event's depth.
|
||||
func (s *outputRoomEventsTopologyStatements) insertEventInTopology(
|
||||
ctx context.Context, event *gomatrixserverlib.Event,
|
||||
) (err error) {
|
||||
_, err = s.insertEventInTopologyStmt.ExecContext(
|
||||
ctx, event.EventID(), event.Depth(), event.RoomID(),
|
||||
)
|
||||
return
|
||||
}
|
||||
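insertEventInTopology records event.Depth() as the event's topological position. A small standalone sketch (hypothetical event type and IDs) of how sorting by depth yields the room's topological order, oldest first:

package main

import (
    "fmt"
    "sort"
)

type event struct {
    ID    string
    Depth int64 // the event's depth, used as its topological position
}

func main() {
    evs := []event{{"$c", 3}, {"$a", 1}, {"$b", 2}}
    sort.Slice(evs, func(i, j int) bool { return evs[i].Depth < evs[j].Depth })
    for _, e := range evs {
        fmt.Println(e.Depth, e.ID) // lowest depth (oldest) first
    }
}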
|
||||
// selectEventIDsInRange selects the IDs of events whose positions are within a
|
||||
// given range in a given room's topological order.
|
||||
// Returns an empty slice if no events match the given range.
|
||||
func (s *outputRoomEventsTopologyStatements) selectEventIDsInRange(
|
||||
ctx context.Context, roomID string, fromPos, toPos types.StreamPosition,
|
||||
limit int, chronologicalOrder bool,
|
||||
) (eventIDs []string, err error) {
|
||||
// Decide on the selection's order according to whether chronological order
|
||||
// is requested or not.
|
||||
var stmt *sql.Stmt
|
||||
if chronologicalOrder {
|
||||
stmt = s.selectEventIDsInRangeASCStmt
|
||||
} else {
|
||||
stmt = s.selectEventIDsInRangeDESCStmt
|
||||
}
|
||||
|
||||
// Query the event IDs.
|
||||
rows, err := stmt.QueryContext(ctx, roomID, fromPos, toPos, limit)
|
||||
if err == sql.ErrNoRows {
|
||||
// If no event matched the request, return an empty slice.
|
||||
return []string{}, nil
|
||||
} else if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Return the IDs.
|
||||
var eventID string
|
||||
for rows.Next() {
|
||||
if err = rows.Scan(&eventID); err != nil {
|
||||
return
|
||||
}
|
||||
eventIDs = append(eventIDs, eventID)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
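The selection above drains the rows with the usual Next/Scan loop. A standalone sketch of that pattern with the commonly recommended rows.Close and rows.Err handling added; this is a defensive variant for illustration, not a claim about what the dendrite code must do:

package main

import (
    "database/sql"
    "fmt"
)

// scanIDs drains a single-column *sql.Rows of event IDs.
func scanIDs(rows *sql.Rows) ([]string, error) {
    defer rows.Close() // release the connection even on early return
    var ids []string
    for rows.Next() {
        var id string
        if err := rows.Scan(&id); err != nil {
            return nil, err
        }
        ids = append(ids, id)
    }
    return ids, rows.Err() // surface any error hit during iteration
}

func main() {
    fmt.Println("scanIDs consumes a single-column *sql.Rows of event IDs")
}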
|
||||
// selectPositionInTopology returns the position of a given event in the
|
||||
// topology of the room it belongs to.
|
||||
func (s *outputRoomEventsTopologyStatements) selectPositionInTopology(
|
||||
ctx context.Context, eventID string,
|
||||
) (pos types.StreamPosition, err error) {
|
||||
err = s.selectPositionInTopologyStmt.QueryRowContext(ctx, eventID).Scan(&pos)
|
||||
return
|
||||
}
|
||||
|
||||
func (s *outputRoomEventsTopologyStatements) selectMaxPositionInTopology(
|
||||
ctx context.Context, roomID string,
|
||||
) (pos types.StreamPosition, err error) {
|
||||
err = s.selectMaxPositionInTopologyStmt.QueryRowContext(ctx, roomID).Scan(&pos)
|
||||
return
|
||||
}
|
||||
|
||||
// selectEventIDsFromPosition returns the IDs of all events that have a given
|
||||
// position in the topology of a given room.
|
||||
func (s *outputRoomEventsTopologyStatements) selectEventIDsFromPosition(
|
||||
ctx context.Context, roomID string, pos types.StreamPosition,
|
||||
) (eventIDs []string, err error) {
|
||||
// Query the event IDs.
|
||||
rows, err := s.selectEventIDsFromPositionStmt.QueryContext(ctx, roomID, pos)
|
||||
if err == sql.ErrNoRows {
|
||||
// If no event matched the request, return an empty slice.
|
||||
return []string{}, nil
|
||||
} else if err != nil {
|
||||
return
|
||||
}
|
||||
// Return the IDs.
|
||||
var eventID string
|
||||
for rows.Next() {
|
||||
if err = rows.Scan(&eventID); err != nil {
|
||||
return
|
||||
}
|
||||
eventIDs = append(eventIDs, eventID)
|
||||
}
|
||||
return
|
||||
}
|
|
@ -20,13 +20,13 @@ import (
|
|||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
|
||||
"github.com/matrix-org/dendrite/roomserver/api"
|
||||
"github.com/matrix-org/gomatrix"
|
||||
|
||||
// Import the postgres database driver.
|
||||
_ "github.com/lib/pq"
|
||||
|
@ -42,29 +42,24 @@ type stateDelta struct {
|
|||
membership string
|
||||
// The PDU stream position of the latest membership event for this user, if applicable.
|
||||
// Can be 0 if there is no membership event in this delta.
|
||||
membershipPos int64
|
||||
membershipPos types.StreamPosition
|
||||
}
|
||||
|
||||
// Same as gomatrixserverlib.Event but also has the PDU stream position for this event.
|
||||
type streamEvent struct {
|
||||
gomatrixserverlib.Event
|
||||
streamPosition int64
|
||||
transactionID *api.TransactionID
|
||||
}
|
||||
|
||||
// SyncServerDatabase represents a sync server datasource which manages
|
||||
// SyncServerDatasource represents a sync server datasource which manages
|
||||
// both the database for PDUs and caches for EDUs.
|
||||
type SyncServerDatasource struct {
|
||||
db *sql.DB
|
||||
common.PartitionOffsetStatements
|
||||
accountData accountDataStatements
|
||||
events outputRoomEventsStatements
|
||||
roomstate currentRoomStateStatements
|
||||
invites inviteEventsStatements
|
||||
typingCache *cache.TypingCache
|
||||
accountData accountDataStatements
|
||||
events outputRoomEventsStatements
|
||||
roomstate currentRoomStateStatements
|
||||
invites inviteEventsStatements
|
||||
typingCache *cache.TypingCache
|
||||
topology outputRoomEventsTopologyStatements
|
||||
backwardExtremities backwardExtremitiesStatements
|
||||
}
|
||||
|
||||
// NewSyncServerDatabase creates a new sync server database
|
||||
// NewSyncServerDatasource creates a new sync server database
|
||||
func NewSyncServerDatasource(dbDataSourceName string) (*SyncServerDatasource, error) {
|
||||
var d SyncServerDatasource
|
||||
var err error
|
||||
|
@ -86,6 +81,12 @@ func NewSyncServerDatasource(dbDataSourceName string) (*SyncServerDatasource, er
|
|||
if err := d.invites.prepare(d.db); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := d.topology.prepare(d.db); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := d.backwardExtremities.prepare(d.db); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
d.typingCache = cache.NewTypingCache()
|
||||
return &d, nil
|
||||
}
|
||||
|
@ -108,7 +109,46 @@ func (d *SyncServerDatasource) Events(ctx context.Context, eventIDs []string) ([
|
|||
|
||||
// We don't include a device here as we only include transaction IDs in
|
||||
// incremental syncs.
|
||||
return streamEventsToEvents(nil, streamEvents), nil
|
||||
return d.StreamEventsToEvents(nil, streamEvents), nil
|
||||
}
|
||||
|
||||
func (d *SyncServerDatasource) handleBackwardExtremities(ctx context.Context, ev *gomatrixserverlib.Event) error {
|
||||
// If the event is already known as a backward extremity, don't consider
|
||||
// it as such anymore now that we have it.
|
||||
isBackwardExtremity, err := d.backwardExtremities.isBackwardExtremity(ctx, ev.RoomID(), ev.EventID())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if isBackwardExtremity {
|
||||
if err = d.backwardExtremities.deleteBackwardExtremity(ctx, ev.RoomID(), ev.EventID()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Check if we have all of the event's previous events. If an event is
|
||||
// missing, add it to the room's backward extremities.
|
||||
prevEvents, err := d.events.selectEvents(ctx, nil, ev.PrevEventIDs())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var found bool
|
||||
for _, eID := range ev.PrevEventIDs() {
|
||||
found = false
|
||||
for _, prevEv := range prevEvents {
|
||||
if eID == prevEv.EventID() {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
|
||||
// If the event is missing, consider it a backward extremity.
|
||||
if !found {
|
||||
if err = d.backwardExtremities.insertsBackwardExtremity(ctx, ev.RoomID(), ev.EventID()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
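handleBackwardExtremities applies a simple rule: an event becomes a backward extremity when one of its prev_events is missing from the database, and stops being one once the missing event arrives. A minimal sketch of the missing-prev-events check, using a plain map in place of the events table:

package main

import "fmt"

// missingPrevEvents returns the prev_event IDs that are not yet known;
// any non-empty result marks the referencing event as a backward extremity.
func missingPrevEvents(prevEventIDs []string, known map[string]bool) []string {
    var missing []string
    for _, id := range prevEventIDs {
        if !known[id] {
            missing = append(missing, id)
        }
    }
    return missing
}

func main() {
    known := map[string]bool{"$a": true}
    fmt.Println(missingPrevEvents([]string{"$a", "$b"}, known)) // [$b]
}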
|
||||
// WriteEvent into the database. It is not safe to call this function from multiple goroutines, as it would create races
|
||||
|
@ -119,16 +159,26 @@ func (d *SyncServerDatasource) WriteEvent(
|
|||
ev *gomatrixserverlib.Event,
|
||||
addStateEvents []gomatrixserverlib.Event,
|
||||
addStateEventIDs, removeStateEventIDs []string,
|
||||
transactionID *api.TransactionID,
|
||||
) (pduPosition int64, returnErr error) {
|
||||
transactionID *api.TransactionID, excludeFromSync bool,
|
||||
) (pduPosition types.StreamPosition, returnErr error) {
|
||||
returnErr = common.WithTransaction(d.db, func(txn *sql.Tx) error {
|
||||
var err error
|
||||
pos, err := d.events.insertEvent(ctx, txn, ev, addStateEventIDs, removeStateEventIDs, transactionID)
|
||||
pos, err := d.events.insertEvent(
|
||||
ctx, txn, ev, addStateEventIDs, removeStateEventIDs, transactionID, excludeFromSync,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pduPosition = pos
|
||||
|
||||
if err = d.topology.insertEventInTopology(ctx, ev); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = d.handleBackwardExtremities(ctx, ev); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(addStateEvents) == 0 && len(removeStateEventIDs) == 0 {
|
||||
// Nothing to do, the event may have just been a message event.
|
||||
return nil
|
||||
|
@ -136,14 +186,15 @@ func (d *SyncServerDatasource) WriteEvent(
|
|||
|
||||
return d.updateRoomState(ctx, txn, removeStateEventIDs, addStateEvents, pduPosition)
|
||||
})
|
||||
return
|
||||
|
||||
return pduPosition, returnErr
|
||||
}
|
||||
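WriteEvent wraps every per-event write, the event row, the topology row, backward-extremity bookkeeping and any state update, in common.WithTransaction, so a failure in any step aborts the whole event. A standalone sketch of what such a wrapper typically looks like; the helper name and body here are illustrative, not dendrite's implementation:

package main

import (
    "database/sql"
    "fmt"
)

// withTransaction begins a transaction, runs fn, and commits only if fn
// returned nil; otherwise the transaction is rolled back.
func withTransaction(db *sql.DB, fn func(txn *sql.Tx) error) (err error) {
    txn, err := db.Begin()
    if err != nil {
        return err
    }
    defer func() {
        if err != nil {
            txn.Rollback() // best effort; keep the original error
            return
        }
        err = txn.Commit()
    }()
    return fn(txn)
}

func main() {
    // Without a registered driver this prints "sql: unknown driver ...",
    // which is enough to show the call shape.
    db, err := sql.Open("postgres", "postgres://localhost/syncapi?sslmode=disable")
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println(withTransaction(db, func(txn *sql.Tx) error { return nil }))
}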
|
||||
func (d *SyncServerDatasource) updateRoomState(
|
||||
ctx context.Context, txn *sql.Tx,
|
||||
removedEventIDs []string,
|
||||
addedEvents []gomatrixserverlib.Event,
|
||||
pduPosition int64,
|
||||
pduPosition types.StreamPosition,
|
||||
) error {
|
||||
// remove first, then add, as we do not ever delete state, but do replace state which is a remove followed by an add.
|
||||
for _, eventID := range removedEventIDs {
|
||||
|
@ -186,7 +237,7 @@ func (d *SyncServerDatasource) GetStateEvent(
|
|||
// Returns an empty slice if no state events could be found for this room.
|
||||
// Returns an error if there was an issue with the retrieval.
|
||||
func (d *SyncServerDatasource) GetStateEventsForRoom(
|
||||
ctx context.Context, roomID string, stateFilterPart *gomatrixserverlib.FilterPart,
|
||||
ctx context.Context, roomID string, stateFilterPart *gomatrix.FilterPart,
|
||||
) (stateEvents []gomatrixserverlib.Event, err error) {
|
||||
err = common.WithTransaction(d.db, func(txn *sql.Tx) error {
|
||||
stateEvents, err = d.roomstate.selectCurrentState(ctx, txn, roomID, stateFilterPart)
|
||||
|
@ -195,14 +246,141 @@ func (d *SyncServerDatasource) GetStateEventsForRoom(
|
|||
return
|
||||
}
|
||||
|
||||
// GetEventsInRange retrieves all of the events on a given ordering using the
|
||||
// given extremities and limit.
|
||||
func (d *SyncServerDatasource) GetEventsInRange(
|
||||
ctx context.Context,
|
||||
from, to *types.PaginationToken,
|
||||
roomID string, limit int,
|
||||
backwardOrdering bool,
|
||||
) (events []types.StreamEvent, err error) {
|
||||
// If the pagination token's type is types.PaginationTokenTypeTopology, the
|
||||
// events must be retrieved from the rooms' topology table rather than the
|
||||
// table containing the syncapi server's whole stream of events.
|
||||
if from.Type == types.PaginationTokenTypeTopology {
|
||||
// Determine the backward and forward limit, i.e. the upper and lower
|
||||
// limits to the selection in the room's topology, from the direction.
|
||||
var backwardLimit, forwardLimit types.StreamPosition
|
||||
if backwardOrdering {
|
||||
// Backward ordering is reverse chronological (latest event to oldest
|
||||
// one).
|
||||
backwardLimit = to.PDUPosition
|
||||
forwardLimit = from.PDUPosition
|
||||
} else {
|
||||
// Forward ordering is chronological (oldest event to latest one).
|
||||
backwardLimit = from.PDUPosition
|
||||
forwardLimit = to.PDUPosition
|
||||
}
|
||||
|
||||
// Select the event IDs from the defined range.
|
||||
var eIDs []string
|
||||
eIDs, err = d.topology.selectEventIDsInRange(
|
||||
ctx, roomID, backwardLimit, forwardLimit, limit, !backwardOrdering,
|
||||
)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Retrieve the events' contents using their IDs.
|
||||
events, err = d.events.selectEvents(ctx, nil, eIDs)
|
||||
return
|
||||
}
|
||||
|
||||
// If the pagination token's type is types.PaginationTokenTypeStream, the
|
||||
// events must be retrieved from the table containing the syncapi server's
|
||||
// whole stream of events.
|
||||
|
||||
if backwardOrdering {
|
||||
// When using backward ordering, we want the most recent events first.
|
||||
if events, err = d.events.selectRecentEvents(
|
||||
ctx, nil, roomID, to.PDUPosition, from.PDUPosition, limit, false, false,
|
||||
); err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
// When using forward ordering, we want the least recent events first.
|
||||
if events, err = d.events.selectEarlyEvents(
|
||||
ctx, nil, roomID, from.PDUPosition, to.PDUPosition, limit,
|
||||
); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
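GetEventsInRange maps the from/to tokens onto lower and upper bounds depending on the requested direction: for backward pagination the "to" token is the lower bound, for forward pagination the "from" token is. A tiny sketch of that mapping with hypothetical names:

package main

import "fmt"

// rangeLimits returns the (lower, upper) topological bounds for a selection.
func rangeLimits(fromPos, toPos int64, backwardOrdering bool) (low, high int64) {
    if backwardOrdering {
        return toPos, fromPos // paginating back from a newer token to an older one
    }
    return fromPos, toPos
}

func main() {
    low, high := rangeLimits(50, 10, true)
    fmt.Println(low, high) // 10 50
}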
|
||||
// SyncPosition returns the latest positions for syncing.
|
||||
func (d *SyncServerDatasource) SyncPosition(ctx context.Context) (types.SyncPosition, error) {
|
||||
func (d *SyncServerDatasource) SyncPosition(ctx context.Context) (types.PaginationToken, error) {
|
||||
return d.syncPositionTx(ctx, nil)
|
||||
}
|
||||
|
||||
// BackwardExtremitiesForRoom returns the event IDs of all of the backward
|
||||
// extremities we know of for a given room.
|
||||
func (d *SyncServerDatasource) BackwardExtremitiesForRoom(
|
||||
ctx context.Context, roomID string,
|
||||
) (backwardExtremities []string, err error) {
|
||||
return d.backwardExtremities.selectBackwardExtremitiesForRoom(ctx, roomID)
|
||||
}
|
||||
|
||||
// MaxTopologicalPosition returns the highest topological position for a given
|
||||
// room.
|
||||
func (d *SyncServerDatasource) MaxTopologicalPosition(
|
||||
ctx context.Context, roomID string,
|
||||
) (types.StreamPosition, error) {
|
||||
return d.topology.selectMaxPositionInTopology(ctx, roomID)
|
||||
}
|
||||
|
||||
// EventsAtTopologicalPosition returns all of the events matching a given
|
||||
// position in the topology of a given room.
|
||||
func (d *SyncServerDatasource) EventsAtTopologicalPosition(
|
||||
ctx context.Context, roomID string, pos types.StreamPosition,
|
||||
) ([]types.StreamEvent, error) {
|
||||
eIDs, err := d.topology.selectEventIDsFromPosition(ctx, roomID, pos)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return d.events.selectEvents(ctx, nil, eIDs)
|
||||
}
|
||||
|
||||
func (d *SyncServerDatasource) EventPositionInTopology(
|
||||
ctx context.Context, eventID string,
|
||||
) (types.StreamPosition, error) {
|
||||
return d.topology.selectPositionInTopology(ctx, eventID)
|
||||
}
|
||||
|
||||
// SyncStreamPosition returns the latest position in the sync stream. Returns 0 if there are no events yet.
|
||||
func (d *SyncServerDatasource) SyncStreamPosition(ctx context.Context) (types.StreamPosition, error) {
|
||||
return d.syncStreamPositionTx(ctx, nil)
|
||||
}
|
||||
|
||||
func (d *SyncServerDatasource) syncStreamPositionTx(
|
||||
ctx context.Context, txn *sql.Tx,
|
||||
) (types.StreamPosition, error) {
|
||||
maxID, err := d.events.selectMaxEventID(ctx, txn)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
maxAccountDataID, err := d.accountData.selectMaxAccountDataID(ctx, txn)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if maxAccountDataID > maxID {
|
||||
maxID = maxAccountDataID
|
||||
}
|
||||
maxInviteID, err := d.invites.selectMaxInviteID(ctx, txn)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if maxInviteID > maxID {
|
||||
maxID = maxInviteID
|
||||
}
|
||||
return types.StreamPosition(maxID), nil
|
||||
}
|
||||
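syncStreamPositionTx reports the overall stream position as the maximum of the per-table maxima (events, account data, invites). The same fold as a standalone sketch:

package main

import "fmt"

// maxPosition returns the largest of the given stream positions.
func maxPosition(positions ...int64) int64 {
    var max int64
    for _, p := range positions {
        if p > max {
            max = p
        }
    }
    return max
}

func main() {
    fmt.Println(maxPosition(10, 42, 7)) // 42
}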
|
||||
func (d *SyncServerDatasource) syncPositionTx(
|
||||
ctx context.Context, txn *sql.Tx,
|
||||
) (sp types.SyncPosition, err error) {
|
||||
) (sp types.PaginationToken, err error) {
|
||||
|
||||
maxEventID, err := d.events.selectMaxEventID(ctx, txn)
|
||||
if err != nil {
|
||||
|
@ -222,10 +400,8 @@ func (d *SyncServerDatasource) syncPositionTx(
|
|||
if maxInviteID > maxEventID {
|
||||
maxEventID = maxInviteID
|
||||
}
|
||||
sp.PDUPosition = maxEventID
|
||||
|
||||
sp.TypingPosition = d.typingCache.GetLatestSyncPosition()
|
||||
|
||||
sp.PDUPosition = types.StreamPosition(maxEventID)
|
||||
sp.EDUTypingPosition = types.StreamPosition(d.typingCache.GetLatestSyncPosition())
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -234,7 +410,7 @@ func (d *SyncServerDatasource) syncPositionTx(
|
|||
func (d *SyncServerDatasource) addPDUDeltaToResponse(
|
||||
ctx context.Context,
|
||||
device authtypes.Device,
|
||||
fromPos, toPos int64,
|
||||
fromPos, toPos types.StreamPosition,
|
||||
numRecentEventsPerRoom int,
|
||||
wantFullState bool,
|
||||
res *types.Response,
|
||||
|
@ -246,7 +422,7 @@ func (d *SyncServerDatasource) addPDUDeltaToResponse(
|
|||
var succeeded bool
|
||||
defer common.EndTransaction(txn, &succeeded)
|
||||
|
||||
stateFilterPart := gomatrixserverlib.DefaultFilterPart() // TODO: use filter provided in request
|
||||
stateFilterPart := gomatrix.DefaultFilterPart() // TODO: use filter provided in request
|
||||
|
||||
// Work out which rooms to return in the response. This is done by getting not only the currently
|
||||
// joined rooms, but also which rooms have membership transitions for this user between the 2 PDU stream positions.
|
||||
|
@ -286,7 +462,7 @@ func (d *SyncServerDatasource) addPDUDeltaToResponse(
|
|||
// addTypingDeltaToResponse adds all typing notifications to a sync response
|
||||
// since the specified position.
|
||||
func (d *SyncServerDatasource) addTypingDeltaToResponse(
|
||||
since int64,
|
||||
since types.PaginationToken,
|
||||
joinedRoomIDs []string,
|
||||
res *types.Response,
|
||||
) error {
|
||||
|
@ -295,7 +471,7 @@ func (d *SyncServerDatasource) addTypingDeltaToResponse(
|
|||
var err error
|
||||
for _, roomID := range joinedRoomIDs {
|
||||
if typingUsers, updated := d.typingCache.GetTypingUsersIfUpdatedAfter(
|
||||
roomID, since,
|
||||
roomID, int64(since.EDUTypingPosition),
|
||||
); updated {
|
||||
ev := gomatrixserverlib.ClientEvent{
|
||||
Type: gomatrixserverlib.MTyping,
|
||||
|
@ -320,14 +496,14 @@ func (d *SyncServerDatasource) addTypingDeltaToResponse(
|
|||
// addEDUDeltaToResponse adds updates for EDUs of each type since fromPos if
|
||||
// the positions of that type are not equal in fromPos and toPos.
|
||||
func (d *SyncServerDatasource) addEDUDeltaToResponse(
|
||||
fromPos, toPos types.SyncPosition,
|
||||
fromPos, toPos types.PaginationToken,
|
||||
joinedRoomIDs []string,
|
||||
res *types.Response,
|
||||
) (err error) {
|
||||
|
||||
if fromPos.TypingPosition != toPos.TypingPosition {
|
||||
if fromPos.EDUTypingPosition != toPos.EDUTypingPosition {
|
||||
err = d.addTypingDeltaToResponse(
|
||||
fromPos.TypingPosition, joinedRoomIDs, res,
|
||||
fromPos, joinedRoomIDs, res,
|
||||
)
|
||||
}
|
||||
|
||||
|
@ -342,7 +518,7 @@ func (d *SyncServerDatasource) addEDUDeltaToResponse(
|
|||
func (d *SyncServerDatasource) IncrementalSync(
|
||||
ctx context.Context,
|
||||
device authtypes.Device,
|
||||
fromPos, toPos types.SyncPosition,
|
||||
fromPos, toPos types.PaginationToken,
|
||||
numRecentEventsPerRoom int,
|
||||
wantFullState bool,
|
||||
) (*types.Response, error) {
|
||||
|
@ -382,7 +558,7 @@ func (d *SyncServerDatasource) getResponseWithPDUsForCompleteSync(
|
|||
numRecentEventsPerRoom int,
|
||||
) (
|
||||
res *types.Response,
|
||||
toPos types.SyncPosition,
|
||||
toPos types.PaginationToken,
|
||||
joinedRoomIDs []string,
|
||||
err error,
|
||||
) {
|
||||
|
@ -411,7 +587,7 @@ func (d *SyncServerDatasource) getResponseWithPDUsForCompleteSync(
|
|||
return
|
||||
}
|
||||
|
||||
stateFilterPart := gomatrixserverlib.DefaultFilterPart() // TODO: use filter provided in request
|
||||
stateFilterPart := gomatrix.DefaultFilterPart() // TODO: use filter provided in request
|
||||
|
||||
// Build up a /sync response. Add joined rooms.
|
||||
for _, roomID := range joinedRoomIDs {
|
||||
|
@ -422,27 +598,36 @@ func (d *SyncServerDatasource) getResponseWithPDUsForCompleteSync(
|
|||
}
|
||||
// TODO: When filters are added, we may need to call this multiple times to get enough events.
|
||||
// See: https://github.com/matrix-org/synapse/blob/v0.19.3/synapse/handlers/sync.py#L316
|
||||
var recentStreamEvents []streamEvent
|
||||
var recentStreamEvents []types.StreamEvent
|
||||
recentStreamEvents, err = d.events.selectRecentEvents(
|
||||
ctx, txn, roomID, 0, toPos.PDUPosition, numRecentEventsPerRoom,
|
||||
ctx, txn, roomID, types.StreamPosition(0), toPos.PDUPosition,
|
||||
numRecentEventsPerRoom, true, true,
|
||||
)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Retrieve the backward topology position, i.e. the position of the
|
||||
// oldest event in the room's topology.
|
||||
var backwardTopologyPos types.StreamPosition
|
||||
backwardTopologyPos, err = d.topology.selectPositionInTopology(ctx, recentStreamEvents[0].EventID())
|
||||
if err != nil {
|
||||
return nil, types.PaginationToken{}, []string{}, err
|
||||
}
|
||||
if backwardTopologyPos-1 <= 0 {
|
||||
backwardTopologyPos = types.StreamPosition(1)
|
||||
} else {
|
||||
backwardTopologyPos--
|
||||
}
|
||||
|
||||
// We don't include a device here as we don't need to send down
|
||||
// transaction IDs for complete syncs
|
||||
recentEvents := streamEventsToEvents(nil, recentStreamEvents)
|
||||
|
||||
recentEvents := d.StreamEventsToEvents(nil, recentStreamEvents)
|
||||
stateEvents = removeDuplicates(stateEvents, recentEvents)
|
||||
jr := types.NewJoinResponse()
|
||||
if prevPDUPos := recentStreamEvents[0].streamPosition - 1; prevPDUPos > 0 {
|
||||
// Use the short form of batch token for prev_batch
|
||||
jr.Timeline.PrevBatch = strconv.FormatInt(prevPDUPos, 10)
|
||||
} else {
|
||||
// Use the short form of batch token for prev_batch
|
||||
jr.Timeline.PrevBatch = "1"
|
||||
}
|
||||
jr.Timeline.PrevBatch = types.NewPaginationTokenFromTypeAndPosition(
|
||||
types.PaginationTokenTypeTopology, backwardTopologyPos, 0,
|
||||
).String()
|
||||
jr.Timeline.Events = gomatrixserverlib.ToClientEvents(recentEvents, gomatrixserverlib.FormatSync)
|
||||
jr.Timeline.Limited = true
|
||||
jr.State.Events = gomatrixserverlib.ToClientEvents(stateEvents, gomatrixserverlib.FormatSync)
|
||||
|
@ -470,7 +655,7 @@ func (d *SyncServerDatasource) CompleteSync(
|
|||
|
||||
// Use a zero value PaginationToken for fromPos so all EDU states are added.
|
||||
err = d.addEDUDeltaToResponse(
|
||||
types.SyncPosition{}, toPos, joinedRoomIDs, res,
|
||||
types.PaginationToken{}, toPos, joinedRoomIDs, res,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -495,8 +680,8 @@ var txReadOnlySnapshot = sql.TxOptions{
|
|||
// If no data is retrieved, returns an empty map
|
||||
// If there was an issue with the retrieval, returns an error
|
||||
func (d *SyncServerDatasource) GetAccountDataInRange(
|
||||
ctx context.Context, userID string, oldPos, newPos int64,
|
||||
accountDataFilterPart *gomatrixserverlib.FilterPart,
|
||||
ctx context.Context, userID string, oldPos, newPos types.StreamPosition,
|
||||
accountDataFilterPart *gomatrix.FilterPart,
|
||||
) (map[string][]string, error) {
|
||||
return d.accountData.selectAccountDataInRange(ctx, userID, oldPos, newPos, accountDataFilterPart)
|
||||
}
|
||||
|
@ -509,7 +694,7 @@ func (d *SyncServerDatasource) GetAccountDataInRange(
|
|||
// Returns an error if there was an issue with the upsert
|
||||
func (d *SyncServerDatasource) UpsertAccountData(
|
||||
ctx context.Context, userID, roomID, dataType string,
|
||||
) (int64, error) {
|
||||
) (types.StreamPosition, error) {
|
||||
return d.accountData.insertAccountData(ctx, userID, roomID, dataType)
|
||||
}
|
||||
|
||||
|
@ -518,7 +703,7 @@ func (d *SyncServerDatasource) UpsertAccountData(
|
|||
// Returns an error if there was a problem communicating with the database.
|
||||
func (d *SyncServerDatasource) AddInviteEvent(
|
||||
ctx context.Context, inviteEvent gomatrixserverlib.Event,
|
||||
) (int64, error) {
|
||||
) (types.StreamPosition, error) {
|
||||
return d.invites.insertInviteEvent(ctx, inviteEvent)
|
||||
}
|
||||
|
||||
|
@ -541,26 +726,26 @@ func (d *SyncServerDatasource) SetTypingTimeoutCallback(fn cache.TimeoutCallback
|
|||
// Returns the newly calculated sync position for typing notifications.
|
||||
func (d *SyncServerDatasource) AddTypingUser(
|
||||
userID, roomID string, expireTime *time.Time,
|
||||
) int64 {
|
||||
return d.typingCache.AddTypingUser(userID, roomID, expireTime)
|
||||
) types.StreamPosition {
|
||||
return types.StreamPosition(d.typingCache.AddTypingUser(userID, roomID, expireTime))
|
||||
}
|
||||
|
||||
// RemoveTypingUser removes a typing user from the typing cache.
|
||||
// Returns the newly calculated sync position for typing notifications.
|
||||
func (d *SyncServerDatasource) RemoveTypingUser(
|
||||
userID, roomID string,
|
||||
) int64 {
|
||||
return d.typingCache.RemoveUser(userID, roomID)
|
||||
) types.StreamPosition {
|
||||
return types.StreamPosition(d.typingCache.RemoveUser(userID, roomID))
|
||||
}
|
||||
|
||||
func (d *SyncServerDatasource) addInvitesToResponse(
|
||||
ctx context.Context, txn *sql.Tx,
|
||||
userID string,
|
||||
fromPos, toPos int64,
|
||||
fromPos, toPos types.StreamPosition,
|
||||
res *types.Response,
|
||||
) error {
|
||||
invites, err := d.invites.selectInviteEventsInRange(
|
||||
ctx, txn, userID, int64(fromPos), int64(toPos),
|
||||
ctx, txn, userID, fromPos, toPos,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -576,12 +761,29 @@ func (d *SyncServerDatasource) addInvitesToResponse(
|
|||
return nil
|
||||
}
|
||||
|
||||
// Retrieve the backward topology position, i.e. the position of the
|
||||
// oldest event in the room's topology.
|
||||
func (d *SyncServerDatasource) getBackwardTopologyPos(
|
||||
ctx context.Context,
|
||||
events []types.StreamEvent,
|
||||
) (pos types.StreamPosition) {
|
||||
if len(events) > 0 {
|
||||
pos, _ = d.topology.selectPositionInTopology(ctx, events[0].EventID())
|
||||
}
|
||||
if pos-1 <= 0 {
|
||||
pos = types.StreamPosition(1)
|
||||
} else {
|
||||
pos = pos - 1
|
||||
}
|
||||
return
|
||||
}
|
||||
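getBackwardTopologyPos points prev_batch at the slot just before the oldest returned event, clamped so it never drops below 1. The clamp in isolation:

package main

import "fmt"

// backwardPos returns the topological slot just before the oldest event,
// never going below 1.
func backwardPos(oldest int64) int64 {
    if oldest-1 <= 0 {
        return 1
    }
    return oldest - 1
}

func main() {
    fmt.Println(backwardPos(1), backwardPos(5)) // 1 4
}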
|
||||
// addRoomDeltaToResponse adds a room state delta to a sync response
|
||||
func (d *SyncServerDatasource) addRoomDeltaToResponse(
|
||||
ctx context.Context,
|
||||
device *authtypes.Device,
|
||||
txn *sql.Tx,
|
||||
fromPos, toPos int64,
|
||||
fromPos, toPos types.StreamPosition,
|
||||
delta stateDelta,
|
||||
numRecentEventsPerRoom int,
|
||||
res *types.Response,
|
||||
|
@ -597,38 +799,23 @@ func (d *SyncServerDatasource) addRoomDeltaToResponse(
|
|||
endPos = delta.membershipPos
|
||||
}
|
||||
recentStreamEvents, err := d.events.selectRecentEvents(
|
||||
ctx, txn, delta.roomID, fromPos, endPos, numRecentEventsPerRoom,
|
||||
ctx, txn, delta.roomID, types.StreamPosition(fromPos), types.StreamPosition(endPos),
|
||||
numRecentEventsPerRoom, true, true,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
recentEvents := streamEventsToEvents(device, recentStreamEvents)
|
||||
recentEvents := d.StreamEventsToEvents(device, recentStreamEvents)
|
||||
delta.stateEvents = removeDuplicates(delta.stateEvents, recentEvents) // roll back
|
||||
|
||||
var prevPDUPos int64
|
||||
|
||||
if len(recentEvents) == 0 {
|
||||
if len(delta.stateEvents) == 0 {
|
||||
// Don't bother appending empty room entries
|
||||
return nil
|
||||
}
|
||||
|
||||
// If full_state=true and since is already up to date, then we'll have
|
||||
// state events but no recent events.
|
||||
prevPDUPos = toPos - 1
|
||||
} else {
|
||||
prevPDUPos = recentStreamEvents[0].streamPosition - 1
|
||||
}
|
||||
|
||||
if prevPDUPos <= 0 {
|
||||
prevPDUPos = 1
|
||||
}
|
||||
backwardTopologyPos := d.getBackwardTopologyPos(ctx, recentStreamEvents)
|
||||
|
||||
switch delta.membership {
|
||||
case gomatrixserverlib.Join:
|
||||
jr := types.NewJoinResponse()
|
||||
// Use the short form of batch token for prev_batch
|
||||
jr.Timeline.PrevBatch = strconv.FormatInt(prevPDUPos, 10)
|
||||
|
||||
jr.Timeline.PrevBatch = types.NewPaginationTokenFromTypeAndPosition(
|
||||
types.PaginationTokenTypeTopology, backwardTopologyPos, 0,
|
||||
).String()
|
||||
jr.Timeline.Events = gomatrixserverlib.ToClientEvents(recentEvents, gomatrixserverlib.FormatSync)
|
||||
jr.Timeline.Limited = false // TODO: if len(events) >= numRecents + 1 and then set limited:true
|
||||
jr.State.Events = gomatrixserverlib.ToClientEvents(delta.stateEvents, gomatrixserverlib.FormatSync)
|
||||
|
@ -639,8 +826,9 @@ func (d *SyncServerDatasource) addRoomDeltaToResponse(
|
|||
// TODO: recentEvents may contain events that this user is not allowed to see because they are
|
||||
// no longer in the room.
|
||||
lr := types.NewLeaveResponse()
|
||||
// Use the short form of batch token for prev_batch
|
||||
lr.Timeline.PrevBatch = strconv.FormatInt(prevPDUPos, 10)
|
||||
lr.Timeline.PrevBatch = types.NewPaginationTokenFromTypeAndPosition(
|
||||
types.PaginationTokenTypeTopology, backwardTopologyPos, 0,
|
||||
).String()
|
||||
lr.Timeline.Events = gomatrixserverlib.ToClientEvents(recentEvents, gomatrixserverlib.FormatSync)
|
||||
lr.Timeline.Limited = false // TODO: if len(events) >= numRecents + 1 and then set limited:true
|
||||
lr.State.Events = gomatrixserverlib.ToClientEvents(delta.stateEvents, gomatrixserverlib.FormatSync)
|
||||
|
@ -655,9 +843,9 @@ func (d *SyncServerDatasource) addRoomDeltaToResponse(
|
|||
func (d *SyncServerDatasource) fetchStateEvents(
|
||||
ctx context.Context, txn *sql.Tx,
|
||||
roomIDToEventIDSet map[string]map[string]bool,
|
||||
eventIDToEvent map[string]streamEvent,
|
||||
) (map[string][]streamEvent, error) {
|
||||
stateBetween := make(map[string][]streamEvent)
|
||||
eventIDToEvent map[string]types.StreamEvent,
|
||||
) (map[string][]types.StreamEvent, error) {
|
||||
stateBetween := make(map[string][]types.StreamEvent)
|
||||
missingEvents := make(map[string][]string)
|
||||
for roomID, ids := range roomIDToEventIDSet {
|
||||
events := stateBetween[roomID]
|
||||
|
@ -699,7 +887,7 @@ func (d *SyncServerDatasource) fetchStateEvents(
|
|||
|
||||
func (d *SyncServerDatasource) fetchMissingStateEvents(
|
||||
ctx context.Context, txn *sql.Tx, eventIDs []string,
|
||||
) ([]streamEvent, error) {
|
||||
) ([]types.StreamEvent, error) {
|
||||
// Fetch from the events table first so we pick up the stream ID for the
|
||||
// event.
|
||||
events, err := d.events.selectEvents(ctx, txn, eventIDs)
|
||||
|
@ -742,8 +930,8 @@ func (d *SyncServerDatasource) fetchMissingStateEvents(
|
|||
// A list of joined room IDs is also returned in case the caller needs it.
|
||||
func (d *SyncServerDatasource) getStateDeltas(
|
||||
ctx context.Context, device *authtypes.Device, txn *sql.Tx,
|
||||
fromPos, toPos int64, userID string,
|
||||
stateFilterPart *gomatrixserverlib.FilterPart,
|
||||
fromPos, toPos types.StreamPosition, userID string,
|
||||
stateFilterPart *gomatrix.FilterPart,
|
||||
) ([]stateDelta, []string, error) {
|
||||
// Implement membership change algorithm: https://github.com/matrix-org/synapse/blob/v0.19.3/synapse/handlers/sync.py#L821
|
||||
// - Get membership list changes for this user in this sync response
|
||||
|
@ -775,7 +963,7 @@ func (d *SyncServerDatasource) getStateDeltas(
|
|||
if membership := getMembershipFromEvent(&ev.Event, userID); membership != "" {
|
||||
if membership == gomatrixserverlib.Join {
|
||||
// send full room state down instead of a delta
|
||||
var s []streamEvent
|
||||
var s []types.StreamEvent
|
||||
s, err = d.currentStateStreamEventsForRoom(ctx, txn, roomID, stateFilterPart)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
|
@ -786,8 +974,8 @@ func (d *SyncServerDatasource) getStateDeltas(
|
|||
|
||||
deltas = append(deltas, stateDelta{
|
||||
membership: membership,
|
||||
membershipPos: ev.streamPosition,
|
||||
stateEvents: streamEventsToEvents(device, stateStreamEvents),
|
||||
membershipPos: ev.StreamPosition,
|
||||
stateEvents: d.StreamEventsToEvents(device, stateStreamEvents),
|
||||
roomID: roomID,
|
||||
})
|
||||
break
|
||||
|
@ -803,7 +991,7 @@ func (d *SyncServerDatasource) getStateDeltas(
|
|||
for _, joinedRoomID := range joinedRoomIDs {
|
||||
deltas = append(deltas, stateDelta{
|
||||
membership: gomatrixserverlib.Join,
|
||||
stateEvents: streamEventsToEvents(device, state[joinedRoomID]),
|
||||
stateEvents: d.StreamEventsToEvents(device, state[joinedRoomID]),
|
||||
roomID: joinedRoomID,
|
||||
})
|
||||
}
|
||||
|
@ -817,8 +1005,8 @@ func (d *SyncServerDatasource) getStateDeltas(
|
|||
// updates for other rooms.
|
||||
func (d *SyncServerDatasource) getStateDeltasForFullStateSync(
|
||||
ctx context.Context, device *authtypes.Device, txn *sql.Tx,
|
||||
fromPos, toPos int64, userID string,
|
||||
stateFilterPart *gomatrixserverlib.FilterPart,
|
||||
fromPos, toPos types.StreamPosition, userID string,
|
||||
stateFilterPart *gomatrix.FilterPart,
|
||||
) ([]stateDelta, []string, error) {
|
||||
joinedRoomIDs, err := d.roomstate.selectRoomIDsWithMembership(ctx, txn, userID, gomatrixserverlib.Join)
|
||||
if err != nil {
|
||||
|
@ -836,7 +1024,7 @@ func (d *SyncServerDatasource) getStateDeltasForFullStateSync(
|
|||
}
|
||||
deltas = append(deltas, stateDelta{
|
||||
membership: gomatrixserverlib.Join,
|
||||
stateEvents: streamEventsToEvents(device, s),
|
||||
stateEvents: d.StreamEventsToEvents(device, s),
|
||||
roomID: joinedRoomID,
|
||||
})
|
||||
}
|
||||
|
@ -857,8 +1045,8 @@ func (d *SyncServerDatasource) getStateDeltasForFullStateSync(
|
|||
if membership != gomatrixserverlib.Join { // We've already added full state for all joined rooms above.
|
||||
deltas = append(deltas, stateDelta{
|
||||
membership: membership,
|
||||
membershipPos: ev.streamPosition,
|
||||
stateEvents: streamEventsToEvents(device, stateStreamEvents),
|
||||
membershipPos: ev.StreamPosition,
|
||||
stateEvents: d.StreamEventsToEvents(device, stateStreamEvents),
|
||||
roomID: roomID,
|
||||
})
|
||||
}
|
||||
|
@ -873,30 +1061,30 @@ func (d *SyncServerDatasource) getStateDeltasForFullStateSync(
|
|||
|
||||
func (d *SyncServerDatasource) currentStateStreamEventsForRoom(
|
||||
ctx context.Context, txn *sql.Tx, roomID string,
|
||||
stateFilterPart *gomatrixserverlib.FilterPart,
|
||||
) ([]streamEvent, error) {
|
||||
stateFilterPart *gomatrix.FilterPart,
|
||||
) ([]types.StreamEvent, error) {
|
||||
allState, err := d.roomstate.selectCurrentState(ctx, txn, roomID, stateFilterPart)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s := make([]streamEvent, len(allState))
|
||||
s := make([]types.StreamEvent, len(allState))
|
||||
for i := 0; i < len(s); i++ {
|
||||
s[i] = streamEvent{Event: allState[i], streamPosition: 0}
|
||||
s[i] = types.StreamEvent{Event: allState[i], StreamPosition: 0}
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// streamEventsToEvents converts streamEvent to Event. If device is non-nil and
|
||||
// StreamEventsToEvents converts streamEvent to Event. If device is non-nil and
|
||||
// matches the streamevent.transactionID device then the transaction ID gets
|
||||
// added to the unsigned section of the output event.
|
||||
func streamEventsToEvents(device *authtypes.Device, in []streamEvent) []gomatrixserverlib.Event {
|
||||
func (d *SyncServerDatasource) StreamEventsToEvents(device *authtypes.Device, in []types.StreamEvent) []gomatrixserverlib.Event {
|
||||
out := make([]gomatrixserverlib.Event, len(in))
|
||||
for i := 0; i < len(in); i++ {
|
||||
out[i] = in[i].Event
|
||||
if device != nil && in[i].transactionID != nil {
|
||||
if device.UserID == in[i].Sender() && device.SessionID == in[i].transactionID.SessionID {
|
||||
if device != nil && in[i].TransactionID != nil {
|
||||
if device.UserID == in[i].Sender() && device.SessionID == in[i].TransactionID.SessionID {
|
||||
err := out[i].SetUnsignedField(
|
||||
"transaction_id", in[i].transactionID.TransactionID,
|
||||
"transaction_id", in[i].TransactionID.TransactionID,
|
||||
)
|
||||
if err != nil {
|
||||
logrus.WithFields(logrus.Fields{
|
||||
|
|
|
@ -16,7 +16,6 @@ package storage
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
|
@ -26,6 +25,7 @@ import (
|
|||
"github.com/matrix-org/dendrite/syncapi/storage/postgres"
|
||||
"github.com/matrix-org/dendrite/syncapi/types"
|
||||
"github.com/matrix-org/dendrite/typingserver/cache"
|
||||
"github.com/matrix-org/gomatrix"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
)
|
||||
|
||||
|
@ -33,31 +33,38 @@ type Database interface {
|
|||
common.PartitionStorer
|
||||
AllJoinedUsersInRooms(ctx context.Context) (map[string][]string, error)
|
||||
Events(ctx context.Context, eventIDs []string) ([]gomatrixserverlib.Event, error)
|
||||
WriteEvent(ctx context.Context, ev *gomatrixserverlib.Event, addStateEvents []gomatrixserverlib.Event, addStateEventIDs, removeStateEventIDs []string, transactionID *api.TransactionID) (pduPosition int64, returnErr error)
|
||||
WriteEvent(context.Context, *gomatrixserverlib.Event, []gomatrixserverlib.Event, []string, []string, *api.TransactionID, bool) (types.StreamPosition, error)
|
||||
GetStateEvent(ctx context.Context, roomID, evType, stateKey string) (*gomatrixserverlib.Event, error)
|
||||
GetStateEventsForRoom(ctx context.Context, roomID string, stateFilterPart *gomatrixserverlib.FilterPart) (stateEvents []gomatrixserverlib.Event, err error)
|
||||
SyncPosition(ctx context.Context) (types.SyncPosition, error)
|
||||
IncrementalSync(ctx context.Context, device authtypes.Device, fromPos, toPos types.SyncPosition, numRecentEventsPerRoom int, wantFullState bool) (*types.Response, error)
|
||||
GetStateEventsForRoom(ctx context.Context, roomID string, stateFilterPart *gomatrix.FilterPart) (stateEvents []gomatrixserverlib.Event, err error)
|
||||
SyncPosition(ctx context.Context) (types.PaginationToken, error)
|
||||
IncrementalSync(ctx context.Context, device authtypes.Device, fromPos, toPos types.PaginationToken, numRecentEventsPerRoom int, wantFullState bool) (*types.Response, error)
|
||||
CompleteSync(ctx context.Context, userID string, numRecentEventsPerRoom int) (*types.Response, error)
|
||||
GetAccountDataInRange(ctx context.Context, userID string, oldPos, newPos int64, accountDataFilterPart *gomatrixserverlib.FilterPart) (map[string][]string, error)
|
||||
UpsertAccountData(ctx context.Context, userID, roomID, dataType string) (int64, error)
|
||||
AddInviteEvent(ctx context.Context, inviteEvent gomatrixserverlib.Event) (int64, error)
|
||||
GetAccountDataInRange(ctx context.Context, userID string, oldPos, newPos types.StreamPosition, accountDataFilterPart *gomatrix.FilterPart) (map[string][]string, error)
|
||||
UpsertAccountData(ctx context.Context, userID, roomID, dataType string) (types.StreamPosition, error)
|
||||
AddInviteEvent(ctx context.Context, inviteEvent gomatrixserverlib.Event) (types.StreamPosition, error)
|
||||
RetireInviteEvent(ctx context.Context, inviteEventID string) error
|
||||
SetTypingTimeoutCallback(fn cache.TimeoutCallbackFn)
|
||||
AddTypingUser(userID, roomID string, expireTime *time.Time) int64
|
||||
RemoveTypingUser(userID, roomID string) int64
|
||||
AddTypingUser(userID, roomID string, expireTime *time.Time) types.StreamPosition
|
||||
RemoveTypingUser(userID, roomID string) types.StreamPosition
|
||||
GetEventsInRange(ctx context.Context, from, to *types.PaginationToken, roomID string, limit int, backwardOrdering bool) (events []types.StreamEvent, err error)
|
||||
EventPositionInTopology(ctx context.Context, eventID string) (types.StreamPosition, error)
|
||||
EventsAtTopologicalPosition(ctx context.Context, roomID string, pos types.StreamPosition) ([]types.StreamEvent, error)
|
||||
BackwardExtremitiesForRoom(ctx context.Context, roomID string) (backwardExtremities []string, err error)
|
||||
MaxTopologicalPosition(ctx context.Context, roomID string) (types.StreamPosition, error)
|
||||
StreamEventsToEvents(device *authtypes.Device, in []types.StreamEvent) []gomatrixserverlib.Event
|
||||
SyncStreamPosition(ctx context.Context) (types.StreamPosition, error)
|
||||
}
|
||||
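When an interface such as Database grows new methods, a compile-time assertion catches a backend that has fallen behind. A self-contained sketch of that idiom; the interface and implementation here are stand-ins, not dendrite types:

package main

import "fmt"

type Database interface {
    SyncStreamPosition() int64
}

type fakeDB struct{ pos int64 }

func (f *fakeDB) SyncStreamPosition() int64 { return f.pos }

// The blank assignment fails to compile if *fakeDB stops implementing Database.
var _ Database = (*fakeDB)(nil)

func main() {
    var db Database = &fakeDB{pos: 42}
    fmt.Println(db.SyncStreamPosition())
}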
|
||||
// NewSyncServerDatasource opens a database connection.
|
||||
func NewSyncServerDatasource(dataSourceName string) (Database, error) {
|
||||
uri, err := url.Parse(dataSourceName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return postgres.NewSyncServerDatasource(dataSourceName)
|
||||
}
|
||||
switch uri.Scheme {
|
||||
case "postgres":
|
||||
return postgres.NewSyncServerDatasource(dataSourceName)
|
||||
default:
|
||||
return nil, errors.New("unknown schema")
|
||||
return postgres.NewSyncServerDatasource(dataSourceName)
|
||||
}
|
||||
}
|
||||
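NewSyncServerDatasource dispatches on the URI scheme of the connection string. A standalone sketch of that dispatch, assuming a postgres:// data source name:

package main

import (
    "errors"
    "fmt"
    "net/url"
)

// driverFor inspects the data source name and picks a database driver.
func driverFor(dataSourceName string) (string, error) {
    uri, err := url.Parse(dataSourceName)
    if err != nil {
        return "", err
    }
    switch uri.Scheme {
    case "postgres":
        return "postgres", nil
    default:
        return "", errors.New("unsupported scheme")
    }
}

func main() {
    d, err := driverFor("postgres://dendrite:pass@localhost/syncapi")
    fmt.Println(d, err) // postgres <nil>
}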
|
|
|
@ -36,7 +36,7 @@ type Notifier struct {
|
|||
// Protects currPos and userStreams.
|
||||
streamLock *sync.Mutex
|
||||
// The latest sync position
|
||||
currPos types.SyncPosition
|
||||
currPos types.PaginationToken
|
||||
// A map of user_id => UserStream which can be used to wake a given user's /sync request.
|
||||
userStreams map[string]*UserStream
|
||||
// The last time we cleaned out stale entries from the userStreams map
|
||||
|
@ -46,7 +46,7 @@ type Notifier struct {
|
|||
// NewNotifier creates a new notifier set to the given sync position.
|
||||
// In order for this to be of any use, the Notifier needs to be told all rooms and
|
||||
// the joined users within each of them by calling Notifier.Load(*storage.SyncServerDatabase).
|
||||
func NewNotifier(pos types.SyncPosition) *Notifier {
|
||||
func NewNotifier(pos types.PaginationToken) *Notifier {
|
||||
return &Notifier{
|
||||
currPos: pos,
|
||||
roomIDToJoinedUsers: make(map[string]userIDSet),
|
||||
|
@ -68,7 +68,7 @@ func NewNotifier(pos types.SyncPosition) *Notifier {
|
|||
// event type it handles, leaving other fields as 0.
|
||||
func (n *Notifier) OnNewEvent(
|
||||
ev *gomatrixserverlib.Event, roomID string, userIDs []string,
|
||||
posUpdate types.SyncPosition,
|
||||
posUpdate types.PaginationToken,
|
||||
) {
|
||||
// update the current position then notify relevant /sync streams.
|
||||
// This needs to be done PRIOR to waking up users as they will read this value.
|
||||
|
@ -151,7 +151,7 @@ func (n *Notifier) Load(ctx context.Context, db storage.Database) error {
|
|||
}
|
||||
|
||||
// CurrentPosition returns the current sync position
|
||||
func (n *Notifier) CurrentPosition() types.SyncPosition {
|
||||
func (n *Notifier) CurrentPosition() types.PaginationToken {
|
||||
n.streamLock.Lock()
|
||||
defer n.streamLock.Unlock()
|
||||
|
||||
|
@ -173,7 +173,7 @@ func (n *Notifier) setUsersJoinedToRooms(roomIDToUserIDs map[string][]string) {
|
|||
}
|
||||
}
|
||||
|
||||
func (n *Notifier) wakeupUsers(userIDs []string, newPos types.SyncPosition) {
|
||||
func (n *Notifier) wakeupUsers(userIDs []string, newPos types.PaginationToken) {
|
||||
for _, userID := range userIDs {
|
||||
stream := n.fetchUserStream(userID, false)
|
||||
if stream != nil {
|
||||
|
|
|
@ -32,11 +32,11 @@ var (
|
|||
randomMessageEvent gomatrixserverlib.Event
|
||||
aliceInviteBobEvent gomatrixserverlib.Event
|
||||
bobLeaveEvent gomatrixserverlib.Event
|
||||
syncPositionVeryOld types.SyncPosition
|
||||
syncPositionBefore types.SyncPosition
|
||||
syncPositionAfter types.SyncPosition
|
||||
syncPositionNewEDU types.SyncPosition
|
||||
syncPositionAfter2 types.SyncPosition
|
||||
syncPositionVeryOld types.PaginationToken
|
||||
syncPositionBefore types.PaginationToken
|
||||
syncPositionAfter types.PaginationToken
|
||||
syncPositionNewEDU types.PaginationToken
|
||||
syncPositionAfter2 types.PaginationToken
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -46,9 +46,9 @@ var (
|
|||
)
|
||||
|
||||
func init() {
|
||||
baseSyncPos := types.SyncPosition{
|
||||
PDUPosition: 0,
|
||||
TypingPosition: 0,
|
||||
baseSyncPos := types.PaginationToken{
|
||||
PDUPosition: 0,
|
||||
EDUTypingPosition: 0,
|
||||
}
|
||||
|
||||
syncPositionVeryOld = baseSyncPos
|
||||
|
@ -61,7 +61,7 @@ func init() {
|
|||
syncPositionAfter.PDUPosition = 12
|
||||
|
||||
syncPositionNewEDU = syncPositionAfter
|
||||
syncPositionNewEDU.TypingPosition = 1
|
||||
syncPositionNewEDU.EDUTypingPosition = 1
|
||||
|
||||
syncPositionAfter2 = baseSyncPos
|
||||
syncPositionAfter2.PDUPosition = 13
|
||||
|
@ -119,7 +119,7 @@ func TestImmediateNotification(t *testing.T) {
|
|||
t.Fatalf("TestImmediateNotification error: %s", err)
|
||||
}
|
||||
if pos != syncPositionBefore {
|
||||
t.Fatalf("TestImmediateNotification want %d, got %d", syncPositionBefore, pos)
|
||||
t.Fatalf("TestImmediateNotification want %v, got %v", syncPositionBefore, pos)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -138,7 +138,7 @@ func TestNewEventAndJoinedToRoom(t *testing.T) {
|
|||
t.Errorf("TestNewEventAndJoinedToRoom error: %s", err)
|
||||
}
|
||||
if pos != syncPositionAfter {
|
||||
t.Errorf("TestNewEventAndJoinedToRoom want %d, got %d", syncPositionAfter, pos)
|
||||
t.Errorf("TestNewEventAndJoinedToRoom want %v, got %v", syncPositionAfter, pos)
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
|
@ -166,7 +166,7 @@ func TestNewInviteEventForUser(t *testing.T) {
|
|||
t.Errorf("TestNewInviteEventForUser error: %s", err)
|
||||
}
|
||||
if pos != syncPositionAfter {
|
||||
t.Errorf("TestNewInviteEventForUser want %d, got %d", syncPositionAfter, pos)
|
||||
t.Errorf("TestNewInviteEventForUser want %v, got %v", syncPositionAfter, pos)
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
|
@ -194,7 +194,7 @@ func TestEDUWakeup(t *testing.T) {
|
|||
t.Errorf("TestNewInviteEventForUser error: %s", err)
|
||||
}
|
||||
if pos != syncPositionNewEDU {
|
||||
t.Errorf("TestNewInviteEventForUser want %d, got %d", syncPositionNewEDU, pos)
|
||||
t.Errorf("TestNewInviteEventForUser want %v, got %v", syncPositionNewEDU, pos)
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
|
@ -222,7 +222,7 @@ func TestMultipleRequestWakeup(t *testing.T) {
|
|||
t.Errorf("TestMultipleRequestWakeup error: %s", err)
|
||||
}
|
||||
if pos != syncPositionAfter {
|
||||
t.Errorf("TestMultipleRequestWakeup want %d, got %d", syncPositionAfter, pos)
|
||||
t.Errorf("TestMultipleRequestWakeup want %v, got %v", syncPositionAfter, pos)
|
||||
}
|
||||
wg.Done()
|
||||
}
|
||||
|
@ -262,7 +262,7 @@ func TestNewEventAndWasPreviouslyJoinedToRoom(t *testing.T) {
|
|||
t.Errorf("TestNewEventAndWasPreviouslyJoinedToRoom error: %s", err)
|
||||
}
|
||||
if pos != syncPositionAfter {
|
||||
t.Errorf("TestNewEventAndWasPreviouslyJoinedToRoom want %d, got %d", syncPositionAfter, pos)
|
||||
t.Errorf("TestNewEventAndWasPreviouslyJoinedToRoom want %v, got %v", syncPositionAfter, pos)
|
||||
}
|
||||
leaveWG.Done()
|
||||
}()
|
||||
|
@ -281,7 +281,7 @@ func TestNewEventAndWasPreviouslyJoinedToRoom(t *testing.T) {
|
|||
t.Errorf("TestNewEventAndWasPreviouslyJoinedToRoom error: %s", err)
|
||||
}
|
||||
if pos != syncPositionAfter2 {
|
||||
t.Errorf("TestNewEventAndWasPreviouslyJoinedToRoom want %d, got %d", syncPositionAfter2, pos)
|
||||
t.Errorf("TestNewEventAndWasPreviouslyJoinedToRoom want %v, got %v", syncPositionAfter2, pos)
|
||||
}
|
||||
aliceWG.Done()
|
||||
}()
|
||||
|
@ -305,14 +305,14 @@ func TestNewEventAndWasPreviouslyJoinedToRoom(t *testing.T) {
|
|||
time.Sleep(1 * time.Millisecond)
|
||||
}
|
||||
|
||||
func waitForEvents(n *Notifier, req syncRequest) (types.SyncPosition, error) {
|
||||
func waitForEvents(n *Notifier, req syncRequest) (types.PaginationToken, error) {
|
||||
listener := n.GetListener(req)
|
||||
defer listener.Close()
|
||||
|
||||
select {
|
||||
case <-time.After(5 * time.Second):
|
||||
return types.SyncPosition{}, fmt.Errorf(
|
||||
"waitForEvents timed out waiting for %s (pos=%d)", req.device.UserID, req.since,
|
||||
return types.PaginationToken{}, fmt.Errorf(
|
||||
"waitForEvents timed out waiting for %s (pos=%v)", req.device.UserID, req.since,
|
||||
)
|
||||
case <-listener.GetNotifyChannel(*req.since):
|
||||
p := listener.GetSyncPosition()
|
||||
|
@ -337,7 +337,7 @@ func lockedFetchUserStream(n *Notifier, userID string) *UserStream {
|
|||
return n.fetchUserStream(userID, true)
|
||||
}
|
||||
|
||||
func newTestSyncRequest(userID string, since types.SyncPosition) syncRequest {
|
||||
func newTestSyncRequest(userID string, since types.PaginationToken) syncRequest {
|
||||
return syncRequest{
|
||||
device: authtypes.Device{UserID: userID},
|
||||
timeout: 1 * time.Minute,
|
||||
|
|
|
@ -16,10 +16,8 @@ package sync
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
|
||||
|
@ -38,7 +36,7 @@ type syncRequest struct {
|
|||
device authtypes.Device
|
||||
limit int
|
||||
timeout time.Duration
|
||||
since *types.SyncPosition // nil means that no since token was supplied
|
||||
since *types.PaginationToken // nil means that no since token was supplied
|
||||
wantFullState bool
|
||||
log *log.Entry
|
||||
}
|
||||
|
@ -47,7 +45,7 @@ func newSyncRequest(req *http.Request, device authtypes.Device) (*syncRequest, e
|
|||
timeout := getTimeout(req.URL.Query().Get("timeout"))
|
||||
fullState := req.URL.Query().Get("full_state")
|
||||
wantFullState := fullState != "" && fullState != "false"
|
||||
since, err := getSyncStreamPosition(req.URL.Query().Get("since"))
|
||||
since, err := getPaginationToken(req.URL.Query().Get("since"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -75,41 +73,14 @@ func getTimeout(timeoutMS string) time.Duration {
|
|||
}
|
||||
|
||||
// getSyncStreamPosition tries to parse a 'since' token taken from the API to a
|
||||
// types.SyncPosition. If the string is empty then (nil, nil) is returned.
|
||||
// types.PaginationToken. If the string is empty then (nil, nil) is returned.
|
||||
// There are two forms of tokens: The full length form containing all PDU and EDU
|
||||
// positions separated by "_", and the short form containing only the PDU
|
||||
// position. Short form can be used for, e.g., `prev_batch` tokens.
|
||||
func getSyncStreamPosition(since string) (*types.SyncPosition, error) {
|
||||
func getPaginationToken(since string) (*types.PaginationToken, error) {
|
||||
if since == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
posStrings := strings.Split(since, "_")
|
||||
if len(posStrings) != 2 && len(posStrings) != 1 {
|
||||
// A token can either be full length or short (PDU-only).
|
||||
return nil, errors.New("malformed batch token")
|
||||
}
|
||||
|
||||
positions := make([]int64, len(posStrings))
|
||||
for i, posString := range posStrings {
|
||||
pos, err := strconv.ParseInt(posString, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
positions[i] = pos
|
||||
}
|
||||
|
||||
if len(positions) == 2 {
|
||||
// Full length token; construct SyncPosition with every entry in
|
||||
// `positions`. These entries must have the same order with the fields
|
||||
// in struct SyncPosition, so we disable the govet check below.
|
||||
return &types.SyncPosition{ //nolint:govet
|
||||
positions[0], positions[1],
|
||||
}, nil
|
||||
} else {
|
||||
// Token with PDU position only
|
||||
return &types.SyncPosition{
|
||||
PDUPosition: positions[0],
|
||||
}, nil
|
||||
}
|
||||
return types.NewPaginationTokenFromString(since)
|
||||
}
|
||||
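getPaginationToken now delegates to types.NewPaginationTokenFromString. A sketch of what parsing a prefixed token might look like; the format assumed here ("s<pdu>_<typing>" for stream tokens, "t<pdu>_<typing>" for topology tokens) is an illustration and may not match the real encoding:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

type token struct {
    Type              string // "s" (stream) or "t" (topology)
    PDUPosition       int64
    EDUTypingPosition int64
}

// parseToken accepts either the full form with both positions or the short
// PDU-only form, mirroring the behaviour described above.
func parseToken(s string) (*token, error) {
    if len(s) < 2 || (s[0] != 's' && s[0] != 't') {
        return nil, fmt.Errorf("malformed token %q", s)
    }
    parts := strings.Split(s[1:], "_")
    t := token{Type: string(s[0])}
    pdu, err := strconv.ParseInt(parts[0], 10, 64)
    if err != nil {
        return nil, err
    }
    t.PDUPosition = pdu
    if len(parts) > 1 {
        if t.EDUTypingPosition, err = strconv.ParseInt(parts[1], 10, 64); err != nil {
            return nil, err
        }
    }
    return &t, nil
}

func main() {
    tok, _ := parseToken("t42_7")
    fmt.Printf("%+v\n", *tok) // {Type:t PDUPosition:42 EDUTypingPosition:7}
}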
|
|
|
@@ -24,6 +24,7 @@ import (
	"github.com/matrix-org/dendrite/clientapi/jsonerror"
	"github.com/matrix-org/dendrite/syncapi/storage"
	"github.com/matrix-org/dendrite/syncapi/types"
	"github.com/matrix-org/gomatrix"
	"github.com/matrix-org/gomatrixserverlib"
	"github.com/matrix-org/util"
	log "github.com/sirupsen/logrus"
@@ -129,7 +130,7 @@ func (rp *RequestPool) OnIncomingSyncRequest(req *http.Request, device *authtype
	}
}

func (rp *RequestPool) currentSyncForUser(req syncRequest, latestPos types.SyncPosition) (res *types.Response, err error) {
func (rp *RequestPool) currentSyncForUser(req syncRequest, latestPos types.PaginationToken) (res *types.Response, err error) {
	// TODO: handle ignored users
	if req.since == nil {
		res, err = rp.db.CompleteSync(req.ctx, req.device.UserID, req.limit)
@@ -141,14 +142,14 @@ func (rp *RequestPool) currentSyncForUser(req syncRequest, latestPos types.SyncP
		return
	}

	accountDataFilter := gomatrixserverlib.DefaultFilterPart() // TODO: use filter provided in req instead
	res, err = rp.appendAccountData(res, req.device.UserID, req, latestPos.PDUPosition, &accountDataFilter)
	accountDataFilter := gomatrix.DefaultFilterPart() // TODO: use filter provided in req instead
	res, err = rp.appendAccountData(res, req.device.UserID, req, int64(latestPos.PDUPosition), &accountDataFilter)
	return
}

func (rp *RequestPool) appendAccountData(
	data *types.Response, userID string, req syncRequest, currentPos int64,
	accountDataFilter *gomatrixserverlib.FilterPart,
	accountDataFilter *gomatrix.FilterPart,
) (*types.Response, error) {
	// TODO: Account data doesn't have a sync position of its own, meaning that
	// account data might be sent multiple time to the client if multiple account
@@ -182,7 +183,11 @@ func (rp *RequestPool) appendAccountData(
	}

	// Sync is not initial, get all account data since the latest sync
	dataTypes, err := rp.db.GetAccountDataInRange(req.ctx, userID, req.since.PDUPosition, currentPos, accountDataFilter)
	dataTypes, err := rp.db.GetAccountDataInRange(
		req.ctx, userID,
		types.StreamPosition(req.since.PDUPosition), types.StreamPosition(currentPos),
		accountDataFilter,
	)
	if err != nil {
		return nil, err
	}
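
The int64()/types.StreamPosition() conversions above exist because StreamPosition is a defined type over int64; a tiny sketch (not from this change, positionConversions is a hypothetical name) of crossing that boundary:

	// Illustration only: explicit conversions at the boundary between the
	// plain-integer APIs and the typed stream positions.
	func positionConversions(latest types.PaginationToken) {
		asInt := int64(latest.PDUPosition)  // e.g. for appendAccountData's currentPos
		back := types.StreamPosition(asInt) // e.g. for GetAccountDataInRange's range bounds
		_ = back
	}
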
@@ -35,7 +35,7 @@ type UserStream struct {
	// Closed when there is an update.
	signalChannel chan struct{}
	// The last sync position that there may have been an update for the user
	pos types.SyncPosition
	pos types.PaginationToken
	// The last time when we had some listeners waiting
	timeOfLastChannel time.Time
	// The number of listeners waiting
@@ -51,7 +51,7 @@ type UserStreamListener struct {
}

// NewUserStream creates a new user stream
func NewUserStream(userID string, currPos types.SyncPosition) *UserStream {
func NewUserStream(userID string, currPos types.PaginationToken) *UserStream {
	return &UserStream{
		UserID:            userID,
		timeOfLastChannel: time.Now(),
@@ -85,7 +85,7 @@ func (s *UserStream) GetListener(ctx context.Context) UserStreamListener {
}

// Broadcast a new sync position for this user.
func (s *UserStream) Broadcast(pos types.SyncPosition) {
func (s *UserStream) Broadcast(pos types.PaginationToken) {
	s.lock.Lock()
	defer s.lock.Unlock()

@@ -120,7 +120,7 @@ func (s *UserStream) TimeOfLastNonEmpty() time.Time {

// GetStreamPosition returns last sync position which the UserStream was
// notified about
func (s *UserStreamListener) GetSyncPosition() types.SyncPosition {
func (s *UserStreamListener) GetSyncPosition() types.PaginationToken {
	s.userStream.lock.Lock()
	defer s.userStream.lock.Unlock()

@@ -132,7 +132,7 @@ func (s *UserStreamListener) GetSyncPosition() types.SyncPosition {
// sincePos specifies from which point we want to be notified about. If there
// has already been an update after sincePos we'll return a closed channel
// immediately.
func (s *UserStreamListener) GetNotifyChannel(sincePos types.SyncPosition) <-chan struct{} {
func (s *UserStreamListener) GetNotifyChannel(sincePos types.PaginationToken) <-chan struct{} {
	s.userStream.lock.Lock()
	defer s.userStream.lock.Unlock()

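
A rough sketch (not part of this change; waitForUpdate is a hypothetical name) of the notify flow these new signatures support, assuming it lives in the same package as UserStream:

	// Illustration only: block until the stream has seen something newer than
	// the client's since token, then read the position it was notified about.
	func waitForUpdate(ctx context.Context, stream *UserStream, since types.PaginationToken) (types.PaginationToken, bool) {
		listener := stream.GetListener(ctx)
		select {
		case <-listener.GetNotifyChannel(since):
			return listener.GetSyncPosition(), true
		case <-ctx.Done():
			// Request cancelled or timed out before any update arrived.
			return types.PaginationToken{}, false
		}
	}
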
@@ -21,7 +21,9 @@ import (

	"github.com/matrix-org/dendrite/clientapi/auth/storage/accounts"
	"github.com/matrix-org/dendrite/common/basecomponent"
	"github.com/matrix-org/dendrite/common/config"
	"github.com/matrix-org/dendrite/roomserver/api"
	"github.com/matrix-org/gomatrixserverlib"

	"github.com/matrix-org/dendrite/clientapi/auth/storage/devices"
	"github.com/matrix-org/dendrite/syncapi/consumers"
@@ -37,6 +39,8 @@ func SetupSyncAPIComponent(
	deviceDB *devices.Database,
	accountsDB *accounts.Database,
	queryAPI api.RoomserverQueryAPI,
	federation *gomatrixserverlib.FederationClient,
	cfg *config.Dendrite,
) {
	syncDB, err := storage.NewSyncServerDatasource(string(base.Cfg.Database.SyncAPI))
	if err != nil {
@@ -77,5 +81,5 @@ func SetupSyncAPIComponent(
		logrus.WithError(err).Panicf("failed to start typing server consumer")
	}

	routing.Setup(base.APIMux, requestPool, syncDB, deviceDB)
	routing.Setup(base.APIMux, requestPool, syncDB, deviceDB, federation, queryAPI, cfg)
}
@@ -16,45 +16,144 @@ package types

import (
	"encoding/json"
	"errors"
	"fmt"
	"strconv"
	"strings"

	"github.com/matrix-org/dendrite/roomserver/api"
	"github.com/matrix-org/gomatrixserverlib"
)

// SyncPosition contains the PDU and EDU stream sync positions for a client.
type SyncPosition struct {
	// PDUPosition is the stream position for PDUs the client is at.
	PDUPosition int64
	// TypingPosition is the client's position for typing notifications.
	TypingPosition int64
var (
	// ErrInvalidPaginationTokenType is returned when an attempt at creating a
	// new instance of PaginationToken with an invalid type (i.e. neither "s"
	// nor "t").
	ErrInvalidPaginationTokenType = fmt.Errorf("Pagination token has an unknown prefix (should be either s or t)")
	// ErrInvalidPaginationTokenLen is returned when the pagination token is an
	// invalid length
	ErrInvalidPaginationTokenLen = fmt.Errorf("Pagination token has an invalid length")
)

// StreamPosition represents the offset in the sync stream a client is at.
type StreamPosition int64

// Same as gomatrixserverlib.Event but also has the PDU stream position for this event.
type StreamEvent struct {
	gomatrixserverlib.Event
	StreamPosition  StreamPosition
	TransactionID   *api.TransactionID
	ExcludeFromSync bool
}

// String implements the Stringer interface.
func (sp SyncPosition) String() string {
	return strconv.FormatInt(sp.PDUPosition, 10) + "_" +
		strconv.FormatInt(sp.TypingPosition, 10)
// PaginationTokenType represents the type of a pagination token.
// It can be either "s" (representing a position in the whole stream of events)
// or "t" (representing a position in a room's topology/depth).
type PaginationTokenType string

const (
	// PaginationTokenTypeStream represents a position in the server's whole
	// stream of events
	PaginationTokenTypeStream PaginationTokenType = "s"
	// PaginationTokenTypeTopology represents a position in a room's topology.
	PaginationTokenTypeTopology PaginationTokenType = "t"
)

// PaginationToken represents a pagination token, used for interactions with
// /sync or /messages, for example.
type PaginationToken struct {
	//Position StreamPosition
	Type              PaginationTokenType
	PDUPosition       StreamPosition
	EDUTypingPosition StreamPosition
}

// IsAfter returns whether one SyncPosition refers to states newer than another SyncPosition.
func (sp SyncPosition) IsAfter(other SyncPosition) bool {
	return sp.PDUPosition > other.PDUPosition ||
		sp.TypingPosition > other.TypingPosition
// NewPaginationTokenFromString takes a string of the form "xyyyy..." where "x"
// represents the type of a pagination token and "yyyy..." the token itself, and
// parses it in order to create a new instance of PaginationToken. Returns an
// error if the token couldn't be parsed into an int64, or if the token type
// isn't a known type (returns ErrInvalidPaginationTokenType in the latter
// case).
func NewPaginationTokenFromString(s string) (token *PaginationToken, err error) {
	if len(s) == 0 {
		return nil, ErrInvalidPaginationTokenLen
	}

	token = new(PaginationToken)
	var positions []string

	switch t := PaginationTokenType(s[:1]); t {
	case PaginationTokenTypeStream, PaginationTokenTypeTopology:
		token.Type = t
		positions = strings.Split(s[1:], "_")
	default:
		token.Type = PaginationTokenTypeStream
		positions = strings.Split(s, "_")
	}

	// Try to get the PDU position.
	if len(positions) >= 1 {
		if pduPos, err := strconv.ParseInt(positions[0], 10, 64); err != nil {
			return nil, err
		} else if pduPos < 0 {
			return nil, errors.New("negative PDU position not allowed")
		} else {
			token.PDUPosition = StreamPosition(pduPos)
		}
	}

	// Try to get the typing position.
	if len(positions) >= 2 {
		if typPos, err := strconv.ParseInt(positions[1], 10, 64); err != nil {
			return nil, err
		} else if typPos < 0 {
			return nil, errors.New("negative EDU typing position not allowed")
		} else {
			token.EDUTypingPosition = StreamPosition(typPos)
		}
	}

	return
}

// WithUpdates returns a copy of the SyncPosition with updates applied from another SyncPosition.
// If the latter SyncPosition contains a field that is not 0, it is considered an update,
// and its value will replace the corresponding value in the SyncPosition on which WithUpdates is called.
func (sp SyncPosition) WithUpdates(other SyncPosition) SyncPosition {
	ret := sp
// NewPaginationTokenFromTypeAndPosition takes a PaginationTokenType and a
// StreamPosition and returns an instance of PaginationToken.
func NewPaginationTokenFromTypeAndPosition(
	t PaginationTokenType, pdupos StreamPosition, typpos StreamPosition,
) (p *PaginationToken) {
	return &PaginationToken{
		Type:              t,
		PDUPosition:       pdupos,
		EDUTypingPosition: typpos,
	}
}

// String translates a PaginationToken to a string of the "xyyyy..." (see
// NewPaginationToken to know what it represents).
func (p *PaginationToken) String() string {
	return fmt.Sprintf("%s%d_%d", p.Type, p.PDUPosition, p.EDUTypingPosition)
}

// WithUpdates returns a copy of the PaginationToken with updates applied from another PaginationToken.
// If the latter PaginationToken contains a field that is not 0, it is considered an update,
// and its value will replace the corresponding value in the PaginationToken on which WithUpdates is called.
func (pt *PaginationToken) WithUpdates(other PaginationToken) PaginationToken {
	ret := *pt
	if other.PDUPosition != 0 {
		ret.PDUPosition = other.PDUPosition
	}
	if other.TypingPosition != 0 {
		ret.TypingPosition = other.TypingPosition
	if other.EDUTypingPosition != 0 {
		ret.EDUTypingPosition = other.EDUTypingPosition
	}
	return ret
}

// IsAfter returns whether one PaginationToken refers to states newer than another PaginationToken.
func (sp *PaginationToken) IsAfter(other PaginationToken) bool {
	return sp.PDUPosition > other.PDUPosition ||
		sp.EDUTypingPosition > other.EDUTypingPosition
}

// PrevEventRef represents a reference to a previous event in a state event upgrade
type PrevEventRef struct {
	PrevContent json.RawMessage `json:"prev_content"`
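
A small, self-contained sketch (not part of this change) of how the new token round-trips, using only the functions added above and the syncapi/types import path used elsewhere in this diff:

	package main

	import (
		"fmt"

		"github.com/matrix-org/dendrite/syncapi/types"
	)

	func main() {
		// "s3_1" is a stream token: PDU position 3, EDU typing position 1.
		token, err := types.NewPaginationTokenFromString("s3_1")
		if err != nil {
			panic(err)
		}
		fmt.Println(token.String()) // prints "s3_1"

		// A bare "7" defaults to the stream type, so it serialises as "s7_0".
		legacy, _ := types.NewPaginationTokenFromString("7")
		fmt.Println(legacy.String())

		// WithUpdates keeps existing positions unless the other token has a
		// non-zero value for them.
		updated := token.WithUpdates(types.PaginationToken{EDUTypingPosition: 5})
		fmt.Println(updated.String()) // prints "s3_5"

		// IsAfter compares the positions field by field.
		fmt.Println(updated.IsAfter(*token)) // prints "true"
	}
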
@@ -79,9 +178,9 @@ type Response struct {
}

// NewResponse creates an empty response with initialised maps.
func NewResponse(pos SyncPosition) *Response {
func NewResponse(token PaginationToken) *Response {
	res := Response{
		NextBatch: pos.String(),
		NextBatch: token.String(),
	}
	// Pre-initialise the maps. Synapse will return {} even if there are no rooms under a specific section,
	// so let's do the same thing. Bonus: this means we can't get dreaded 'assignment to entry in nil map' errors.
@@ -96,6 +195,14 @@ func NewResponse(pos SyncPosition) *Response {
	res.AccountData.Events = make([]gomatrixserverlib.ClientEvent, 0)
	res.Presence.Events = make([]gomatrixserverlib.ClientEvent, 0)

	// Fill next_batch with a pagination token. Since this is a response to a sync request, we can assume
	// we'll always return a stream token.
	res.NextBatch = NewPaginationTokenFromTypeAndPosition(
		PaginationTokenTypeStream,
		StreamPosition(token.PDUPosition),
		StreamPosition(token.EDUTypingPosition),
	).String()

	return &res
}
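
One consequence of the hunk above, sketched here (not part of this change; showNextBatch is a hypothetical name): whatever type the incoming token has, next_batch is rebuilt as a stream token.

	// Illustration only, assuming the syncapi/types import used elsewhere
	// in this diff: even a topology token comes back as an "s..." batch.
	func showNextBatch() {
		topo := types.PaginationToken{
			Type:              types.PaginationTokenTypeTopology,
			PDUPosition:       3,
			EDUTypingPosition: 1,
		}
		res := types.NewResponse(topo)
		fmt.Println(res.NextBatch) // prints "s3_1", not "t3_1"
	}
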
syncapi/types/types_test.go (new file, 52 lines)
@@ -0,0 +1,52 @@
package types

import "testing"

func TestNewPaginationTokenFromString(t *testing.T) {
	shouldPass := map[string]PaginationToken{
		"2": PaginationToken{
			Type:        PaginationTokenTypeStream,
			PDUPosition: 2,
		},
		"s4": PaginationToken{
			Type:        PaginationTokenTypeStream,
			PDUPosition: 4,
		},
		"s3_1": PaginationToken{
			Type:              PaginationTokenTypeStream,
			PDUPosition:       3,
			EDUTypingPosition: 1,
		},
		"t3_1_4": PaginationToken{
			Type:              PaginationTokenTypeTopology,
			PDUPosition:       3,
			EDUTypingPosition: 1,
		},
	}

	shouldFail := []string{
		"",
		"s_1",
		"s_",
		"a3_4",
		"b",
		"b-1",
		"-4",
	}

	for test, expected := range shouldPass {
		result, err := NewPaginationTokenFromString(test)
		if err != nil {
			t.Error(err)
		}
		if *result != expected {
			t.Errorf("expected %v but got %v", expected.String(), result.String())
		}
	}

	for _, test := range shouldFail {
		if _, err := NewPaginationTokenFromString(test); err == nil {
			t.Errorf("input '%v' should have errored but didn't", test)
		}
	}
}
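
A hypothetical companion test (not in this diff) in the same package, checking that String() round-trips whatever NewPaginationTokenFromString accepted:

	// Illustration only: parse, then serialise, and expect the original form back.
	func TestPaginationTokenRoundTrip(t *testing.T) {
		for _, in := range []string{"s4_0", "t3_1", "s10_2"} {
			token, err := NewPaginationTokenFromString(in)
			if err != nil {
				t.Fatal(err)
			}
			if got := token.String(); got != in {
				t.Errorf("expected %q to round-trip, got %q", in, got)
			}
		}
	}
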
sytest-blacklist (new file, 18 lines)
@@ -0,0 +1,18 @@
# Blacklisted due to flakiness
Remote users can join room by alias

# Blacklisted due to flakiness
POST /login can log in as a user with just the local part of the id

# Blacklisted due to flakiness
avatar_url updates affect room member events

# Blacklisted due to flakiness
Room members can override their displayname on a room-specific basis

# Blacklisted due to flakiness
Alias creators can delete alias with no ops

# Blacklisted because matrix-org/dendrite#847 might have broken it but we're not
# really sure and we need it pretty badly anyway
Real non-joined users can get individual state for world_readable rooms after leaving
@@ -17,10 +17,6 @@ POST /register rejects registration of usernames with 'é'
POST /register rejects registration of usernames with '\n'
POST /register rejects registration of usernames with '''
GET /login yields a set of flows
POST /login can log in as a user
POST /login can log in as a user with just the local part of the id
POST /login as non-existing user is rejected
POST /login wrong password is rejected
GET /events initially
GET /initialSync initially
Version responds 200 OK with valid structure
@@ -55,7 +51,6 @@ PUT power_levels should not explode if the old power levels were empty
Both GET and PUT work
POST /rooms/:room_id/read_markers can create read marker
User signups are forbidden from starting with '_'
Can logout all devices
Request to logout with invalid an access token is rejected
Request to logout without an access token is rejected
Room creation reports m.room.create to myself
@@ -71,17 +66,15 @@ Can get rooms/{roomId}/members for a departed room (SPEC-216)
3pid invite join with wrong but valid signature are rejected
3pid invite join valid signature but revoked keys are rejected
3pid invite join valid signature but unreachable ID server are rejected
Room members can override their displayname on a room-specific basis
Room members can join a room with an overridden displayname
displayname updates affect room member events
avatar_url updates affect room member events
Real non-joined user cannot call /events on shared room
Real non-joined user cannot call /events on invited room
Real non-joined user cannot call /events on joined room
Real non-joined user cannot call /events on default room
Real non-joined users can get state for world_readable rooms
Real non-joined users can get individual state for world_readable rooms
Real non-joined users can get individual state for world_readable rooms after leaving
#Real non-joined users can get individual state for world_readable rooms after leaving
Real non-joined users cannot send messages to guest_access rooms if not joined
Real users can sync from world_readable guest_access rooms if joined
Real users can sync from default guest_access rooms if joined
@@ -159,7 +152,6 @@ Typing events appear in gapped sync
Inbound federation of state requires event_id as a mandatory paramater
Inbound federation of state_ids requires event_id as a mandatory paramater
POST /register returns the same device_id as that in the request
POST /login returns the same device_id as that in the request
POST /createRoom with creation content
User can create and send/receive messages in a room with version 1
POST /createRoom ignores attempts to set the room version via creation_content
@@ -191,9 +183,30 @@ GET /directory/room/:room_alias yields room ID
PUT /directory/room/:room_alias creates alias
Room aliases can contain Unicode
Creators can delete alias
Alias creators can delete alias with no ops
Alias creators can delete canonical alias with no ops
Regular users cannot create room aliases within the AS namespace
Deleting a non-existent alias should return a 404
Users can't delete other's aliases
Outbound federation can query room alias directory
After deactivating account, can't log in with an email
Remote room alias queries can handle Unicode
Newly joined room is included in an incremental sync after invite
Inbound /v1/make_join rejects remote attempts to join local users to rooms
Inbound federation rejects invites which are not signed by the sender
Local room members see posted message events
Fetching eventstream a second time doesn't yield the message again
Local non-members don't see posted message events
Remote room members also see posted message events
Lazy loading parameters in the filter are strictly boolean
remote user can join room with version 1
remote user can join room with version 2
remote user can join room with version 3
remote user can join room with version 4
remote user can join room with version 5
Inbound federation can query room alias directory
Outbound federation can query v2 /send_join
Inbound federation can receive v2 /send_join
Message history can be paginated
Getting messages going forward is limited for a departed room (SPEC-216)
m.room.history_visibility == "world_readable" allows/forbids appropriately for Real users
Backfill works correctly with history visibility set to joined