Merge branch 'matrix-org:master' into master

davidli18 2021-05-17 10:03:54 +08:00
commit 1ba6606bbc
86 changed files with 2937 additions and 785 deletions

View file

@ -0,0 +1,13 @@
#!/bin/sh
TARGET=""
while getopts "ai" option
do
case "$option"
in
a) gomobile bind -v -target android -trimpath -ldflags="-s -w" github.com/matrix-org/dendrite/build/gobind-pinecone ;;
i) gomobile bind -v -target ios -trimpath -ldflags="-s -w" github.com/matrix-org/dendrite/build/gobind-pinecone ;;
*) echo "No target specified, specify -a or -i"; exit 1 ;;
esac
done

View file

@ -0,0 +1,418 @@
package gobind
import (
"context"
"crypto/ed25519"
"crypto/rand"
"crypto/tls"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"log"
"math"
"net"
"net/http"
"os"
"sync"
"time"
"github.com/gorilla/mux"
"github.com/matrix-org/dendrite/appservice"
"github.com/matrix-org/dendrite/clientapi/userutil"
"github.com/matrix-org/dendrite/cmd/dendrite-demo-pinecone/conn"
"github.com/matrix-org/dendrite/cmd/dendrite-demo-pinecone/rooms"
"github.com/matrix-org/dendrite/cmd/dendrite-demo-yggdrasil/signing"
"github.com/matrix-org/dendrite/eduserver"
"github.com/matrix-org/dendrite/eduserver/cache"
"github.com/matrix-org/dendrite/federationsender"
"github.com/matrix-org/dendrite/federationsender/api"
"github.com/matrix-org/dendrite/internal/httputil"
"github.com/matrix-org/dendrite/keyserver"
"github.com/matrix-org/dendrite/roomserver"
"github.com/matrix-org/dendrite/setup"
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/dendrite/userapi"
userapiAPI "github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/gomatrixserverlib"
"github.com/sirupsen/logrus"
"go.uber.org/atomic"
"golang.org/x/net/http2"
"golang.org/x/net/http2/h2c"
pineconeMulticast "github.com/matrix-org/pinecone/multicast"
pineconeRouter "github.com/matrix-org/pinecone/router"
pineconeSessions "github.com/matrix-org/pinecone/sessions"
"github.com/matrix-org/pinecone/types"
pineconeTypes "github.com/matrix-org/pinecone/types"
)
const (
PeerTypeRemote = pineconeRouter.PeerTypeRemote
PeerTypeMulticast = pineconeRouter.PeerTypeMulticast
PeerTypeBluetooth = pineconeRouter.PeerTypeBluetooth
)
type DendriteMonolith struct {
logger logrus.Logger
PineconeRouter *pineconeRouter.Router
PineconeMulticast *pineconeMulticast.Multicast
PineconeQUIC *pineconeSessions.Sessions
StorageDirectory string
CacheDirectory string
staticPeerURI string
staticPeerMutex sync.RWMutex
staticPeerAttempts atomic.Uint32
listener net.Listener
httpServer *http.Server
processContext *process.ProcessContext
userAPI userapiAPI.UserInternalAPI
}
func (m *DendriteMonolith) BaseURL() string {
return fmt.Sprintf("http://%s", m.listener.Addr().String())
}
func (m *DendriteMonolith) PeerCount(peertype int) int {
return m.PineconeRouter.PeerCount(peertype)
}
func (m *DendriteMonolith) SessionCount() int {
return len(m.PineconeQUIC.Sessions())
}
func (m *DendriteMonolith) SetMulticastEnabled(enabled bool) {
if enabled {
m.PineconeMulticast.Start()
} else {
m.PineconeMulticast.Stop()
m.DisconnectType(pineconeRouter.PeerTypeMulticast)
}
}
func (m *DendriteMonolith) SetStaticPeer(uri string) {
m.staticPeerMutex.Lock()
m.staticPeerURI = uri
m.staticPeerMutex.Unlock()
m.DisconnectType(pineconeRouter.PeerTypeRemote)
if uri != "" {
m.staticPeerConnect()
}
}
func (m *DendriteMonolith) DisconnectType(peertype int) {
for _, p := range m.PineconeRouter.Peers() {
if peertype == p.PeerType {
_ = m.PineconeRouter.Disconnect(types.SwitchPortID(p.Port), nil)
}
}
}
func (m *DendriteMonolith) DisconnectZone(zone string) {
for _, p := range m.PineconeRouter.Peers() {
if zone == p.Zone {
_ = m.PineconeRouter.Disconnect(types.SwitchPortID(p.Port), nil)
}
}
}
func (m *DendriteMonolith) DisconnectPort(port int) error {
return m.PineconeRouter.Disconnect(types.SwitchPortID(port), nil)
}
func (m *DendriteMonolith) Conduit(zone string, peertype int) (*Conduit, error) {
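// net.Pipe returns both ends of an in-memory connection: the local end (l) is handed to the Pinecone router below, while the remote end (r) is wrapped in the Conduit returned to the caller.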
l, r := net.Pipe()
conduit := &Conduit{conn: r, port: 0}
go func() {
conduit.portMutex.Lock()
defer conduit.portMutex.Unlock()
loop:
for i := 1; i <= 10; i++ {
logrus.Errorf("Attempting authenticated connect (attempt %d)", i)
var err error
conduit.port, err = m.PineconeRouter.AuthenticatedConnect(l, zone, peertype)
switch err {
case io.ErrClosedPipe:
logrus.Errorf("Authenticated connect failed due to closed pipe (attempt %d)", i)
return
case io.EOF:
logrus.Errorf("Authenticated connect failed due to EOF (attempt %d)", i)
break loop
case nil:
logrus.Errorf("Authenticated connect succeeded, connected to port %d (attempt %d)", conduit.port, i)
return
default:
logrus.WithError(err).Errorf("Authenticated connect failed (attempt %d)", i)
time.Sleep(time.Second)
}
}
_ = l.Close()
_ = r.Close()
}()
return conduit, nil
}
func (m *DendriteMonolith) RegisterUser(localpart, password string) (string, error) {
pubkey := m.PineconeRouter.PublicKey()
userID := userutil.MakeUserID(
localpart,
gomatrixserverlib.ServerName(hex.EncodeToString(pubkey[:])),
)
userReq := &userapiAPI.PerformAccountCreationRequest{
AccountType: userapiAPI.AccountTypeUser,
Localpart: localpart,
Password: password,
}
userRes := &userapiAPI.PerformAccountCreationResponse{}
if err := m.userAPI.PerformAccountCreation(context.Background(), userReq, userRes); err != nil {
return userID, fmt.Errorf("userAPI.PerformAccountCreation: %w", err)
}
return userID, nil
}
func (m *DendriteMonolith) RegisterDevice(localpart, deviceID string) (string, error) {
accessTokenBytes := make([]byte, 16)
n, err := rand.Read(accessTokenBytes)
if err != nil {
return "", fmt.Errorf("rand.Read: %w", err)
}
loginReq := &userapiAPI.PerformDeviceCreationRequest{
Localpart: localpart,
DeviceID: &deviceID,
AccessToken: hex.EncodeToString(accessTokenBytes[:n]),
}
loginRes := &userapiAPI.PerformDeviceCreationResponse{}
if err := m.userAPI.PerformDeviceCreation(context.Background(), loginReq, loginRes); err != nil {
return "", fmt.Errorf("userAPI.PerformDeviceCreation: %w", err)
}
if !loginRes.DeviceCreated {
return "", fmt.Errorf("device was not created")
}
return loginRes.Device.AccessToken, nil
}
func (m *DendriteMonolith) staticPeerConnect() {
m.staticPeerMutex.RLock()
uri := m.staticPeerURI
m.staticPeerMutex.RUnlock()
if uri == "" {
return
}
if err := conn.ConnectToPeer(m.PineconeRouter, uri); err != nil {
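// Exponential backoff: the nth consecutive failure waits 2^n seconds (2s, 4s, 8s, ...) before the next attempt.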
exp := time.Second * time.Duration(math.Exp2(float64(m.staticPeerAttempts.Inc())))
time.AfterFunc(exp, m.staticPeerConnect)
} else {
m.staticPeerAttempts.Store(0)
}
}
// nolint:gocyclo
func (m *DendriteMonolith) Start() {
var err error
var sk ed25519.PrivateKey
var pk ed25519.PublicKey
keyfile := fmt.Sprintf("%s/p2p.key", m.StorageDirectory)
if _, err = os.Stat(keyfile); os.IsNotExist(err) {
if pk, sk, err = ed25519.GenerateKey(nil); err != nil {
panic(err)
}
if err = ioutil.WriteFile(keyfile, sk, 0644); err != nil {
panic(err)
}
} else if err == nil {
if sk, err = ioutil.ReadFile(keyfile); err != nil {
panic(err)
}
if len(sk) != ed25519.PrivateKeySize {
panic("the private key is not long enough")
}
pk = sk.Public().(ed25519.PublicKey)
}
m.listener, err = net.Listen("tcp", "localhost:65432")
if err != nil {
panic(err)
}
m.logger = logrus.Logger{
Out: BindLogger{},
}
m.logger.SetOutput(BindLogger{})
logrus.SetOutput(BindLogger{})
logger := log.New(os.Stdout, "PINECONE: ", 0)
m.PineconeRouter = pineconeRouter.NewRouter(logger, "dendrite", sk, pk, nil)
m.PineconeQUIC = pineconeSessions.NewSessions(logger, m.PineconeRouter)
m.PineconeMulticast = pineconeMulticast.NewMulticast(logger, m.PineconeRouter)
m.PineconeRouter.SetDisconnectedCallback(func(port pineconeTypes.SwitchPortID, public pineconeTypes.PublicKey, peertype int, err error) {
if peertype == pineconeRouter.PeerTypeRemote {
m.staticPeerAttempts.Store(0)
time.AfterFunc(time.Second, m.staticPeerConnect)
}
})
prefix := hex.EncodeToString(pk)
cfg := &config.Dendrite{}
cfg.Defaults()
cfg.Global.ServerName = gomatrixserverlib.ServerName(hex.EncodeToString(pk))
cfg.Global.PrivateKey = sk
cfg.Global.KeyID = gomatrixserverlib.KeyID(signing.KeyID)
cfg.Global.Kafka.UseNaffka = true
cfg.Global.Kafka.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-naffka.db", m.StorageDirectory, prefix))
cfg.UserAPI.AccountDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-account.db", m.StorageDirectory, prefix))
cfg.UserAPI.DeviceDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-device.db", m.StorageDirectory, prefix))
cfg.MediaAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-mediaapi.db", m.CacheDirectory, prefix))
cfg.SyncAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-syncapi.db", m.StorageDirectory, prefix))
cfg.RoomServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-roomserver.db", m.StorageDirectory, prefix))
cfg.SigningKeyServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-signingkeyserver.db", m.StorageDirectory, prefix))
cfg.KeyServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-keyserver.db", m.StorageDirectory, prefix))
cfg.FederationSender.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-federationsender.db", m.StorageDirectory, prefix))
cfg.AppServiceAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-appservice.db", m.StorageDirectory, prefix))
cfg.MediaAPI.BasePath = config.Path(fmt.Sprintf("%s/media", m.CacheDirectory))
cfg.MediaAPI.AbsBasePath = config.Path(fmt.Sprintf("%s/media", m.CacheDirectory))
if err := cfg.Derive(); err != nil {
panic(err)
}
base := setup.NewBaseDendrite(cfg, "Monolith", false)
defer base.Close() // nolint: errcheck
accountDB := base.CreateAccountsDB()
federation := conn.CreateFederationClient(base, m.PineconeQUIC)
serverKeyAPI := &signing.YggdrasilKeys{}
keyRing := serverKeyAPI.KeyRing()
rsAPI := roomserver.NewInternalAPI(
base, keyRing,
)
fsAPI := federationsender.NewInternalAPI(
base, federation, rsAPI, keyRing,
)
keyAPI := keyserver.NewInternalAPI(&base.Cfg.KeyServer, fsAPI)
m.userAPI = userapi.NewInternalAPI(accountDB, &cfg.UserAPI, cfg.Derived.ApplicationServices, keyAPI)
keyAPI.SetUserAPI(m.userAPI)
eduInputAPI := eduserver.NewInternalAPI(
base, cache.New(), m.userAPI,
)
asAPI := appservice.NewInternalAPI(base, m.userAPI, rsAPI)
// The underlying roomserver implementation needs to be able to call the fedsender.
// This is different from rsAPI, which can be the HTTP client and doesn't need this dependency.
rsAPI.SetFederationSenderAPI(fsAPI)
monolith := setup.Monolith{
Config: base.Cfg,
AccountDB: accountDB,
Client: conn.CreateClient(base, m.PineconeQUIC),
FedClient: federation,
KeyRing: keyRing,
AppserviceAPI: asAPI,
EDUInternalAPI: eduInputAPI,
FederationSenderAPI: fsAPI,
RoomserverAPI: rsAPI,
UserAPI: m.userAPI,
KeyAPI: keyAPI,
ExtPublicRoomsProvider: rooms.NewPineconeRoomProvider(m.PineconeRouter, m.PineconeQUIC, fsAPI, federation),
}
monolith.AddAllPublicRoutes(
base.ProcessContext,
base.PublicClientAPIMux,
base.PublicFederationAPIMux,
base.PublicKeyAPIMux,
base.PublicMediaAPIMux,
)
httpRouter := mux.NewRouter().SkipClean(true).UseEncodedPath()
httpRouter.PathPrefix(httputil.InternalPathPrefix).Handler(base.InternalAPIMux)
httpRouter.PathPrefix(httputil.PublicClientPathPrefix).Handler(base.PublicClientAPIMux)
httpRouter.PathPrefix(httputil.PublicMediaPathPrefix).Handler(base.PublicMediaAPIMux)
pMux := mux.NewRouter().SkipClean(true).UseEncodedPath()
pMux.PathPrefix(httputil.PublicFederationPathPrefix).Handler(base.PublicFederationAPIMux)
pMux.PathPrefix(httputil.PublicMediaPathPrefix).Handler(base.PublicMediaAPIMux)
pHTTP := m.PineconeQUIC.HTTP()
pHTTP.Mux().Handle(httputil.PublicFederationPathPrefix, pMux)
pHTTP.Mux().Handle(httputil.PublicMediaPathPrefix, pMux)
// Build both ends of an HTTP multiplex.
h2s := &http2.Server{}
m.httpServer = &http.Server{
Addr: ":0",
TLSNextProto: map[string]func(*http.Server, *tls.Conn, http.Handler){},
ReadTimeout: 10 * time.Second,
WriteTimeout: 10 * time.Second,
IdleTimeout: 30 * time.Second,
BaseContext: func(_ net.Listener) context.Context {
return context.Background()
},
Handler: h2c.NewHandler(pMux, h2s),
}
m.processContext = base.ProcessContext
go func() {
m.logger.Info("Listening on ", cfg.Global.ServerName)
m.logger.Fatal(m.httpServer.Serve(m.PineconeQUIC))
}()
go func() {
logrus.Info("Listening on ", m.listener.Addr())
logrus.Fatal(http.Serve(m.listener, httpRouter))
}()
go func() {
logrus.Info("Sending wake-up message to known nodes")
req := &api.PerformBroadcastEDURequest{}
res := &api.PerformBroadcastEDUResponse{}
if err := fsAPI.PerformBroadcastEDU(context.TODO(), req, res); err != nil {
logrus.WithError(err).Error("Failed to send wake-up message to known nodes")
}
}()
}
func (m *DendriteMonolith) Stop() {
_ = m.listener.Close()
m.PineconeMulticast.Stop()
_ = m.PineconeQUIC.Close()
m.processContext.ShutdownDendrite()
_ = m.PineconeRouter.Close()
}
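// Conduit is the caller-facing end of an in-memory pipe whose other end has been connected to the Pinecone router (see DendriteMonolith.Conduit); Read and Write carry raw peering traffic.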
type Conduit struct {
conn net.Conn
port types.SwitchPortID
portMutex sync.Mutex
}
func (c *Conduit) Port() int {
c.portMutex.Lock()
defer c.portMutex.Unlock()
return int(c.port)
}
func (c *Conduit) Read(b []byte) (int, error) {
return c.conn.Read(b)
}
func (c *Conduit) ReadCopy() ([]byte, error) {
var buf [65535 * 2]byte
n, err := c.conn.Read(buf[:])
if err != nil {
return nil, err
}
return buf[:n], nil
}
func (c *Conduit) Write(b []byte) (int, error) {
return c.conn.Write(b)
}
func (c *Conduit) Close() error {
return c.conn.Close()
}

View file

@ -0,0 +1,25 @@
#!/bin/sh
TARGET=""
while getopts "ai" option
do
case "$option"
in
a) TARGET="android";;
i) TARGET="ios";;
esac
done
if [ "$TARGET" = "" ];
then
echo "No target specified, specify -a or -i"
exit 1
fi
gomobile bind -v \
-target $TARGET \
-ldflags "-X github.com/yggdrasil-network/yggdrasil-go/src/version.buildName=dendrite" \
github.com/matrix-org/dendrite/build/gobind-pinecone

View file

@ -0,0 +1,25 @@
// +build ios

package gobind
/*
#cgo CFLAGS: -x objective-c
#cgo LDFLAGS: -framework Foundation
#import <Foundation/Foundation.h>
void Log(const char *text) {
NSString *nss = [NSString stringWithUTF8String:text];
NSLog(@"%@", nss);
}
*/
import "C"
import "unsafe"
type BindLogger struct {
}
func (nsl BindLogger) Write(p []byte) (n int, err error) {
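// Append a NUL terminator so the buffer can be handed to C as a C string for NSLog.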
p = append(p, 0)
cstr := (*C.char)(unsafe.Pointer(&p[0]))
C.Log(cstr)
return len(p), nil
}

View file

@ -0,0 +1,12 @@
// +build !ios

package gobind
import "log"
type BindLogger struct{}
func (nsl BindLogger) Write(p []byte) (n int, err error) {
log.Println(string(p))
return len(p), nil
}

View file

@ -1,6 +0,0 @@
#!/bin/sh
gomobile bind -v \
-ldflags "-X github.com/yggdrasil-network/yggdrasil-go/src/version.buildName=dendrite" \
-target ios \
github.com/matrix-org/dendrite/build/gobind

View file

@ -0,0 +1,70 @@
// Copyright 2021 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package routing
import (
"net/http"
"github.com/matrix-org/dendrite/clientapi/jsonerror"
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/util"
)
type openIDTokenResponse struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
MatrixServerName string `json:"matrix_server_name"`
ExpiresIn int64 `json:"expires_in"`
}
// CreateOpenIDToken creates a new OpenID Connect (OIDC) token that a Matrix user
// can supply to an OpenID Relying Party to verify their identity
func CreateOpenIDToken(
req *http.Request,
userAPI api.UserInternalAPI,
device *api.Device,
userID string,
cfg *config.ClientAPI,
) util.JSONResponse {
// does the incoming user ID match the user that the token was issued for?
if userID != device.UserID {
return util.JSONResponse{
Code: http.StatusForbidden,
JSON: jsonerror.Forbidden("Cannot request tokens for other users"),
}
}
request := api.PerformOpenIDTokenCreationRequest{
UserID: userID, // this is the user ID from the incoming path
}
response := api.PerformOpenIDTokenCreationResponse{}
err := userAPI.PerformOpenIDTokenCreation(req.Context(), &request, &response)
if err != nil {
util.GetLogger(req.Context()).WithError(err).Error("userAPI.CreateOpenIDToken failed")
return jsonerror.InternalServerError()
}
return util.JSONResponse{
Code: http.StatusOK,
JSON: openIDTokenResponse{
AccessToken: response.Token.Token,
TokenType: "Bearer",
MatrixServerName: string(cfg.Matrix.ServerName),
ExpiresIn: response.Token.ExpiresAtMS / 1000, // convert ms to s
},
}
}

View file

@ -161,15 +161,6 @@ type userInteractiveResponse struct {
Session string `json:"session"`
}
// legacyRegisterRequest represents the submitted registration request for v1 API.
type legacyRegisterRequest struct {
Password string `json:"password"`
Username string `json:"user"`
Admin bool `json:"admin"`
Type authtypes.LoginType `json:"type"`
Mac gomatrixserverlib.HexString `json:"mac"`
}
// newUserInteractiveResponse will return a struct to be sent back to the client
// during registration.
func newUserInteractiveResponse(
@ -623,7 +614,10 @@ func handleRegistrationFlow(
}
if cfg.RegistrationDisabled && r.Auth.Type != authtypes.LoginTypeSharedSecret {
return util.MessageResponse(http.StatusForbidden, "Registration has been disabled")
return util.JSONResponse{
Code: http.StatusForbidden,
JSON: jsonerror.Forbidden("Registration is disabled"),
}
}
// Make sure normal user isn't registering under an exclusive application
@ -757,85 +751,6 @@ func checkAndCompleteFlow(
}
}
// LegacyRegister process register requests from the legacy v1 API
func LegacyRegister(
req *http.Request,
userAPI userapi.UserInternalAPI,
cfg *config.ClientAPI,
) util.JSONResponse {
var r legacyRegisterRequest
resErr := parseAndValidateLegacyLogin(req, &r)
if resErr != nil {
return *resErr
}
logger := util.GetLogger(req.Context())
logger.WithFields(log.Fields{
"username": r.Username,
"auth.type": r.Type,
}).Info("Processing registration request")
if cfg.RegistrationDisabled && r.Type != authtypes.LoginTypeSharedSecret {
return util.MessageResponse(http.StatusForbidden, "Registration has been disabled")
}
switch r.Type {
case authtypes.LoginTypeSharedSecret:
if cfg.RegistrationSharedSecret == "" {
return util.MessageResponse(http.StatusBadRequest, "Shared secret registration is disabled")
}
valid, err := isValidMacLogin(cfg, r.Username, r.Password, r.Admin, r.Mac)
if err != nil {
util.GetLogger(req.Context()).WithError(err).Error("isValidMacLogin failed")
return jsonerror.InternalServerError()
}
if !valid {
return util.MessageResponse(http.StatusForbidden, "HMAC incorrect")
}
return completeRegistration(req.Context(), userAPI, r.Username, r.Password, "", req.RemoteAddr, req.UserAgent(), false, nil, nil)
case authtypes.LoginTypeDummy:
// there is nothing to do
return completeRegistration(req.Context(), userAPI, r.Username, r.Password, "", req.RemoteAddr, req.UserAgent(), false, nil, nil)
default:
return util.JSONResponse{
Code: http.StatusNotImplemented,
JSON: jsonerror.Unknown("unknown/unimplemented auth type"),
}
}
}
// parseAndValidateLegacyLogin parses the request into r and checks that the
// request is valid (e.g. valid user names, etc)
func parseAndValidateLegacyLogin(req *http.Request, r *legacyRegisterRequest) *util.JSONResponse {
resErr := httputil.UnmarshalJSONRequest(req, &r)
if resErr != nil {
return resErr
}
// Squash username to all lowercase letters
r.Username = strings.ToLower(r.Username)
if resErr = validateUsername(r.Username); resErr != nil {
return resErr
}
if resErr = validatePassword(r.Password); resErr != nil {
return resErr
}
// All registration requests must specify what auth they are using to perform this request
if r.Type == "" {
return &util.JSONResponse{
Code: http.StatusBadRequest,
JSON: jsonerror.BadJSON("invalid type"),
}
}
return nil
}
// completeRegistration runs some rudimentary checks against the submitted
// input, then if successful creates an account and a newly associated device
// We pass in each individual part of the request here instead of just passing a

View file

@ -89,7 +89,6 @@ func Setup(
).Methods(http.MethodGet, http.MethodOptions)
r0mux := publicAPIMux.PathPrefix("/r0").Subrouter()
v1mux := publicAPIMux.PathPrefix("/api/v1").Subrouter()
unstableMux := publicAPIMux.PathPrefix("/unstable").Subrouter()
r0mux.Handle("/createRoom",
@ -306,13 +305,6 @@ func Setup(
return Register(req, userAPI, accountDB, cfg)
})).Methods(http.MethodPost, http.MethodOptions)
v1mux.Handle("/register", httputil.MakeExternalAPI("register", func(req *http.Request) util.JSONResponse {
if r := rateLimits.rateLimit(req); r != nil {
return *r
}
return LegacyRegister(req, userAPI, cfg)
})).Methods(http.MethodPost, http.MethodOptions)
r0mux.Handle("/register/available", httputil.MakeExternalAPI("registerAvailable", func(req *http.Request) util.JSONResponse {
if r := rateLimits.rateLimit(req); r != nil {
return *r
@ -469,7 +461,7 @@ func Setup(
}),
).Methods(http.MethodPost, http.MethodOptions)
// Stub endpoints required by Riot
// Stub endpoints required by Element
r0mux.Handle("/login",
httputil.MakeExternalAPI("login", func(req *http.Request) util.JSONResponse {
@ -506,7 +498,7 @@ func Setup(
}),
).Methods(http.MethodGet, http.MethodOptions)
// Riot user settings
// Element user settings
r0mux.Handle("/profile/{userID}",
httputil.MakeExternalAPI("profile", func(req *http.Request) util.JSONResponse {
@ -592,7 +584,7 @@ func Setup(
}),
).Methods(http.MethodPost, http.MethodOptions)
// Riot logs get flooded unless this is handled
// Element logs get flooded unless this is handled
r0mux.Handle("/presence/{userID}/status",
httputil.MakeExternalAPI("presence", func(req *http.Request) util.JSONResponse {
if r := rateLimits.rateLimit(req); r != nil {
@ -685,6 +677,19 @@ func Setup(
}),
).Methods(http.MethodGet)
r0mux.Handle("/user/{userID}/openid/request_token",
httputil.MakeAuthAPI("openid_request_token", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
if r := rateLimits.rateLimit(req); r != nil {
return *r
}
vars, err := httputil.URLDecodeMapValues(mux.Vars(req))
if err != nil {
return util.ErrorResponse(err)
}
return CreateOpenIDToken(req, userAPI, device, vars["userID"], cfg)
}),
).Methods(http.MethodPost, http.MethodOptions)
r0mux.Handle("/user_directory/search",
httputil.MakeAuthAPI("userdirectory_search", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
if r := rateLimits.rateLimit(req); r != nil {

View file

@ -58,7 +58,7 @@ func main() {
accountDB, err := accounts.NewDatabase(&config.DatabaseOptions{
ConnectionString: cfg.UserAPI.AccountDatabase.ConnectionString,
}, cfg.Global.ServerName, bcrypt.DefaultCost)
}, cfg.Global.ServerName, bcrypt.DefaultCost, cfg.UserAPI.OpenIDTokenLifetimeMS)
if err != nil {
logrus.Fatalln("Failed to connect to the database:", err.Error())
}

View file

@ -0,0 +1,81 @@
package conn
import (
"fmt"
"net"
"net/http"
"strings"
"github.com/gorilla/websocket"
"github.com/matrix-org/dendrite/setup"
"github.com/matrix-org/gomatrixserverlib"
pineconeRouter "github.com/matrix-org/pinecone/router"
pineconeSessions "github.com/matrix-org/pinecone/sessions"
)
func ConnectToPeer(pRouter *pineconeRouter.Router, peer string) error {
var parent net.Conn
if strings.HasPrefix(peer, "ws://") || strings.HasPrefix(peer, "wss://") {
c, _, err := websocket.DefaultDialer.Dial(peer, nil)
if err != nil {
return fmt.Errorf("websocket.DefaultDialer.Dial: %w", err)
}
parent = WrapWebSocketConn(c)
} else {
var err error
parent, err = net.Dial("tcp", peer)
if err != nil {
return fmt.Errorf("net.Dial: %w", err)
}
}
if parent == nil {
return fmt.Errorf("failed to wrap connection")
}
_, err := pRouter.AuthenticatedConnect(parent, "static", pineconeRouter.PeerTypeRemote)
return err
}
type RoundTripper struct {
inner *http.Transport
}
func (y *RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
req.URL.Scheme = "http"
return y.inner.RoundTrip(req)
}
func createTransport(s *pineconeSessions.Sessions) *http.Transport {
tr := &http.Transport{}
tr.RegisterProtocol(
"matrix", &RoundTripper{
inner: &http.Transport{
DisableKeepAlives: false,
Dial: s.Dial,
DialContext: s.DialContext,
DialTLS: s.DialTLS,
DialTLSContext: s.DialTLSContext,
},
},
)
return tr
}
func CreateClient(
base *setup.BaseDendrite, s *pineconeSessions.Sessions,
) *gomatrixserverlib.Client {
return gomatrixserverlib.NewClient(
gomatrixserverlib.WithTransport(createTransport(s)),
)
}
func CreateFederationClient(
base *setup.BaseDendrite, s *pineconeSessions.Sessions,
) *gomatrixserverlib.FederationClient {
return gomatrixserverlib.NewFederationClient(
base.Cfg.Global.ServerName,
base.Cfg.Global.KeyID,
base.Cfg.Global.PrivateKey,
gomatrixserverlib.WithTransport(createTransport(s)),
)
}

View file

@ -0,0 +1,81 @@
package conn
import (
"io"
"net"
"time"
"github.com/gorilla/websocket"
)
func WrapWebSocketConn(c *websocket.Conn) *WebSocketConn {
return &WebSocketConn{c: c}
}
type WebSocketConn struct {
r io.Reader
c *websocket.Conn
}
func (c *WebSocketConn) Write(p []byte) (int, error) {
err := c.c.WriteMessage(websocket.BinaryMessage, p)
if err != nil {
return 0, err
}
return len(p), nil
}
func (c *WebSocketConn) Read(p []byte) (int, error) {
for {
if c.r == nil {
// Advance to next message.
var err error
_, c.r, err = c.c.NextReader()
if err != nil {
return 0, err
}
}
n, err := c.r.Read(p)
if err == io.EOF {
// At end of message.
c.r = nil
if n > 0 {
return n, nil
} else {
// No data read, continue to next message.
continue
}
}
return n, err
}
}
func (c *WebSocketConn) Close() error {
return c.c.Close()
}
func (c *WebSocketConn) LocalAddr() net.Addr {
return c.c.LocalAddr()
}
func (c *WebSocketConn) RemoteAddr() net.Addr {
return c.c.RemoteAddr()
}
func (c *WebSocketConn) SetDeadline(t time.Time) error {
if err := c.SetReadDeadline(t); err != nil {
return err
}
if err := c.SetWriteDeadline(t); err != nil {
return err
}
return nil
}
func (c *WebSocketConn) SetReadDeadline(t time.Time) error {
return c.c.SetReadDeadline(t)
}
func (c *WebSocketConn) SetWriteDeadline(t time.Time) error {
return c.c.SetWriteDeadline(t)
}

View file

@ -0,0 +1,9 @@
// +build !riotweb

package embed
import "github.com/gorilla/mux"
func Embed(_ *mux.Router, _ int, _ string) {
}

View file

@ -0,0 +1,83 @@
// +build riotweb

package embed
import (
"fmt"
"io"
"net/http"
"regexp"
"github.com/gorilla/mux"
"github.com/tidwall/sjson"
)
// From within the Riot Web directory:
// go run github.com/mjibson/esc -o /path/to/dendrite/internal/embed/fs_riotweb.go -private -pkg embed .
var cssFile = regexp.MustCompile("\\.css$")
var jsFile = regexp.MustCompile("\\.js$")
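// mimeFixingHandler wraps an http.Handler (here the embedded asset file server) and forces the Content-Type header for .css and .js responses based on the request path before delegating.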
type mimeFixingHandler struct {
fs http.Handler
}
func (h mimeFixingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
ruri := r.RequestURI
fmt.Println(ruri)
switch {
case cssFile.MatchString(ruri):
w.Header().Set("Content-Type", "text/css")
case jsFile.MatchString(ruri):
w.Header().Set("Content-Type", "application/javascript")
default:
}
h.fs.ServeHTTP(w, r)
}
func Embed(rootMux *mux.Router, listenPort int, serverName string) {
embeddedFS := _escFS(false)
embeddedServ := mimeFixingHandler{http.FileServer(embeddedFS)}
rootMux.NotFoundHandler = embeddedServ
rootMux.HandleFunc("/config.json", func(w http.ResponseWriter, r *http.Request) {
url := fmt.Sprintf("http://%s:%d", r.Host, listenPort)
configFile, err := embeddedFS.Open("/config.sample.json")
if err != nil {
w.WriteHeader(500)
io.WriteString(w, "Couldn't open the file: "+err.Error())
return
}
configFileInfo, err := configFile.Stat()
if err != nil {
w.WriteHeader(500)
io.WriteString(w, "Couldn't stat the file: "+err.Error())
return
}
buf := make([]byte, configFileInfo.Size())
n, err := configFile.Read(buf)
if err != nil {
w.WriteHeader(500)
io.WriteString(w, "Couldn't read the file: "+err.Error())
return
}
if int64(n) != configFileInfo.Size() {
w.WriteHeader(500)
io.WriteString(w, "The returned file size didn't match what we expected")
return
}
js, _ := sjson.SetBytes(buf, "default_server_config.m\\.homeserver.base_url", url)
js, _ = sjson.SetBytes(js, "default_server_config.m\\.homeserver.server_name", serverName)
js, _ = sjson.SetBytes(js, "brand", fmt.Sprintf("Riot %s", serverName))
js, _ = sjson.SetBytes(js, "disable_guests", true)
js, _ = sjson.SetBytes(js, "disable_3pid_login", true)
js, _ = sjson.DeleteBytes(js, "welcomeUserId")
_, _ = w.Write(js)
})
fmt.Println("*-------------------------------*")
fmt.Println("| This build includes Riot Web! |")
fmt.Println("*-------------------------------*")
fmt.Println("Point your browser to:", url)
fmt.Println()
}

View file

@ -0,0 +1,279 @@
// Copyright 2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"crypto/ed25519"
"crypto/tls"
"encoding/hex"
"flag"
"fmt"
"io/ioutil"
"log"
"math"
"net"
"net/http"
"os"
"time"
"github.com/gorilla/mux"
"github.com/gorilla/websocket"
"github.com/matrix-org/dendrite/appservice"
"github.com/matrix-org/dendrite/cmd/dendrite-demo-pinecone/conn"
"github.com/matrix-org/dendrite/cmd/dendrite-demo-pinecone/embed"
"github.com/matrix-org/dendrite/cmd/dendrite-demo-pinecone/rooms"
"github.com/matrix-org/dendrite/cmd/dendrite-demo-yggdrasil/signing"
"github.com/matrix-org/dendrite/eduserver"
"github.com/matrix-org/dendrite/eduserver/cache"
"github.com/matrix-org/dendrite/federationsender"
"github.com/matrix-org/dendrite/federationsender/api"
"github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/internal/httputil"
"github.com/matrix-org/dendrite/keyserver"
"github.com/matrix-org/dendrite/roomserver"
"github.com/matrix-org/dendrite/setup"
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/dendrite/userapi"
"github.com/matrix-org/gomatrixserverlib"
"go.uber.org/atomic"
pineconeMulticast "github.com/matrix-org/pinecone/multicast"
pineconeRouter "github.com/matrix-org/pinecone/router"
pineconeSessions "github.com/matrix-org/pinecone/sessions"
pineconeTypes "github.com/matrix-org/pinecone/types"
"github.com/sirupsen/logrus"
)
var (
instanceName = flag.String("name", "dendrite-p2p-pinecone", "the name of this P2P demo instance")
instancePort = flag.Int("port", 8008, "the port that the client API will listen on")
instancePeer = flag.String("peer", "", "the static Pinecone peer to connect to")
instanceListen = flag.String("listen", ":0", "the port Pinecone peers can connect to")
)
// nolint:gocyclo
func main() {
flag.Parse()
internal.SetupPprof()
var pk ed25519.PublicKey
var sk ed25519.PrivateKey
keyfile := *instanceName + ".key"
if _, err := os.Stat(keyfile); os.IsNotExist(err) {
if pk, sk, err = ed25519.GenerateKey(nil); err != nil {
panic(err)
}
if err = ioutil.WriteFile(keyfile, sk, 0644); err != nil {
panic(err)
}
} else if err == nil {
if sk, err = ioutil.ReadFile(keyfile); err != nil {
panic(err)
}
if len(sk) != ed25519.PrivateKeySize {
panic("the private key is not long enough")
}
pk = sk.Public().(ed25519.PublicKey)
}
logger := log.New(os.Stdout, "", 0)
pRouter := pineconeRouter.NewRouter(logger, "dendrite", sk, pk, nil)
go func() {
listener, err := net.Listen("tcp", *instanceListen)
if err != nil {
panic(err)
}
fmt.Println("Listening on", listener.Addr())
for {
conn, err := listener.Accept()
if err != nil {
logrus.WithError(err).Error("listener.Accept failed")
continue
}
port, err := pRouter.AuthenticatedConnect(conn, "", pineconeRouter.PeerTypeRemote)
if err != nil {
logrus.WithError(err).Error("pSwitch.AuthenticatedConnect failed")
continue
}
fmt.Println("Inbound connection", conn.RemoteAddr(), "is connected to port", port)
}
}()
pQUIC := pineconeSessions.NewSessions(logger, pRouter)
pMulticast := pineconeMulticast.NewMulticast(logger, pRouter)
pMulticast.Start()
var staticPeerAttempts atomic.Uint32
var connectToStaticPeer func()
connectToStaticPeer = func() {
uri := *instancePeer
if uri == "" {
return
}
if err := conn.ConnectToPeer(pRouter, uri); err != nil {
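// Exponential backoff: the nth consecutive failure waits 2^n seconds (2s, 4s, 8s, ...) before the next attempt.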
exp := time.Second * time.Duration(math.Exp2(float64(staticPeerAttempts.Inc())))
time.AfterFunc(exp, connectToStaticPeer)
} else {
staticPeerAttempts.Store(0)
}
}
pRouter.SetDisconnectedCallback(func(port pineconeTypes.SwitchPortID, public pineconeTypes.PublicKey, peertype int, err error) {
if peertype == pineconeRouter.PeerTypeRemote && err != nil {
staticPeerAttempts.Store(0)
time.AfterFunc(time.Second, connectToStaticPeer)
}
})
go connectToStaticPeer()
cfg := &config.Dendrite{}
cfg.Defaults()
cfg.Global.ServerName = gomatrixserverlib.ServerName(hex.EncodeToString(pk))
cfg.Global.PrivateKey = sk
cfg.Global.KeyID = gomatrixserverlib.KeyID(signing.KeyID)
cfg.Global.Kafka.UseNaffka = true
cfg.UserAPI.AccountDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-account.db", *instanceName))
cfg.UserAPI.DeviceDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-device.db", *instanceName))
cfg.MediaAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-mediaapi.db", *instanceName))
cfg.SyncAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-syncapi.db", *instanceName))
cfg.RoomServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-roomserver.db", *instanceName))
cfg.SigningKeyServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-signingkeyserver.db", *instanceName))
cfg.KeyServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-keyserver.db", *instanceName))
cfg.FederationSender.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-federationsender.db", *instanceName))
cfg.AppServiceAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-appservice.db", *instanceName))
cfg.Global.Kafka.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-naffka.db", *instanceName))
if err := cfg.Derive(); err != nil {
panic(err)
}
base := setup.NewBaseDendrite(cfg, "Monolith", false)
defer base.Close() // nolint: errcheck
accountDB := base.CreateAccountsDB()
federation := conn.CreateFederationClient(base, pQUIC)
serverKeyAPI := &signing.YggdrasilKeys{}
keyRing := serverKeyAPI.KeyRing()
rsComponent := roomserver.NewInternalAPI(
base, keyRing,
)
rsAPI := rsComponent
fsAPI := federationsender.NewInternalAPI(
base, federation, rsAPI, keyRing,
)
keyAPI := keyserver.NewInternalAPI(&base.Cfg.KeyServer, fsAPI)
userAPI := userapi.NewInternalAPI(accountDB, &cfg.UserAPI, nil, keyAPI)
keyAPI.SetUserAPI(userAPI)
eduInputAPI := eduserver.NewInternalAPI(
base, cache.New(), userAPI,
)
asAPI := appservice.NewInternalAPI(base, userAPI, rsAPI)
rsComponent.SetFederationSenderAPI(fsAPI)
monolith := setup.Monolith{
Config: base.Cfg,
AccountDB: accountDB,
Client: conn.CreateClient(base, pQUIC),
FedClient: federation,
KeyRing: keyRing,
AppserviceAPI: asAPI,
EDUInternalAPI: eduInputAPI,
FederationSenderAPI: fsAPI,
RoomserverAPI: rsAPI,
UserAPI: userAPI,
KeyAPI: keyAPI,
ExtPublicRoomsProvider: rooms.NewPineconeRoomProvider(pRouter, pQUIC, fsAPI, federation),
}
monolith.AddAllPublicRoutes(
base.ProcessContext,
base.PublicClientAPIMux,
base.PublicFederationAPIMux,
base.PublicKeyAPIMux,
base.PublicMediaAPIMux,
)
wsUpgrader := websocket.Upgrader{}
httpRouter := mux.NewRouter().SkipClean(true).UseEncodedPath()
httpRouter.PathPrefix(httputil.InternalPathPrefix).Handler(base.InternalAPIMux)
httpRouter.PathPrefix(httputil.PublicClientPathPrefix).Handler(base.PublicClientAPIMux)
httpRouter.PathPrefix(httputil.PublicMediaPathPrefix).Handler(base.PublicMediaAPIMux)
httpRouter.HandleFunc("/ws", func(w http.ResponseWriter, r *http.Request) {
c, err := wsUpgrader.Upgrade(w, r, nil)
if err != nil {
logrus.WithError(err).Error("Failed to upgrade WebSocket connection")
return
}
conn := conn.WrapWebSocketConn(c)
if _, err = pRouter.AuthenticatedConnect(conn, "websocket", pineconeRouter.PeerTypeRemote); err != nil {
logrus.WithError(err).Error("Failed to connect WebSocket peer to Pinecone switch")
}
})
embed.Embed(httpRouter, *instancePort, "Pinecone Demo")
pMux := mux.NewRouter().SkipClean(true).UseEncodedPath()
pMux.PathPrefix(httputil.PublicFederationPathPrefix).Handler(base.PublicFederationAPIMux)
pMux.PathPrefix(httputil.PublicMediaPathPrefix).Handler(base.PublicMediaAPIMux)
pHTTP := pQUIC.HTTP()
pHTTP.Mux().Handle(httputil.PublicFederationPathPrefix, pMux)
pHTTP.Mux().Handle(httputil.PublicMediaPathPrefix, pMux)
// Build both ends of an HTTP multiplex.
httpServer := &http.Server{
Addr: ":0",
TLSNextProto: map[string]func(*http.Server, *tls.Conn, http.Handler){},
ReadTimeout: 10 * time.Second,
WriteTimeout: 10 * time.Second,
IdleTimeout: 60 * time.Second,
BaseContext: func(_ net.Listener) context.Context {
return context.Background()
},
Handler: pMux,
}
go func() {
pubkey := pRouter.PublicKey()
logrus.Info("Listening on ", hex.EncodeToString(pubkey[:]))
logrus.Fatal(httpServer.Serve(pQUIC))
}()
go func() {
httpBindAddr := fmt.Sprintf(":%d", *instancePort)
logrus.Info("Listening on ", httpBindAddr)
logrus.Fatal(http.ListenAndServe(httpBindAddr, httpRouter))
}()
go func() {
logrus.Info("Sending wake-up message to known nodes")
req := &api.PerformBroadcastEDURequest{}
res := &api.PerformBroadcastEDUResponse{}
if err := fsAPI.PerformBroadcastEDU(context.TODO(), req, res); err != nil {
logrus.WithError(err).Error("Failed to send wake-up message to known nodes")
}
}()
base.WaitForShutdown()
}

View file

@ -0,0 +1,117 @@
// Copyright 2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rooms
import (
"context"
"sync"
"time"
"github.com/matrix-org/dendrite/federationsender/api"
"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/util"
pineconeRouter "github.com/matrix-org/pinecone/router"
pineconeSessions "github.com/matrix-org/pinecone/sessions"
)
type PineconeRoomProvider struct {
r *pineconeRouter.Router
s *pineconeSessions.Sessions
fedSender api.FederationSenderInternalAPI
fedClient *gomatrixserverlib.FederationClient
}
func NewPineconeRoomProvider(
r *pineconeRouter.Router,
s *pineconeSessions.Sessions,
fedSender api.FederationSenderInternalAPI,
fedClient *gomatrixserverlib.FederationClient,
) *PineconeRoomProvider {
p := &PineconeRoomProvider{
r: r,
s: s,
fedSender: fedSender,
fedClient: fedClient,
}
return p
}
func (p *PineconeRoomProvider) Rooms() []gomatrixserverlib.PublicRoom {
known := p.r.KnownNodes()
//known = append(known, p.s.Sessions()...)
list := []gomatrixserverlib.ServerName{}
for _, k := range known {
list = append(list, gomatrixserverlib.ServerName(k.String()))
}
return bulkFetchPublicRoomsFromServers(context.Background(), p.fedClient, list)
}
// bulkFetchPublicRoomsFromServers fetches public rooms from the list of homeservers.
// Returns a list of public rooms.
func bulkFetchPublicRoomsFromServers(
ctx context.Context, fedClient *gomatrixserverlib.FederationClient,
homeservers []gomatrixserverlib.ServerName,
) (publicRooms []gomatrixserverlib.PublicRoom) {
limit := 200
// follow pipeline semantics, see https://blog.golang.org/pipelines for more info.
// goroutines send rooms to this channel
roomCh := make(chan gomatrixserverlib.PublicRoom, int(limit))
// signalling channel to tell goroutines to stop sending rooms and quit
done := make(chan bool)
// signalling to say when we can close the room channel
var wg sync.WaitGroup
wg.Add(len(homeservers))
// concurrently query for public rooms
reqctx, reqcancel := context.WithTimeout(ctx, time.Second*5)
for _, hs := range homeservers {
go func(homeserverDomain gomatrixserverlib.ServerName) {
defer wg.Done()
util.GetLogger(reqctx).WithField("hs", homeserverDomain).Info("Querying HS for public rooms")
fres, err := fedClient.GetPublicRooms(reqctx, homeserverDomain, int(limit), "", false, "")
if err != nil {
util.GetLogger(reqctx).WithError(err).WithField("hs", homeserverDomain).Warn(
"bulkFetchPublicRoomsFromServers: failed to query hs",
)
return
}
for _, room := range fres.Chunk {
// atomically send a room or stop
select {
case roomCh <- room:
case <-done:
case <-reqctx.Done():
util.GetLogger(reqctx).WithError(err).WithField("hs", homeserverDomain).Info("Interrupted whilst sending rooms")
return
}
}
}(hs)
}
select {
case <-time.After(5 * time.Second):
default:
wg.Wait()
}
reqcancel()
close(done)
close(roomCh)
for room := range roomCh {
publicRooms = append(publicRooms, room)
}
return publicRooms
}

View file

@ -1,6 +1,6 @@
# Yggdrasil Demo
This is the Dendrite Yggdrasil demo! It's easy to get started - all you need is Go 1.13 or later.
This is the Dendrite Yggdrasil demo! It's easy to get started - all you need is Go 1.14 or later.
To run the homeserver, start at the root of the Dendrite repository and run:

View file

@ -248,7 +248,8 @@ media_api:
base_path: ./media_store
# The maximum allowed file size (in bytes) for media uploads to this homeserver
# (0 = unlimited).
# (0 = unlimited). If using a reverse proxy, ensure it allows requests at
# least this large (e.g. client_max_body_size in nginx.)
max_file_size_bytes: 10485760
# Whether to dynamically generate thumbnails if needed.
@ -360,6 +361,11 @@ user_api:
max_open_conns: 10
max_idle_conns: 2
conn_max_lifetime: -1
# The length of time that a token issued for a relying party from
# /_matrix/client/r0/user/{userId}/openid/request_token endpoint
# is considered to be valid in milliseconds.
# The default lifetime is 3600000ms (60 minutes).
# openid_token_lifetime_ms: 3600000
# Configuration for Opentracing.
# See https://github.com/matrix-org/dendrite/tree/master/docs/tracing for information on

View file

@ -25,7 +25,7 @@ use in production environments just yet!
Dendrite requires:
* Go 1.13 or higher
* Go 1.14 or higher
* Postgres 9.6 or higher (if using Postgres databases, not needed for SQLite)
If you want to run a polylith deployment, you also need:

View file

@ -0,0 +1,65 @@
// Copyright 2021 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package routing
import (
"net/http"
"time"
"github.com/matrix-org/dendrite/clientapi/jsonerror"
userapi "github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/util"
)
type openIDUserInfoResponse struct {
Sub string `json:"sub"`
}
// GetOpenIDUserInfo implements GET /_matrix/federation/v1/openid/userinfo
func GetOpenIDUserInfo(
httpReq *http.Request,
userAPI userapi.UserInternalAPI,
) util.JSONResponse {
token := httpReq.URL.Query().Get("access_token")
if len(token) == 0 {
return util.JSONResponse{
Code: http.StatusUnauthorized,
JSON: jsonerror.MissingArgument("access_token is missing"),
}
}
req := userapi.QueryOpenIDTokenRequest{
Token: token,
}
var openIDTokenAttrResponse userapi.QueryOpenIDTokenResponse
err := userAPI.QueryOpenIDToken(httpReq.Context(), &req, &openIDTokenAttrResponse)
if err != nil {
util.GetLogger(httpReq.Context()).WithError(err).Error("userAPI.QueryOpenIDToken failed")
}
var res interface{} = openIDUserInfoResponse{Sub: openIDTokenAttrResponse.Sub}
code := http.StatusOK
nowMS := time.Now().UnixNano() / int64(time.Millisecond)
if openIDTokenAttrResponse.Sub == "" || nowMS > openIDTokenAttrResponse.ExpiresAtMS {
code = http.StatusUnauthorized
res = jsonerror.UnknownToken("Access Token unknown or expired")
}
return util.JSONResponse{
Code: code,
JSON: res,
}
}
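For illustration only (not part of this commit): a minimal sketch of how a relying party might redeem a token obtained from /_matrix/client/r0/user/{userId}/openid/request_token against the userinfo endpoint above. The package and function names are hypothetical, and it assumes the homeserver's federation API is reachable over plain HTTP at baseURL.

package openidexample

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// lookupOpenIDSubject asks the homeserver named in matrix_server_name which
// Matrix user the supplied OpenID access token was issued for.
func lookupOpenIDSubject(baseURL, accessToken string) (string, error) {
	resp, err := http.Get(baseURL + "/_matrix/federation/v1/openid/userinfo?access_token=" + url.QueryEscape(accessToken))
	if err != nil {
		return "", err
	}
	defer resp.Body.Close() // nolint: errcheck
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("token rejected with status %d", resp.StatusCode)
	}
	var body struct {
		Sub string `json:"sub"` // the Matrix user ID the token belongs to
	}
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		return "", err
	}
	return body.Sub, nil
}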

View file

@ -462,4 +462,10 @@ func Setup(
return QueryDeviceKeys(httpReq, request, keyAPI, cfg.Matrix.ServerName)
},
)).Methods(http.MethodPost)
v1fedmux.Handle("/openid/userinfo",
httputil.MakeExternalAPI("federation_openid_userinfo", func(req *http.Request) util.JSONResponse {
return GetOpenIDUserInfo(req, userAPI)
}),
).Methods(http.MethodGet)
}

View file

@ -498,8 +498,6 @@ func (t *txnReq) getServers(ctx context.Context, roomID string) []gomatrixserver
}
func (t *txnReq) processEvent(ctx context.Context, e *gomatrixserverlib.Event) error {
t.roomsMu.Lock(e.RoomID())
defer t.roomsMu.Unlock(e.RoomID())
logger := util.GetLogger(ctx).WithField("event_id", e.EventID()).WithField("room_id", e.RoomID())
t.work = "" // reset from previous event
@ -620,7 +618,9 @@ func checkAllowedByState(e *gomatrixserverlib.Event, stateEvents []*gomatrixserv
return gomatrixserverlib.Allowed(e, &authUsingState)
}
func (t *txnReq) processEventWithMissingState(ctx context.Context, e *gomatrixserverlib.Event, roomVersion gomatrixserverlib.RoomVersion) error {
func (t *txnReq) processEventWithMissingState(
ctx context.Context, e *gomatrixserverlib.Event, roomVersion gomatrixserverlib.RoomVersion,
) error {
// Do this with a fresh context, so that we keep working even if the
// original request times out. With any luck, by the time the remote
// side retries, we'll have fetched the missing state.
@ -716,7 +716,9 @@ func (t *txnReq) processEventWithMissingState(ctx context.Context, e *gomatrixse
respStates[i] = states[i].RespState
}
// There's more than one previous state - run them all through state res
t.roomsMu.Lock(e.RoomID())
resolvedState, err = t.resolveStatesAndCheck(gmectx, roomVersion, respStates, backwardsExtremity)
t.roomsMu.Unlock(e.RoomID())
if err != nil {
util.GetLogger(ctx).WithError(err).Errorf("Failed to resolve state conflicts for event %s", backwardsExtremity.EventID())
return err
@ -784,7 +786,7 @@ func (t *txnReq) lookupStateAfterEvent(ctx context.Context, roomVersion gomatrix
default:
return nil, false, fmt.Errorf("t.lookupEvent: %w", err)
}
t.haveEvents[h.EventID()] = h
t.cacheAndReturn(h)
if h.StateKey() != nil {
addedToState := false
for i := range respState.StateEvents {
@ -803,6 +805,14 @@ func (t *txnReq) lookupStateAfterEvent(ctx context.Context, roomVersion gomatrix
return respState, false, nil
}
func (t *txnReq) cacheAndReturn(ev *gomatrixserverlib.HeaderedEvent) *gomatrixserverlib.HeaderedEvent {
if cached, exists := t.haveEvents[ev.EventID()]; exists {
return cached
}
t.haveEvents[ev.EventID()] = ev
return ev
}
func (t *txnReq) lookupStateAfterEventLocally(ctx context.Context, roomID, eventID string) *gomatrixserverlib.RespState {
var res api.QueryStateAfterEventsResponse
err := t.rsAPI.QueryStateAfterEvents(ctx, &api.QueryStateAfterEventsRequest{
@ -810,15 +820,21 @@ func (t *txnReq) lookupStateAfterEventLocally(ctx context.Context, roomID, event
PrevEventIDs: []string{eventID},
}, &res)
if err != nil || !res.PrevEventsExist {
util.GetLogger(ctx).WithError(err).Warnf("failed to query state after %s locally", eventID)
util.GetLogger(ctx).WithField("room_id", roomID).WithError(err).Warnf("failed to query state after %s locally, prev exists=%v", eventID, res.PrevEventsExist)
return nil
}
stateEvents := make([]*gomatrixserverlib.HeaderedEvent, len(res.StateEvents))
for i, ev := range res.StateEvents {
t.haveEvents[ev.EventID()] = res.StateEvents[i]
// set the event from the haveEvents cache - this means we will share pointers with other prev_event branches for this
// processEvent request, which is better for memory.
stateEvents[i] = t.cacheAndReturn(ev)
}
// we should never access res.StateEvents again so we delete it here to make GC faster
res.StateEvents = nil
var authEvents []*gomatrixserverlib.Event
missingAuthEvents := map[string]bool{}
for _, ev := range res.StateEvents {
for _, ev := range stateEvents {
for _, ae := range ev.AuthEventIDs() {
if aev, ok := t.haveEvents[ae]; ok {
authEvents = append(authEvents, aev.Unwrap())
@ -843,14 +859,13 @@ func (t *txnReq) lookupStateAfterEventLocally(ctx context.Context, roomID, event
return nil
}
for i := range queryRes.Events {
evID := queryRes.Events[i].EventID()
t.haveEvents[evID] = queryRes.Events[i]
authEvents = append(authEvents, queryRes.Events[i].Unwrap())
authEvents = append(authEvents, t.cacheAndReturn(queryRes.Events[i]).Unwrap())
}
queryRes.Events = nil
}
return &gomatrixserverlib.RespState{
StateEvents: gomatrixserverlib.UnwrapEventHeaders(res.StateEvents),
StateEvents: gomatrixserverlib.UnwrapEventHeaders(stateEvents),
AuthEvents: authEvents,
}
}
@ -860,8 +875,6 @@ func (t *txnReq) lookupStateAfterEventLocally(ctx context.Context, roomID, event
func (t *txnReq) lookupStateBeforeEvent(ctx context.Context, roomVersion gomatrixserverlib.RoomVersion, roomID, eventID string) (
*gomatrixserverlib.RespState, error) {
util.GetLogger(ctx).Infof("lookupStateBeforeEvent %s", eventID)
// Attempt to fetch the missing state using /state_ids and /events
return t.lookupMissingStateViaStateIDs(ctx, roomID, eventID, roomVersion)
}
@ -992,7 +1005,6 @@ Event:
}
}
// we processed everything!
return newEvents, nil
}
@ -1011,7 +1023,7 @@ func (t *txnReq) lookupMissingStateViaState(ctx context.Context, roomID, eventID
func (t *txnReq) lookupMissingStateViaStateIDs(ctx context.Context, roomID, eventID string, roomVersion gomatrixserverlib.RoomVersion) (
*gomatrixserverlib.RespState, error) {
util.GetLogger(ctx).Infof("lookupMissingStateViaStateIDs %s", eventID)
util.GetLogger(ctx).WithField("room_id", roomID).Infof("lookupMissingStateViaStateIDs %s", eventID)
// fetch the state event IDs at the time of the event
stateIDs, err := t.federation.LookupStateIDs(ctx, t.Origin, roomID, eventID)
if err != nil {
@ -1040,14 +1052,16 @@ func (t *txnReq) lookupMissingStateViaStateIDs(ctx context.Context, roomID, even
}
for i := range queryRes.Events {
evID := queryRes.Events[i].EventID()
t.haveEvents[evID] = queryRes.Events[i]
t.cacheAndReturn(queryRes.Events[i])
if missing[evID] {
delete(missing, evID)
}
}
queryRes.Events = nil // allow it to be GCed
concurrentRequests := 8
missingCount := len(missing)
util.GetLogger(ctx).WithField("room_id", roomID).WithField("event_id", eventID).Infof("lookupMissingStateViaStateIDs missing %d/%d events", missingCount, len(wantIDs))
// If over 50% of the auth/state events from /state_ids are missing
// then we'll just call /state instead, otherwise we'll just end up
@ -1112,7 +1126,7 @@ func (t *txnReq) lookupMissingStateViaStateIDs(ctx context.Context, roomID, even
return
}
haveEventsMutex.Lock()
t.haveEvents[h.EventID()] = h
t.cacheAndReturn(h)
haveEventsMutex.Unlock()
}

View file

@ -48,7 +48,7 @@ CREATE INDEX IF NOT EXISTS federatonsender_joined_hosts_room_id_idx
const insertJoinedHostsSQL = "" +
"INSERT INTO federationsender_joined_hosts (room_id, event_id, server_name)" +
" VALUES ($1, $2, $3)"
" VALUES ($1, $2, $3) ON CONFLICT DO NOTHING"
const deleteJoinedHostsSQL = "" +
"DELETE FROM federationsender_joined_hosts WHERE event_id = ANY($1)"

View file

@ -47,7 +47,7 @@ CREATE INDEX IF NOT EXISTS federatonsender_joined_hosts_room_id_idx
`
const insertJoinedHostsSQL = "" +
"INSERT INTO federationsender_joined_hosts (room_id, event_id, server_name)" +
"INSERT OR IGNORE INTO federationsender_joined_hosts (room_id, event_id, server_name)" +
" VALUES ($1, $2, $3)"
const deleteJoinedHostsSQL = "" +

go.mod (9 lines changed)
View file

@ -7,6 +7,7 @@ require (
github.com/getsentry/sentry-go v0.10.0
github.com/gologme/log v1.2.0
github.com/gorilla/mux v1.8.0
github.com/gorilla/websocket v1.4.2
github.com/hashicorp/golang-lru v0.5.4
github.com/lib/pq v1.9.0
github.com/libp2p/go-libp2p v0.13.0
@ -17,15 +18,16 @@ require (
github.com/libp2p/go-libp2p-kad-dht v0.11.1
github.com/libp2p/go-libp2p-pubsub v0.4.1
github.com/libp2p/go-libp2p-record v0.1.3
github.com/lucas-clemente/quic-go v0.17.3
github.com/lucas-clemente/quic-go v0.19.3
github.com/matrix-org/dugong v0.0.0-20180820122854-51a565b5666b
github.com/matrix-org/go-http-js-libp2p v0.0.0-20200518170932-783164aeeda4
github.com/matrix-org/go-sqlite3-js v0.0.0-20200522092705-bc8506ccbcf3
github.com/matrix-org/gomatrix v0.0.0-20200827122206-7dd5e2a05bcd
github.com/matrix-org/gomatrixserverlib v0.0.0-20210302161955-6142fe3f8c2c
github.com/matrix-org/naffka v0.0.0-20201009174903-d26a3b9cb161
github.com/matrix-org/pinecone v0.0.0-20210510160342-a1dfbcf4bd47
github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4
github.com/mattn/go-sqlite3 v1.14.6
github.com/mattn/go-sqlite3 v1.14.7-0.20210414154423-1157a4212dcb
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646
github.com/ngrok/sqlmw v0.0.0-20200129213757-d5c93a81bec6
github.com/opentracing/opentracing-go v1.2.0
@ -41,8 +43,9 @@ require (
go.uber.org/atomic v1.7.0
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110
golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69 // indirect
gopkg.in/h2non/bimg.v1 v1.1.5
gopkg.in/yaml.v2 v2.4.0
)
go 1.13
go 1.14

go.sum (90 lines changed)
View file

@ -24,6 +24,9 @@ github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go
github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w=
github.com/RyanCarrier/dijkstra v1.0.0/go.mod h1:5agGUBNEtUAGIANmbw09fuO3a2htPEkc1jNH01qxCWA=
github.com/RyanCarrier/dijkstra-1 v0.0.0-20170512020943-0e5801a26345/go.mod h1:OK4EvWJ441LQqGzed5NGB6vKBAE34n3z7iayPcEwr30=
github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/sarama v1.26.1/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU=
@ -36,11 +39,27 @@ github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
github.com/albertorestifo/dijkstra v0.0.0-20160910063646-aba76f725f72/go.mod h1:o+JdB7VetTHjLhU0N57x18B9voDBQe0paApdEAEoEfw=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c=
github.com/anacrolix/envpprof v1.0.0/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c=
github.com/anacrolix/envpprof v1.1.1 h1:sHQCyj7HtiSfaZAzL2rJrQdyS7odLqlwO6nhk/tG/j8=
github.com/anacrolix/envpprof v1.1.1/go.mod h1:My7T5oSqVfEn4MD4Meczkw/f5lSIndGAKu/0SM/rkf4=
github.com/anacrolix/log v0.3.0 h1:Btxh7GkT4JYWvWJ1uKOwgobf+7q/1eFQaDdCUXCtssw=
github.com/anacrolix/log v0.3.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU=
github.com/anacrolix/missinggo v1.1.2-0.20190815015349-b888af804467/go.mod h1:MBJu3Sk/k3ZfGYcS7z18gwfu72Ey/xopPFJJbTi5yIo=
github.com/anacrolix/missinggo v1.2.1 h1:0IE3TqX5y5D0IxeMwTyIgqdDew4QrzcXaaEnJQyjHvw=
github.com/anacrolix/missinggo v1.2.1/go.mod h1:J5cMhif8jPmFoC3+Uvob3OXXNIhOUikzMt+uUjeM21Y=
github.com/anacrolix/missinggo/perf v1.0.0 h1:7ZOGYziGEBytW49+KmYGTaNfnwUqP1HBsy6BqESAJVw=
github.com/anacrolix/missinggo/perf v1.0.0/go.mod h1:ljAFWkBuzkO12MQclXzZrosP5urunoLS0Cbvb4V0uMQ=
github.com/anacrolix/sync v0.2.0 h1:oRe22/ZB+v7v/5Mbc4d2zE0AXEZy0trKyKLjqYOt6tY=
github.com/anacrolix/sync v0.2.0/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DCOj/I0g=
github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw=
github.com/anacrolix/utp v0.1.0/go.mod h1:MDwc+vsGEq7RMw6lr2GKOEqjWny5hO5OZXRVNaBJ2Dk=
github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
@ -63,6 +82,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 h1:GKTyiRCL6zVf5wWaqKnf+7Qs6GbEPfd4iMOitWzXJx8=
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8/go.mod h1:spo1JLcs67NmW1aVLEgtA8Yy1elc+X8y5SRW1sFW4Og=
github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8=
github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
@ -119,7 +142,9 @@ github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70d
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q=
@ -144,7 +169,6 @@ github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6 h1:u/UEqS66A5ckRmS4yNp
github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ=
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
@ -152,8 +176,9 @@ github.com/frankban/quicktest v1.0.0/go.mod h1:R98jIehRai+d1/3Hv2//jOVCTJhW1VBav
github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o=
github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc=
github.com/getsentry/sentry-go v0.10.0 h1:6gwY+66NHKqyZrdi6O2jGdo7wGdo9b3B69E01NFgT5g=
github.com/getsentry/sentry-go v0.10.0/go.mod h1:kELm/9iCblqUYh+ZRML7PNdCvEuw24wBvJPYyi86cws=
@ -161,6 +186,8 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME
github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
@ -185,13 +212,15 @@ github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 h1:uHTyIjqVhYRhLbJ8nIiOJHkEZZ+5YoOsAbD3sk82NiE=
github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.4.0 h1:Rd1kQnQu0Hq3qvJppYSG0HtP+f5LPPUiDswTLiEegLg=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@ -213,6 +242,7 @@ github.com/gologme/log v0.0.0-20181207131047-4e5d8ccb38e8/go.mod h1:gq31gQ8wEHkR
github.com/gologme/log v1.2.0 h1:Ya5Ip/KD6FX7uH0S31QO87nCCSucKtF44TLbTtO7V4c=
github.com/gologme/log v1.2.0/go.mod h1:gq31gQ8wEHkR+WekdWsqDuf8pXTUZA9BnnzTuPz1Y9U=
github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@ -239,6 +269,7 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
@ -291,6 +322,9 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hjson/hjson-go v3.0.2-0.20200316202735-d5d0e8b0617d+incompatible/go.mod h1:qsetwF8NlsTsOTwZTApNlTCerV+b2GjYRRcIk4JMFio=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
github.com/huandu/xstrings v1.2.0 h1:yPeWdRnmynF7p+lLYz0H2tthW9lqhMJrQV/U7yy4wX0=
github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo=
github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc=
@ -385,6 +419,7 @@ github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
@ -640,8 +675,8 @@ github.com/libp2p/go-yamux/v2 v2.0.0 h1:vSGhAy5u6iHBq11ZDcyHH4Blcf9xlBhT4WQDoOE9
github.com/libp2p/go-yamux/v2 v2.0.0/go.mod h1:NVWira5+sVUIU6tu1JWvaRn1dRnG+cawOJiflsAM+7U=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/lucas-clemente/quic-go v0.17.3 h1:jMX/MmDNCljfisgMmPGUcBJ+zUh9w3d3ia4YJjYS3TM=
github.com/lucas-clemente/quic-go v0.17.3/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE=
github.com/lucas-clemente/quic-go v0.19.3 h1:eCDQqvGBB+kCTkA0XrAFtNe81FMa0/fn4QSoeAbmiF4=
github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/lxn/walk v0.0.0-20210112085537-c389da54e794/go.mod h1:E23UucZGqpuUANJooIbHWCufXvOcT6E7Stq81gU+CSQ=
github.com/lxn/win v0.0.0-20201111105847-2a20daff6a55/go.mod h1:KxxjdtRkfNoYDCUP5ryK7XJJNTnpC8atvtmTheChOtk=
@ -652,9 +687,11 @@ github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/marten-seemann/qpack v0.1.0/go.mod h1:LFt1NU/Ptjip0C2CPkhimBz5CGE3WGDAUWqna+CNTrI=
github.com/marten-seemann/qtls v0.9.1 h1:O0YKQxNVPaiFgMng0suWEOY2Sb4LT2sRn9Qimq3Z1IQ=
github.com/marten-seemann/qtls v0.9.1/go.mod h1:T1MmAdDPyISzxlK6kjRr0pcZFBVd1OZbBb/j3cvzHhk=
github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc=
github.com/marten-seemann/qtls v0.10.0 h1:ECsuYUKalRL240rRD4Ri33ISb7kAQ3qGDlrrl55b2pc=
github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs=
github.com/marten-seemann/qtls-go1-15 v0.1.1 h1:LIH6K34bPVttyXnUWixk0bzH6/N07VxbSabxn5A5gZQ=
github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I=
github.com/matrix-org/dugong v0.0.0-20180820122854-51a565b5666b h1:xpcmnpfUImRC4O2SAS/dmTcJENDXvGmLUzey76V1R3Q=
github.com/matrix-org/dugong v0.0.0-20180820122854-51a565b5666b/go.mod h1:NgPCr+UavRGH6n5jmdX8DuqFZ4JiCWIJoZiuhTRLSUg=
github.com/matrix-org/go-http-js-libp2p v0.0.0-20200518170932-783164aeeda4 h1:eqE5OnGx9ZMWmrRbD3KF/3KtTunw0iQulI7YxOIdxo4=
@ -668,6 +705,8 @@ github.com/matrix-org/gomatrixserverlib v0.0.0-20210302161955-6142fe3f8c2c h1:vW
github.com/matrix-org/gomatrixserverlib v0.0.0-20210302161955-6142fe3f8c2c/go.mod h1:JsAzE1Ll3+gDWS9JSUHPJiiyAksvOOnGWF2nXdg4ZzU=
github.com/matrix-org/naffka v0.0.0-20201009174903-d26a3b9cb161 h1:h1XVh05pLoC+nJjP3GIpj5wUsuC8WdHP3He0RTkRJTs=
github.com/matrix-org/naffka v0.0.0-20201009174903-d26a3b9cb161/go.mod h1:sjyPyRxKM5uw1nD2cJ6O2OxI6GOqyVBfNXqKjBZTBZE=
github.com/matrix-org/pinecone v0.0.0-20210510160342-a1dfbcf4bd47 h1:kWLnEcQoqJWlBtgtooa7ixyGNL3fy4CsyfR8M6b8vdI=
github.com/matrix-org/pinecone v0.0.0-20210510160342-a1dfbcf4bd47/go.mod h1:/75aR0l7umUnMBk02Q5+QC2IPY58S21BLpoPR9BUr0k=
github.com/matrix-org/util v0.0.0-20190711121626-527ce5ddefc7/go.mod h1:vVQlW/emklohkZnOPwD3LrZUBqdfsbiyO3p1lNV8F6U=
github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4 h1:eCEHXWDv9Rm335MSuB49mFUK44bwZPFSDde3ORE3syk=
github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4/go.mod h1:vVQlW/emklohkZnOPwD3LrZUBqdfsbiyO3p1lNV8F6U=
@ -684,9 +723,10 @@ github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcME
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-sqlite3 v1.14.2/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus=
github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.7-0.20210414154423-1157a4212dcb h1:ax2vG2unlxsjwS7PMRo4FECIfAdQLowd6ejWYwPQhBo=
github.com/mattn/go-sqlite3 v1.14.7-0.20210414154423-1157a4212dcb/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/mattomatic/dijkstra v0.0.0-20130617153013-6f6d134eb237/go.mod h1:UOnLAUmVG5paym8pD3C4B9BQylUDC2vXFJJpT7JrlEA=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
@ -725,6 +765,7 @@ github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjW
github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI=
github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA=
github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4=
@ -789,6 +830,8 @@ github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/neilalexander/utp v0.1.1-0.20210510143443-c1bac1cd577f h1:xcJVva0Ziw+Ud4AaY/g9OMNc7veEfsYVox3eItY2w8Q=
github.com/neilalexander/utp v0.1.1-0.20210510143443-c1bac1cd577f/go.mod h1:ylsx0342RjGHjOoVKhR/wz/7Lhiusonihfj4QLxEMcU=
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ=
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8=
github.com/ngrok/sqlmw v0.0.0-20200129213757-d5c93a81bec6 h1:evlcQnJY+v8XRRchV3hXzpHDl6GcEZeLXAhlH9Csdww=
@ -803,16 +846,16 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg=
github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
@ -830,6 +873,7 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
@ -887,6 +931,7 @@ github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
@ -920,6 +965,7 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf
github.com/sirupsen/logrus v1.8.0 h1:nfhvjKcUMhBMVqbKHJlk5RPrrfYr/NMo3692g0dwfWU=
github.com/sirupsen/logrus v1.8.0/go.mod h1:4GuYW9TZmE769R5STWrRakJc4UqQ3+QQ95fyz7ENv1A=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
@ -947,6 +993,7 @@ github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5J
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@ -969,6 +1016,7 @@ github.com/tidwall/pretty v1.0.2/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhV
github.com/tidwall/sjson v1.0.3/go.mod h1:bURseu1nuBkFpIES5cz6zBtjmYeOQmEESshn7VpF15Y=
github.com/tidwall/sjson v1.1.5 h1:wsUceI/XDyZk3J1FUvuuYlK62zJv2HO2Pzb8A5EWdUE=
github.com/tidwall/sjson v1.1.5/go.mod h1:VuJzsZnTowhSxWdOgsAnb886i4AjEyTkk7tNtsL7EYE=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U=
github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
@ -1000,6 +1048,7 @@ github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 h1:
github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI=
github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow=
github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg=
github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
@ -1098,7 +1147,6 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190228165749-92fc7df08ae7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@ -1115,7 +1163,9 @@ golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw=
@ -1159,6 +1209,7 @@ golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1168,11 +1219,13 @@ golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201018230417-eeed37f84f13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201231184435-2d18734c6014/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210105210732-16f7687f5001/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1208,8 +1261,9 @@ golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114 h1:DnSr2mCsxyCE6ZgIkmcWUQY2R5cH/6wL7eIxEmQOMSE=
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69 h1:yBHHx+XZqXJBm6Exke3N7V9gnlsyXxoCPEb1yVenjfk=
golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -1263,6 +1317,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
@ -1302,6 +1357,7 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=

View file

@ -72,10 +72,8 @@ func init() {
// we guarantee we will get around to it. Also, more users on a given server does not increase the number of requests
// (as /keys/query allows multiple users to be specified) so being stuck behind matrix.org won't materially be any worse
// than being stuck behind foo.bar
// In the event that the query fails, the worker spins up a short-lived goroutine whose sole purpose is to inject the server
// name back into the channel after a certain amount of time. If in the interim the device lists have been updated, then
// the database query will return no stale lists. Reinjection into the channel continues until success or the server terminates,
// when it will be reloaded on startup.
// In the event that the query fails, a lock is acquired and the server name along with the time to wait before retrying is
// set in a map. A restarter goroutine periodically probes this map and injects servers which are ready to be retried.
type DeviceListUpdater struct {
// A map from user_id to a mutex. Used when we are missing prev IDs so we don't make more than 1
// request to the remote server and race.
@ -297,42 +295,38 @@ func (u *DeviceListUpdater) clearChannel(userID string) {
}
func (u *DeviceListUpdater) worker(ch chan gomatrixserverlib.ServerName) {
// It's possible to get many of the same server name in the channel, so in order
// to prevent processing the same server over and over we keep track of when we
// last made a request to the server. If we get the server name during the cooloff
// period, we'll ignore the poke.
lastProcessed := make(map[gomatrixserverlib.ServerName]time.Time)
// this can't be too long else sytest will give up trying to do a test
cooloffPeriod := 500 * time.Millisecond
shouldProcess := func(srv gomatrixserverlib.ServerName) bool {
// we should process requests when now is after the last process time + cooloff
return time.Now().After(lastProcessed[srv].Add(cooloffPeriod))
retries := make(map[gomatrixserverlib.ServerName]time.Time)
retriesMu := &sync.Mutex{}
// restarter goroutine which will inject failed servers into ch when it is time
go func() {
for {
var serversToRetry []gomatrixserverlib.ServerName
time.Sleep(time.Second)
retriesMu.Lock()
now := time.Now()
for srv, retryAt := range retries {
if now.After(retryAt) {
serversToRetry = append(serversToRetry, srv)
}
// on failure, spin up a short-lived goroutine to inject the server name again.
scheduledRetries := make(map[gomatrixserverlib.ServerName]time.Time)
inject := func(srv gomatrixserverlib.ServerName, duration time.Duration) {
time.Sleep(duration)
}
for _, srv := range serversToRetry {
delete(retries, srv)
}
retriesMu.Unlock()
for _, srv := range serversToRetry {
ch <- srv
}
}
}()
for serverName := range ch {
if !shouldProcess(serverName) {
if time.Now().Before(scheduledRetries[serverName]) {
// do not inject into the channel as we know there will be a sleeping goroutine
// which will do it after the cooloff period expires
continue
} else {
scheduledRetries[serverName] = time.Now().Add(cooloffPeriod)
go inject(serverName, cooloffPeriod)
continue
}
}
lastProcessed[serverName] = time.Now()
waitTime, shouldRetry := u.processServer(serverName)
if shouldRetry {
scheduledRetries[serverName] = time.Now().Add(waitTime)
go inject(serverName, waitTime)
retriesMu.Lock()
_, exists := retries[serverName]
if !exists {
retries[serverName] = time.Now().Add(waitTime)
}
retriesMu.Unlock()
}
}
}
@ -380,7 +374,7 @@ func (u *DeviceListUpdater) processServer(serverName gomatrixserverlib.ServerNam
}
}
if failCount > 0 {
logger.WithField("total", len(userIDs)).WithField("failed", failCount).Error("failed to query device keys for some users")
logger.WithField("total", len(userIDs)).WithField("failed", failCount).WithField("wait", waitTime).Error("failed to query device keys for some users")
}
for _, userID := range userIDs {
// always clear the channel to unblock Update calls regardless of success/failure
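The worker change above replaces the per-failure sleeping goroutines with a shared retry map that a single restarter goroutine drains once each retry deadline passes. A minimal, self-contained sketch of that pattern follows; the worker/process names, the plain string server type and the timings are placeholders for illustration, not the Dendrite implementation.

package main

import (
	"fmt"
	"sync"
	"time"
)

// worker drains server names from ch. On failure it records the server in a
// retry map; a single restarter goroutine re-injects entries once their retry
// time has passed, instead of spawning one goroutine per failure.
func worker(ch chan string, process func(string) (time.Duration, bool)) {
	retries := make(map[string]time.Time)
	var retriesMu sync.Mutex

	go func() {
		for {
			time.Sleep(time.Second)
			var ready []string
			retriesMu.Lock()
			now := time.Now()
			for srv, retryAt := range retries {
				if now.After(retryAt) {
					ready = append(ready, srv)
				}
			}
			for _, srv := range ready {
				delete(retries, srv)
			}
			retriesMu.Unlock()
			for _, srv := range ready {
				ch <- srv // re-inject outside the lock so a full channel cannot hold it
			}
		}
	}()

	for srv := range ch {
		waitTime, shouldRetry := process(srv)
		if shouldRetry {
			retriesMu.Lock()
			if _, exists := retries[srv]; !exists {
				retries[srv] = time.Now().Add(waitTime)
			}
			retriesMu.Unlock()
		}
	}
}

func main() {
	ch := make(chan string, 16)
	attempts := map[string]int{}
	// process stands in for querying a remote server: fail twice, then succeed.
	process := func(srv string) (time.Duration, bool) {
		attempts[srv]++
		if attempts[srv] < 3 {
			return 500 * time.Millisecond, true
		}
		fmt.Println("processed", srv, "after", attempts[srv], "attempts")
		return 0, false
	}
	go worker(ch, process)
	ch <- "remote.example.org"
	time.Sleep(3 * time.Second)
}

Collecting the ready servers under the mutex and sending them only after it is released mirrors the diff above, so the restarter never blocks on a full channel while holding the lock.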

View file

@ -158,6 +158,12 @@ func (r *uploadRequest) doUpload(
}
}
// Check if temp file size exceeds max file size configuration
if bytesWritten > types.FileSizeBytes(*cfg.MaxFileSizeBytes) {
fileutils.RemoveDir(tmpDir, r.Logger) // delete temp file
return requestEntityTooLargeJSONResponse(*cfg.MaxFileSizeBytes)
}
// Look up the media by the file hash. If we already have the file but under a
// different media ID then we won't upload the file again - instead we'll just
// add a new metadata entry that refers to the same file.
@ -219,26 +225,17 @@ func (r *uploadRequest) doUpload(
)
}
// Validate validates the uploadRequest fields
func (r *uploadRequest) Validate(maxFileSizeBytes config.FileSizeBytes) *util.JSONResponse {
if r.MediaMetadata.FileSizeBytes < 1 {
return &util.JSONResponse{
Code: http.StatusLengthRequired,
JSON: jsonerror.Unknown("HTTP Content-Length request header must be greater than zero."),
}
}
if maxFileSizeBytes > 0 && r.MediaMetadata.FileSizeBytes > types.FileSizeBytes(maxFileSizeBytes) {
func requestEntityTooLargeJSONResponse(maxFileSizeBytes config.FileSizeBytes) *util.JSONResponse {
return &util.JSONResponse{
Code: http.StatusRequestEntityTooLarge,
JSON: jsonerror.Unknown(fmt.Sprintf("HTTP Content-Length is greater than the maximum allowed upload size (%v).", maxFileSizeBytes)),
}
}
// TODO: Check if the Content-Type is a valid type?
if r.MediaMetadata.ContentType == "" {
return &util.JSONResponse{
Code: http.StatusBadRequest,
JSON: jsonerror.Unknown("HTTP Content-Type request header must be set."),
}
}
// Validate validates the uploadRequest fields
func (r *uploadRequest) Validate(maxFileSizeBytes config.FileSizeBytes) *util.JSONResponse {
if maxFileSizeBytes > 0 && r.MediaMetadata.FileSizeBytes > types.FileSizeBytes(maxFileSizeBytes) {
return requestEntityTooLargeJSONResponse(maxFileSizeBytes)
}
if strings.HasPrefix(string(r.MediaMetadata.UploadName), "~") {
return &util.JSONResponse{
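The upload change above enforces the configured maximum while the request body is being streamed into the temporary file, and reuses one helper to build the 413 response in both the streaming path and Validate. A rough sketch of the same streamed size check, assuming a hypothetical writeWithLimit helper and an arbitrary maxFileSizeBytes; it is not the mediaapi code.

package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

// maxFileSizeBytes is a stand-in for the configured MaxFileSizeBytes value.
const maxFileSizeBytes = 8

// writeWithLimit streams body into dst and reports http.StatusRequestEntityTooLarge
// as soon as more than max bytes have been written, mirroring the check the diff
// adds after the temporary file is written.
func writeWithLimit(dst io.Writer, body io.Reader, max int64) (int64, int) {
	written, err := io.Copy(dst, io.LimitReader(body, max+1))
	if err != nil {
		return written, http.StatusInternalServerError
	}
	if written > max {
		return written, http.StatusRequestEntityTooLarge
	}
	return written, http.StatusOK
}

func main() {
	var sink strings.Builder
	n, status := writeWithLimit(&sink, strings.NewReader("this payload is too large"), maxFileSizeBytes)
	fmt.Println(n, status) // 9 413
}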

View file

@ -24,6 +24,7 @@ import (
func LoadFromGoose() {
goose.AddMigration(UpAddForgottenColumn, DownAddForgottenColumn)
goose.AddMigration(UpStateBlocksRefactor, DownStateBlocksRefactor)
}
func LoadAddForgottenColumn(m *sqlutil.Migrations) {

View file

@ -0,0 +1,223 @@
// Copyright 2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package deltas
import (
"database/sql"
"fmt"
"github.com/lib/pq"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/matrix-org/dendrite/roomserver/types"
"github.com/matrix-org/util"
"github.com/sirupsen/logrus"
)
type stateSnapshotData struct {
StateSnapshotNID types.StateSnapshotNID
RoomNID types.RoomNID
}
type stateBlockData struct {
stateSnapshotData
StateBlockNID types.StateBlockNID
EventNIDs types.EventNIDs
}
func LoadStateBlocksRefactor(m *sqlutil.Migrations) {
m.AddMigration(UpStateBlocksRefactor, DownStateBlocksRefactor)
}
// nolint:gocyclo
func UpStateBlocksRefactor(tx *sql.Tx) error {
logrus.Warn("Performing state storage upgrade. Please wait, this may take some time!")
defer logrus.Warn("State storage upgrade complete")
var snapshotcount int
var maxsnapshotid int
var maxblockid int
if err := tx.QueryRow(`SELECT COUNT(DISTINCT state_snapshot_nid) FROM roomserver_state_snapshots;`).Scan(&snapshotcount); err != nil {
return fmt.Errorf("tx.QueryRow.Scan (count snapshots): %w", err)
}
if err := tx.QueryRow(`SELECT COALESCE(MAX(state_snapshot_nid),0) FROM roomserver_state_snapshots;`).Scan(&maxsnapshotid); err != nil {
return fmt.Errorf("tx.QueryRow.Scan (count snapshots): %w", err)
}
if err := tx.QueryRow(`SELECT COALESCE(MAX(state_block_nid),0) FROM roomserver_state_block;`).Scan(&maxblockid); err != nil {
return fmt.Errorf("tx.QueryRow.Scan (count snapshots): %w", err)
}
maxsnapshotid++
maxblockid++
if _, err := tx.Exec(`ALTER TABLE roomserver_state_block RENAME TO _roomserver_state_block;`); err != nil {
return fmt.Errorf("tx.Exec: %w", err)
}
if _, err := tx.Exec(`ALTER TABLE roomserver_state_snapshots RENAME TO _roomserver_state_snapshots;`); err != nil {
return fmt.Errorf("tx.Exec: %w", err)
}
// We create new sequences starting with the maximum state snapshot and block NIDs.
// This means that all newly created snapshots and blocks by the migration will have
// NIDs higher than these values, so that when we come to update the references to
// these NIDs using UPDATE statements, we can guarantee we are only ever updating old
// values and not accidentally overwriting new ones.
if _, err := tx.Exec(fmt.Sprintf(`CREATE SEQUENCE roomserver_state_block_nid_sequence START WITH %d;`, maxblockid)); err != nil {
return fmt.Errorf("tx.Exec: %w", err)
}
if _, err := tx.Exec(fmt.Sprintf(`CREATE SEQUENCE roomserver_state_snapshot_nid_sequence START WITH %d;`, maxsnapshotid)); err != nil {
return fmt.Errorf("tx.Exec: %w", err)
}
_, err := tx.Exec(`
CREATE TABLE IF NOT EXISTS roomserver_state_block (
state_block_nid bigint PRIMARY KEY DEFAULT nextval('roomserver_state_block_nid_sequence'),
state_block_hash BYTEA UNIQUE,
event_nids bigint[] NOT NULL
);
`)
if err != nil {
return fmt.Errorf("tx.Exec (create blocks table): %w", err)
}
_, err = tx.Exec(`
CREATE TABLE IF NOT EXISTS roomserver_state_snapshots (
state_snapshot_nid bigint PRIMARY KEY DEFAULT nextval('roomserver_state_snapshot_nid_sequence'),
state_snapshot_hash BYTEA UNIQUE,
room_nid bigint NOT NULL,
state_block_nids bigint[] NOT NULL
);
`)
if err != nil {
return fmt.Errorf("tx.Exec (create snapshots table): %w", err)
}
logrus.Warn("New tables created...")
batchsize := 100
for batchoffset := 0; batchoffset < snapshotcount; batchoffset += batchsize {
var snapshotrows *sql.Rows
snapshotrows, err = tx.Query(`
SELECT
state_snapshot_nid,
room_nid,
state_block_nid,
ARRAY_AGG(event_nid) AS event_nids
FROM (
SELECT
_roomserver_state_snapshots.state_snapshot_nid,
_roomserver_state_snapshots.room_nid,
_roomserver_state_block.state_block_nid,
_roomserver_state_block.event_nid
FROM
_roomserver_state_snapshots
JOIN _roomserver_state_block ON _roomserver_state_block.state_block_nid = ANY (_roomserver_state_snapshots.state_block_nids)
WHERE
_roomserver_state_snapshots.state_snapshot_nid = ANY ( SELECT DISTINCT
_roomserver_state_snapshots.state_snapshot_nid
FROM
_roomserver_state_snapshots
LIMIT $1 OFFSET $2)) AS _roomserver_state_block
GROUP BY
state_snapshot_nid,
room_nid,
state_block_nid;
`, batchsize, batchoffset)
if err != nil {
return fmt.Errorf("tx.Query: %w", err)
}
logrus.Warnf("Rewriting snapshots %d-%d of %d...", batchoffset, batchoffset+batchsize, snapshotcount)
var snapshots []stateBlockData
for snapshotrows.Next() {
var snapshot stateBlockData
var eventsarray pq.Int64Array
if err = snapshotrows.Scan(&snapshot.StateSnapshotNID, &snapshot.RoomNID, &snapshot.StateBlockNID, &eventsarray); err != nil {
return fmt.Errorf("rows.Scan: %w", err)
}
for _, e := range eventsarray {
snapshot.EventNIDs = append(snapshot.EventNIDs, types.EventNID(e))
}
snapshot.EventNIDs = snapshot.EventNIDs[:util.SortAndUnique(snapshot.EventNIDs)]
snapshots = append(snapshots, snapshot)
}
if err = snapshotrows.Close(); err != nil {
return fmt.Errorf("snapshots.Close: %w", err)
}
newsnapshots := map[stateSnapshotData]types.StateBlockNIDs{}
for _, snapshot := range snapshots {
var eventsarray pq.Int64Array
for _, e := range snapshot.EventNIDs {
eventsarray = append(eventsarray, int64(e))
}
var blocknid types.StateBlockNID
err = tx.QueryRow(`
INSERT INTO roomserver_state_block (state_block_hash, event_nids)
VALUES ($1, $2)
ON CONFLICT (state_block_hash) DO UPDATE SET event_nids=$2
RETURNING state_block_nid
`, snapshot.EventNIDs.Hash(), eventsarray).Scan(&blocknid)
if err != nil {
return fmt.Errorf("tx.QueryRow.Scan (insert new block with %d events): %w", len(eventsarray), err)
}
index := stateSnapshotData{snapshot.StateSnapshotNID, snapshot.RoomNID}
newsnapshots[index] = append(newsnapshots[index], blocknid)
}
for snapshotdata, newblocks := range newsnapshots {
var newblocksarray pq.Int64Array
for _, b := range newblocks {
newblocksarray = append(newblocksarray, int64(b))
}
var newNID types.StateSnapshotNID
err = tx.QueryRow(`
INSERT INTO roomserver_state_snapshots (state_snapshot_hash, room_nid, state_block_nids)
VALUES ($1, $2, $3)
ON CONFLICT (state_snapshot_hash) DO UPDATE SET room_nid=$2
RETURNING state_snapshot_nid
`, newblocks.Hash(), snapshotdata.RoomNID, newblocksarray).Scan(&newNID)
if err != nil {
return fmt.Errorf("tx.QueryRow.Scan (insert new snapshot): %w", err)
}
if _, err = tx.Exec(`UPDATE roomserver_events SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newNID, snapshotdata.StateSnapshotNID, maxsnapshotid); err != nil {
return fmt.Errorf("tx.Exec (update events): %w", err)
}
if _, err = tx.Exec(`UPDATE roomserver_rooms SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newNID, snapshotdata.StateSnapshotNID, maxsnapshotid); err != nil {
return fmt.Errorf("tx.Exec (update rooms): %w", err)
}
}
}
if _, err = tx.Exec(`
DROP TABLE _roomserver_state_snapshots;
DROP SEQUENCE roomserver_state_snapshot_nid_seq;
`); err != nil {
return fmt.Errorf("tx.Exec (delete old snapshot table): %w", err)
}
if _, err = tx.Exec(`
DROP TABLE _roomserver_state_block;
DROP SEQUENCE roomserver_state_block_nid_seq;
`); err != nil {
return fmt.Errorf("tx.Exec (delete old block table): %w", err)
}
return nil
}
func DownStateBlocksRefactor(tx *sql.Tx) error {
panic("Downgrading state storage is not supported")
}

View file

@ -59,12 +59,14 @@ type eventJSONStatements struct {
bulkSelectEventJSONStmt *sql.Stmt
}
func NewPostgresEventJSONTable(db *sql.DB) (tables.EventJSON, error) {
s := &eventJSONStatements{}
func createEventJSONTable(db *sql.DB) error {
_, err := db.Exec(eventJSONSchema)
if err != nil {
return nil, err
}
return err
}
func prepareEventJSONTable(db *sql.DB) (tables.EventJSON, error) {
s := &eventJSONStatements{}
return s, shared.StatementList{
{&s.insertEventJSONStmt, insertEventJSONSQL},
{&s.bulkSelectEventJSONStmt, bulkSelectEventJSONSQL},

View file

@ -77,12 +77,14 @@ type eventStateKeyStatements struct {
bulkSelectEventStateKeyStmt *sql.Stmt
}
func NewPostgresEventStateKeysTable(db *sql.DB) (tables.EventStateKeys, error) {
s := &eventStateKeyStatements{}
func createEventStateKeysTable(db *sql.DB) error {
_, err := db.Exec(eventStateKeysSchema)
if err != nil {
return nil, err
}
return err
}
func prepareEventStateKeysTable(db *sql.DB) (tables.EventStateKeys, error) {
s := &eventStateKeyStatements{}
return s, shared.StatementList{
{&s.insertEventStateKeyNIDStmt, insertEventStateKeyNIDSQL},
{&s.selectEventStateKeyNIDStmt, selectEventStateKeyNIDSQL},

View file

@ -100,12 +100,13 @@ type eventTypeStatements struct {
bulkSelectEventTypeNIDStmt *sql.Stmt
}
func NewPostgresEventTypesTable(db *sql.DB) (tables.EventTypes, error) {
s := &eventTypeStatements{}
func createEventTypesTable(db *sql.DB) error {
_, err := db.Exec(eventTypesSchema)
if err != nil {
return nil, err
}
return err
}
func prepareEventTypesTable(db *sql.DB) (tables.EventTypes, error) {
s := &eventTypeStatements{}
return s, shared.StatementList{
{&s.insertEventTypeNIDStmt, insertEventTypeNIDSQL},

View file

@ -19,6 +19,7 @@ import (
"context"
"database/sql"
"fmt"
"sort"
"github.com/lib/pq"
"github.com/matrix-org/dendrite/internal"
@ -88,6 +89,16 @@ const bulkSelectStateEventByIDSQL = "" +
" WHERE event_id = ANY($1)" +
" ORDER BY event_type_nid, event_state_key_nid ASC"
// Bulk look up of events by event NID, optionally filtering by the event type
// or event state key NIDs if provided. (The CARDINALITY check will return true
// if the provided arrays are empty, ergo no filtering).
const bulkSelectStateEventByNIDSQL = "" +
"SELECT event_type_nid, event_state_key_nid, event_nid FROM roomserver_events" +
" WHERE event_nid = ANY($1)" +
" AND (CARDINALITY($2::bigint[]) = 0 OR event_type_nid = ANY($2))" +
" AND (CARDINALITY($3::bigint[]) = 0 OR event_state_key_nid = ANY($3))" +
" ORDER BY event_type_nid, event_state_key_nid ASC"
const bulkSelectStateAtEventByIDSQL = "" +
"SELECT event_type_nid, event_state_key_nid, event_nid, state_snapshot_nid, is_rejected FROM roomserver_events" +
" WHERE event_id = ANY($1)"
@ -127,6 +138,7 @@ type eventStatements struct {
insertEventStmt *sql.Stmt
selectEventStmt *sql.Stmt
bulkSelectStateEventByIDStmt *sql.Stmt
bulkSelectStateEventByNIDStmt *sql.Stmt
bulkSelectStateAtEventByIDStmt *sql.Stmt
updateEventStateStmt *sql.Stmt
selectEventSentToOutputStmt *sql.Stmt
@ -140,17 +152,19 @@ type eventStatements struct {
selectRoomNIDsForEventNIDsStmt *sql.Stmt
}
func NewPostgresEventsTable(db *sql.DB) (tables.Events, error) {
s := &eventStatements{}
func createEventsTable(db *sql.DB) error {
_, err := db.Exec(eventsSchema)
if err != nil {
return nil, err
}
return err
}
func prepareEventsTable(db *sql.DB) (tables.Events, error) {
s := &eventStatements{}
return s, shared.StatementList{
{&s.insertEventStmt, insertEventSQL},
{&s.selectEventStmt, selectEventSQL},
{&s.bulkSelectStateEventByIDStmt, bulkSelectStateEventByIDSQL},
{&s.bulkSelectStateEventByNIDStmt, bulkSelectStateEventByNIDSQL},
{&s.bulkSelectStateAtEventByIDStmt, bulkSelectStateAtEventByIDSQL},
{&s.updateEventStateStmt, updateEventStateSQL},
{&s.updateEventSentToOutputStmt, updateEventSentToOutputSQL},
@ -238,6 +252,42 @@ func (s *eventStatements) BulkSelectStateEventByID(
return results, nil
}
// bulkSelectStateEventByNID looks up a list of state events by event NID.
// If any of the requested events are missing from the database it returns a types.MissingEventError
func (s *eventStatements) BulkSelectStateEventByNID(
ctx context.Context, eventNIDs []types.EventNID,
stateKeyTuples []types.StateKeyTuple,
) ([]types.StateEntry, error) {
tuples := stateKeyTupleSorter(stateKeyTuples)
sort.Sort(tuples)
eventTypeNIDArray, eventStateKeyNIDArray := tuples.typesAndStateKeysAsArrays()
rows, err := s.bulkSelectStateEventByNIDStmt.QueryContext(ctx, eventNIDsAsArray(eventNIDs), eventTypeNIDArray, eventStateKeyNIDArray)
if err != nil {
return nil, err
}
defer internal.CloseAndLogIfError(ctx, rows, "bulkSelectStateEventByID: rows.close() failed")
// We know that we will only get as many results as event IDs
// because of the unique constraint on event IDs.
// So we can allocate an array of the correct size now.
// We might get fewer results than IDs so we adjust the length of the slice before returning it.
results := make([]types.StateEntry, len(eventNIDs))
i := 0
for ; rows.Next(); i++ {
result := &results[i]
if err = rows.Scan(
&result.EventTypeNID,
&result.EventStateKeyNID,
&result.EventNID,
); err != nil {
return nil, err
}
}
if err = rows.Err(); err != nil {
return nil, err
}
return results[:i], nil
}
// bulkSelectStateAtEventByID looks up the state at a list of events by event ID.
// If any of the requested events are missing from the database it returns a types.MissingEventError.
// If we do not have the state for any of the requested events it returns a types.MissingEventError.
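The new bulkSelectStateEventByNIDSQL above makes the type and state-key filters optional by checking CARDINALITY of the bound arrays: an empty array switches that branch of the WHERE clause off. A small sketch of the same optional-filter idiom against a hypothetical demo_events table, using lib/pq arrays as the code above does; the connection string and table are illustrative only.

package main

import (
	"database/sql"
	"fmt"
	"log"

	"github.com/lib/pq"
)

// selectEvents filters by typeNIDs and/or stateKeyNIDs only when the slices are
// non-empty; CARDINALITY($n) = 0 short-circuits the corresponding condition.
func selectEvents(db *sql.DB, eventNIDs, typeNIDs, stateKeyNIDs []int64) ([]int64, error) {
	// lib/pq serialises a nil slice as SQL NULL; use empty (non-nil) slices so
	// CARDINALITY sees an empty array rather than NULL.
	if typeNIDs == nil {
		typeNIDs = []int64{}
	}
	if stateKeyNIDs == nil {
		stateKeyNIDs = []int64{}
	}
	rows, err := db.Query(`
		SELECT event_nid FROM demo_events
		 WHERE event_nid = ANY($1)
		   AND (CARDINALITY($2::bigint[]) = 0 OR event_type_nid = ANY($2))
		   AND (CARDINALITY($3::bigint[]) = 0 OR event_state_key_nid = ANY($3))`,
		pq.Int64Array(eventNIDs), pq.Int64Array(typeNIDs), pq.Int64Array(stateKeyNIDs),
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var out []int64
	for rows.Next() {
		var nid int64
		if err := rows.Scan(&nid); err != nil {
			return nil, err
		}
		out = append(out, nid)
	}
	return out, rows.Err()
}

func main() {
	db, err := sql.Open("postgres", "postgres://localhost/demo?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	// Empty filter slices: return every requested event NID, unfiltered.
	nids, err := selectEvents(db, []int64{1, 2, 3}, nil, nil)
	fmt.Println(nids, err)
}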

View file

@ -82,12 +82,13 @@ type inviteStatements struct {
updateInviteRetiredStmt *sql.Stmt
}
func NewPostgresInvitesTable(db *sql.DB) (tables.Invites, error) {
s := &inviteStatements{}
func createInvitesTable(db *sql.DB) error {
_, err := db.Exec(inviteSchema)
if err != nil {
return nil, err
}
return err
}
func prepareInvitesTable(db *sql.DB) (tables.Invites, error) {
s := &inviteStatements{}
return s, shared.StatementList{
{&s.insertInviteEventStmt, insertInviteEventSQL},

View file

@ -139,12 +139,13 @@ type membershipStatements struct {
updateMembershipForgetRoomStmt *sql.Stmt
}
func NewPostgresMembershipTable(db *sql.DB) (tables.Membership, error) {
s := &membershipStatements{}
func createMembershipTable(db *sql.DB) error {
_, err := db.Exec(membershipSchema)
if err != nil {
return nil, err
}
return err
}
func prepareMembershipTable(db *sql.DB) (tables.Membership, error) {
s := &membershipStatements{}
return s, shared.StatementList{
{&s.insertMembershipStmt, insertMembershipSQL},
@ -162,11 +163,6 @@ func NewPostgresMembershipTable(db *sql.DB) (tables.Membership, error) {
}.Prepare(db)
}
func (s *membershipStatements) execSchema(db *sql.DB) error {
_, err := db.Exec(membershipSchema)
return err
}
func (s *membershipStatements) InsertMembership(
ctx context.Context,
txn *sql.Tx, roomNID types.RoomNID, targetUserNID types.EventStateKeyNID,

View file

@ -65,12 +65,13 @@ type previousEventStatements struct {
selectPreviousEventExistsStmt *sql.Stmt
}
func NewPostgresPreviousEventsTable(db *sql.DB) (tables.PreviousEvents, error) {
s := &previousEventStatements{}
func createPrevEventsTable(db *sql.DB) error {
_, err := db.Exec(previousEventSchema)
if err != nil {
return nil, err
}
return err
}
func preparePrevEventsTable(db *sql.DB) (tables.PreviousEvents, error) {
s := &previousEventStatements{}
return s, shared.StatementList{
{&s.insertPreviousEventStmt, insertPreviousEventSQL},

View file

@ -50,12 +50,14 @@ type publishedStatements struct {
selectPublishedStmt *sql.Stmt
}
func NewPostgresPublishedTable(db *sql.DB) (tables.Published, error) {
s := &publishedStatements{}
func createPublishedTable(db *sql.DB) error {
_, err := db.Exec(publishedSchema)
if err != nil {
return nil, err
}
return err
}
func preparePublishedTable(db *sql.DB) (tables.Published, error) {
s := &publishedStatements{}
return s, shared.StatementList{
{&s.upsertPublishedStmt, upsertPublishedSQL},
{&s.selectAllPublishedStmt, selectAllPublishedSQL},

View file

@ -60,12 +60,13 @@ type redactionStatements struct {
markRedactionValidatedStmt *sql.Stmt
}
func NewPostgresRedactionsTable(db *sql.DB) (tables.Redactions, error) {
s := &redactionStatements{}
func createRedactionsTable(db *sql.DB) error {
_, err := db.Exec(redactionsSchema)
if err != nil {
return nil, err
}
return err
}
func prepareRedactionsTable(db *sql.DB) (tables.Redactions, error) {
s := &redactionStatements{}
return s, shared.StatementList{
{&s.insertRedactionStmt, insertRedactionSQL},

View file

@ -62,12 +62,14 @@ type roomAliasesStatements struct {
deleteRoomAliasStmt *sql.Stmt
}
func NewPostgresRoomAliasesTable(db *sql.DB) (tables.RoomAliases, error) {
s := &roomAliasesStatements{}
func createRoomAliasesTable(db *sql.DB) error {
_, err := db.Exec(roomAliasesSchema)
if err != nil {
return nil, err
}
return err
}
func prepareRoomAliasesTable(db *sql.DB) (tables.RoomAliases, error) {
s := &roomAliasesStatements{}
return s, shared.StatementList{
{&s.insertRoomAliasStmt, insertRoomAliasSQL},
{&s.selectRoomIDFromAliasStmt, selectRoomIDFromAliasSQL},

View file

@ -96,12 +96,14 @@ type roomStatements struct {
bulkSelectRoomNIDsStmt *sql.Stmt
}
func NewPostgresRoomsTable(db *sql.DB) (tables.Rooms, error) {
s := &roomStatements{}
func createRoomsTable(db *sql.DB) error {
_, err := db.Exec(roomsSchema)
if err != nil {
return nil, err
}
return err
}
func prepareRoomsTable(db *sql.DB) (tables.Rooms, error) {
s := &roomStatements{}
return s, shared.StatementList{
{&s.insertRoomNIDStmt, insertRoomNIDSQL},
{&s.selectRoomNIDStmt, selectRoomNIDSQL},

View file

@ -41,141 +41,88 @@ const stateDataSchema = `
-- which in turn makes it easier to merge state data blocks.
CREATE SEQUENCE IF NOT EXISTS roomserver_state_block_nid_seq;
CREATE TABLE IF NOT EXISTS roomserver_state_block (
-- Local numeric ID for this state data.
state_block_nid bigint NOT NULL,
event_type_nid bigint NOT NULL,
event_state_key_nid bigint NOT NULL,
event_nid bigint NOT NULL,
UNIQUE (state_block_nid, event_type_nid, event_state_key_nid)
-- The state block NID that identifies this state block.
state_block_nid bigint PRIMARY KEY DEFAULT nextval('roomserver_state_block_nid_seq'),
-- The hash of the state block, which is used to enforce uniqueness. The hash is
-- generated in Dendrite and passed through to the database, as a btree index over
-- this column is cheap and fits within the maximum index size.
state_block_hash BYTEA UNIQUE,
-- The event NIDs contained within the state block.
event_nids bigint[] NOT NULL
);
`
// Insert a new state block. If we conflict on the hash column then
// we must perform an update so that the RETURNING statement returns the
// ID of the row that we conflicted with, so that we can then refer to
// the original block.
const insertStateDataSQL = "" +
"INSERT INTO roomserver_state_block (state_block_nid, event_type_nid, event_state_key_nid, event_nid)" +
" VALUES ($1, $2, $3, $4)"
"INSERT INTO roomserver_state_block (state_block_hash, event_nids)" +
" VALUES ($1, $2)" +
" ON CONFLICT (state_block_hash) DO UPDATE SET event_nids=$2" +
" RETURNING state_block_nid"
const selectNextStateBlockNIDSQL = "" +
"SELECT nextval('roomserver_state_block_nid_seq')"
// Bulk state lookup by numeric state block ID.
// Sort by the state_block_nid, event_type_nid, event_state_key_nid
// This means that all the entries for a given state_block_nid will appear
// together in the list and those entries will be sorted by event_type_nid
// and event_state_key_nid. This property makes it easier to merge two
// state data blocks together.
const bulkSelectStateBlockEntriesSQL = "" +
"SELECT state_block_nid, event_type_nid, event_state_key_nid, event_nid" +
" FROM roomserver_state_block WHERE state_block_nid = ANY($1)" +
" ORDER BY state_block_nid, event_type_nid, event_state_key_nid"
// Bulk state lookup by numeric state block ID.
// Filters the rows in each block to the requested types and state keys.
// We would like to restrict to particular type state key pairs but we are
// restricted by the query language to pull the cross product of a list
// of types and a list of state_keys. So we have to filter the result in the
// application to restrict it to the list of event types and state keys we
// actually wanted.
const bulkSelectFilteredStateBlockEntriesSQL = "" +
"SELECT state_block_nid, event_type_nid, event_state_key_nid, event_nid" +
" FROM roomserver_state_block WHERE state_block_nid = ANY($1)" +
" AND event_type_nid = ANY($2) AND event_state_key_nid = ANY($3)" +
" ORDER BY state_block_nid, event_type_nid, event_state_key_nid"
"SELECT state_block_nid, event_nids" +
" FROM roomserver_state_block WHERE state_block_nid = ANY($1)"
type stateBlockStatements struct {
insertStateDataStmt *sql.Stmt
selectNextStateBlockNIDStmt *sql.Stmt
bulkSelectStateBlockEntriesStmt *sql.Stmt
bulkSelectFilteredStateBlockEntriesStmt *sql.Stmt
}
func NewPostgresStateBlockTable(db *sql.DB) (tables.StateBlock, error) {
s := &stateBlockStatements{}
func createStateBlockTable(db *sql.DB) error {
_, err := db.Exec(stateDataSchema)
if err != nil {
return nil, err
}
return err
}
func prepareStateBlockTable(db *sql.DB) (tables.StateBlock, error) {
s := &stateBlockStatements{}
return s, shared.StatementList{
{&s.insertStateDataStmt, insertStateDataSQL},
{&s.selectNextStateBlockNIDStmt, selectNextStateBlockNIDSQL},
{&s.bulkSelectStateBlockEntriesStmt, bulkSelectStateBlockEntriesSQL},
{&s.bulkSelectFilteredStateBlockEntriesStmt, bulkSelectFilteredStateBlockEntriesSQL},
}.Prepare(db)
}
func (s *stateBlockStatements) BulkInsertStateData(
ctx context.Context,
txn *sql.Tx,
entries []types.StateEntry,
) (types.StateBlockNID, error) {
stateBlockNID, err := s.selectNextStateBlockNID(ctx)
if err != nil {
return 0, err
entries types.StateEntries,
) (id types.StateBlockNID, err error) {
entries = entries[:util.SortAndUnique(entries)]
var nids types.EventNIDs
for _, e := range entries {
nids = append(nids, e.EventNID)
}
for _, entry := range entries {
_, err := s.insertStateDataStmt.ExecContext(
ctx,
int64(stateBlockNID),
int64(entry.EventTypeNID),
int64(entry.EventStateKeyNID),
int64(entry.EventNID),
)
if err != nil {
return 0, err
}
}
return stateBlockNID, nil
}
func (s *stateBlockStatements) selectNextStateBlockNID(
ctx context.Context,
) (types.StateBlockNID, error) {
var stateBlockNID int64
err := s.selectNextStateBlockNIDStmt.QueryRowContext(ctx).Scan(&stateBlockNID)
return types.StateBlockNID(stateBlockNID), err
err = s.insertStateDataStmt.QueryRowContext(
ctx, nids.Hash(), eventNIDsAsArray(nids),
).Scan(&id)
return
}
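The new BulkInsertStateData no longer allocates a fresh NID per call: it sorts and deduplicates the entries, hashes the event NID list, and relies on ON CONFLICT ... DO UPDATE ... RETURNING so that an identical block resolves to the NID of the row it collided with. A simplified, self-contained sketch of that pattern follows; it uses plain int64 slices rather than Dendrite's types and assumes the Postgres schema shown above.

```go
package example

import (
	"context"
	"database/sql"
	"encoding/json"
	"sort"

	"github.com/lib/pq"
	"golang.org/x/crypto/blake2b"
)

// insertStateBlock is an illustrative stand-in for BulkInsertStateData: the
// same set of event NIDs always produces the same hash, so re-inserting an
// existing block simply returns the NID of the row already stored.
func insertStateBlock(ctx context.Context, db *sql.DB, eventNIDs []int64) (int64, error) {
	// Canonicalise the input: sort, then drop adjacent duplicates.
	sort.Slice(eventNIDs, func(i, j int) bool { return eventNIDs[i] < eventNIDs[j] })
	deduped := make([]int64, 0, len(eventNIDs))
	for i, nid := range eventNIDs {
		if i == 0 || nid != eventNIDs[i-1] {
			deduped = append(deduped, nid)
		}
	}
	// Hash the canonical JSON form, mirroring the Hash() helpers added in this change.
	js, err := json.Marshal(deduped)
	if err != nil {
		return 0, err
	}
	hash := blake2b.Sum256(js)
	// DO UPDATE (rather than DO NOTHING) is what makes RETURNING produce a row
	// on conflict, so the caller always gets a usable state block NID back.
	const upsert = `
		INSERT INTO roomserver_state_block (state_block_hash, event_nids)
		VALUES ($1, $2)
		ON CONFLICT (state_block_hash) DO UPDATE SET event_nids = $2
		RETURNING state_block_nid`
	var nid int64
	err = db.QueryRowContext(ctx, upsert, hash[:], pq.Int64Array(deduped)).Scan(&nid)
	return nid, err
}
```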
func (s *stateBlockStatements) BulkSelectStateBlockEntries(
ctx context.Context, stateBlockNIDs []types.StateBlockNID,
) ([]types.StateEntryList, error) {
nids := make([]int64, len(stateBlockNIDs))
for i := range stateBlockNIDs {
nids[i] = int64(stateBlockNIDs[i])
}
rows, err := s.bulkSelectStateBlockEntriesStmt.QueryContext(ctx, pq.Int64Array(nids))
ctx context.Context, stateBlockNIDs types.StateBlockNIDs,
) ([][]types.EventNID, error) {
rows, err := s.bulkSelectStateBlockEntriesStmt.QueryContext(ctx, stateBlockNIDsAsArray(stateBlockNIDs))
if err != nil {
return nil, err
}
defer internal.CloseAndLogIfError(ctx, rows, "bulkSelectStateBlockEntries: rows.close() failed")
results := make([]types.StateEntryList, len(stateBlockNIDs))
// current is a pointer to the StateEntryList to append the state entries to.
var current *types.StateEntryList
results := make([][]types.EventNID, len(stateBlockNIDs))
i := 0
for rows.Next() {
var (
stateBlockNID int64
eventTypeNID int64
eventStateKeyNID int64
eventNID int64
entry types.StateEntry
)
if err = rows.Scan(
&stateBlockNID, &eventTypeNID, &eventStateKeyNID, &eventNID,
); err != nil {
for ; rows.Next(); i++ {
var stateBlockNID types.StateBlockNID
var result pq.Int64Array
if err = rows.Scan(&stateBlockNID, &result); err != nil {
return nil, err
}
entry.EventTypeNID = types.EventTypeNID(eventTypeNID)
entry.EventStateKeyNID = types.EventStateKeyNID(eventStateKeyNID)
entry.EventNID = types.EventNID(eventNID)
if current == nil || types.StateBlockNID(stateBlockNID) != current.StateBlockNID {
// The state entry row is for a different state data block to the current one.
// So we start appending to the next entry in the list.
current = &results[i]
current.StateBlockNID = types.StateBlockNID(stateBlockNID)
i++
r := []types.EventNID{}
for _, e := range result {
r = append(r, types.EventNID(e))
}
current.StateEntries = append(current.StateEntries, entry)
results[i] = r
}
if err = rows.Err(); err != nil {
return nil, err
@ -186,71 +133,6 @@ func (s *stateBlockStatements) BulkSelectStateBlockEntries(
return results, err
}
func (s *stateBlockStatements) BulkSelectFilteredStateBlockEntries(
ctx context.Context,
stateBlockNIDs []types.StateBlockNID,
stateKeyTuples []types.StateKeyTuple,
) ([]types.StateEntryList, error) {
tuples := stateKeyTupleSorter(stateKeyTuples)
// Sort the tuples so that we can run binary search against them as we filter the rows returned by the db.
sort.Sort(tuples)
eventTypeNIDArray, eventStateKeyNIDArray := tuples.typesAndStateKeysAsArrays()
rows, err := s.bulkSelectFilteredStateBlockEntriesStmt.QueryContext(
ctx,
stateBlockNIDsAsArray(stateBlockNIDs),
eventTypeNIDArray,
eventStateKeyNIDArray,
)
if err != nil {
return nil, err
}
defer internal.CloseAndLogIfError(ctx, rows, "bulkSelectFilteredStateBlockEntries: rows.close() failed")
var results []types.StateEntryList
var current types.StateEntryList
for rows.Next() {
var (
stateBlockNID int64
eventTypeNID int64
eventStateKeyNID int64
eventNID int64
entry types.StateEntry
)
if err := rows.Scan(
&stateBlockNID, &eventTypeNID, &eventStateKeyNID, &eventNID,
); err != nil {
return nil, err
}
entry.EventTypeNID = types.EventTypeNID(eventTypeNID)
entry.EventStateKeyNID = types.EventStateKeyNID(eventStateKeyNID)
entry.EventNID = types.EventNID(eventNID)
// We can use binary search here because we sorted the tuples earlier
if !tuples.contains(entry.StateKeyTuple) {
// The select will return the cross product of types and state keys.
// So we need to check if type of the entry is in the list.
continue
}
if types.StateBlockNID(stateBlockNID) != current.StateBlockNID {
// The state entry row is for a different state data block to the current one.
// So we append the current entry to the results and start adding to a new one.
// The first time through the loop current will be empty.
if current.StateEntries != nil {
results = append(results, current)
}
current = types.StateEntryList{StateBlockNID: types.StateBlockNID(stateBlockNID)}
}
current.StateEntries = append(current.StateEntries, entry)
}
// Add the last entry to the list if it is not empty.
if current.StateEntries != nil {
results = append(results, current)
}
return results, rows.Err()
}
func stateBlockNIDsAsArray(stateBlockNIDs []types.StateBlockNID) pq.Int64Array {
nids := make([]int64, len(stateBlockNIDs))
for i := range stateBlockNIDs {

View file

@ -25,6 +25,7 @@ import (
"github.com/matrix-org/dendrite/roomserver/storage/shared"
"github.com/matrix-org/dendrite/roomserver/storage/tables"
"github.com/matrix-org/dendrite/roomserver/types"
"github.com/matrix-org/util"
)
const stateSnapshotSchema = `
@ -40,19 +41,29 @@ const stateSnapshotSchema = `
-- the full state under single state_block_nid.
CREATE SEQUENCE IF NOT EXISTS roomserver_state_snapshot_nid_seq;
CREATE TABLE IF NOT EXISTS roomserver_state_snapshots (
-- Local numeric ID for the state.
-- The state snapshot NID that identifies this snapshot.
state_snapshot_nid bigint PRIMARY KEY DEFAULT nextval('roomserver_state_snapshot_nid_seq'),
-- Local numeric ID of the room this state is for.
-- Unused in normal operation, but useful for background work or ad-hoc debugging.
-- The hash of the state snapshot, which is used to enforce uniqueness. The hash is
-- generated in Dendrite and passed through to the database, as a btree index over
-- this column is cheap and fits within the maximum index size.
state_snapshot_hash BYTEA UNIQUE,
-- The room NID that the snapshot belongs to.
room_nid bigint NOT NULL,
-- List of state_block_nids, stored sorted by state_block_nid.
-- The state blocks contained within this snapshot.
state_block_nids bigint[] NOT NULL
);
`
// Insert a new state snapshot. If we conflict on the hash column then
// we must perform an update so that the RETURNING statement returns the
// ID of the row that we conflicted with, so that we can then refer to
// the original snapshot.
const insertStateSQL = "" +
"INSERT INTO roomserver_state_snapshots (room_nid, state_block_nids)" +
" VALUES ($1, $2)" +
"INSERT INTO roomserver_state_snapshots (state_snapshot_hash, room_nid, state_block_nids)" +
" VALUES ($1, $2, $3)" +
" ON CONFLICT (state_snapshot_hash) DO UPDATE SET room_nid=$2" +
// Performing an update, above, ensures that the RETURNING statement
// below will always return a valid state snapshot ID
" RETURNING state_snapshot_nid"
// Bulk state data NID lookup.
@ -67,12 +78,13 @@ type stateSnapshotStatements struct {
bulkSelectStateBlockNIDsStmt *sql.Stmt
}
func NewPostgresStateSnapshotTable(db *sql.DB) (tables.StateSnapshot, error) {
s := &stateSnapshotStatements{}
func createStateSnapshotTable(db *sql.DB) error {
_, err := db.Exec(stateSnapshotSchema)
if err != nil {
return nil, err
}
return err
}
func prepareStateSnapshotTable(db *sql.DB) (tables.StateSnapshot, error) {
s := &stateSnapshotStatements{}
return s, shared.StatementList{
{&s.insertStateStmt, insertStateSQL},
@ -81,13 +93,15 @@ func NewPostgresStateSnapshotTable(db *sql.DB) (tables.StateSnapshot, error) {
}
func (s *stateSnapshotStatements) InsertState(
ctx context.Context, txn *sql.Tx, roomNID types.RoomNID, stateBlockNIDs []types.StateBlockNID,
ctx context.Context, txn *sql.Tx, roomNID types.RoomNID, nids types.StateBlockNIDs,
) (stateNID types.StateSnapshotNID, err error) {
nids := make([]int64, len(stateBlockNIDs))
for i := range stateBlockNIDs {
nids[i] = int64(stateBlockNIDs[i])
nids = nids[:util.SortAndUnique(nids)]
var id int64
err = sqlutil.TxStmt(txn, s.insertStateStmt).QueryRowContext(ctx, nids.Hash(), int64(roomNID), stateBlockNIDsAsArray(nids)).Scan(&id)
if err != nil {
return 0, err
}
err = sqlutil.TxStmt(txn, s.insertStateStmt).QueryRowContext(ctx, int64(roomNID), pq.Int64Array(nids)).Scan(&stateNID)
stateNID = types.StateSnapshotNID(id)
return
}
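Both BulkInsertStateData and InsertState trim their inputs with entries[:util.SortAndUnique(entries)] before hashing, which is what makes the hashes order-insensitive. Below is a rough sketch of what such a helper does; the real implementation in github.com/matrix-org/util may differ in detail. The NID and entry types added in this change (EventNIDs, StateBlockNIDs, StateEntries) implement sort.Interface precisely so they can be handed to a helper like this.

```go
package example

import "sort"

// sortAndUnique sorts data in place and moves duplicates past the end of the
// unique prefix, returning the length of that prefix. Callers then truncate:
// nids = nids[:sortAndUnique(nids)]. Illustrative only; matrix-org/util's
// SortAndUnique may be implemented differently.
func sortAndUnique(data sort.Interface) int {
	if data.Len() == 0 {
		return 0
	}
	sort.Sort(data)
	last := 0 // index of the last element kept in the unique prefix
	for i := 1; i < data.Len(); i++ {
		if data.Less(last, i) { // data[i] differs from the last kept element
			last++
			data.Swap(last, i)
		}
	}
	return last + 1
}
```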

View file

@ -17,6 +17,7 @@ package postgres
import (
"database/sql"
"fmt"
// Import the postgres database driver.
_ "github.com/lib/pq"
@ -39,20 +40,25 @@ func Open(dbProperties *config.DatabaseOptions, cache caching.RoomServerCaches)
var db *sql.DB
var err error
if db, err = sqlutil.Open(dbProperties); err != nil {
return nil, fmt.Errorf("sqlutil.Open: %w", err)
}
// Create the tables.
if err := d.create(db); err != nil {
return nil, err
}
// Create tables before executing migrations so we don't fail if the table is missing,
// and THEN prepare statements so we don't fail due to referencing new columns
ms := membershipStatements{}
if err := ms.execSchema(db); err != nil {
return nil, err
}
// Then execute the migrations. By this point the tables are created with the latest
// schemas.
m := sqlutil.NewMigrations()
deltas.LoadAddForgottenColumn(m)
deltas.LoadStateBlocksRefactor(m)
if err := m.RunDeltas(db, dbProperties); err != nil {
return nil, err
}
// Then prepare the statements. Now that the migrations have run, any columns referred
// to in the database code should now exist.
if err := d.prepare(db, cache); err != nil {
return nil, err
}
@ -60,61 +66,107 @@ func Open(dbProperties *config.DatabaseOptions, cache caching.RoomServerCaches)
return &d, nil
}
// nolint: gocyclo
func (d *Database) prepare(db *sql.DB, cache caching.RoomServerCaches) (err error) {
eventStateKeys, err := NewPostgresEventStateKeysTable(db)
func (d *Database) create(db *sql.DB) error {
if err := createEventStateKeysTable(db); err != nil {
return err
}
if err := createEventTypesTable(db); err != nil {
return err
}
if err := createEventJSONTable(db); err != nil {
return err
}
if err := createEventsTable(db); err != nil {
return err
}
if err := createRoomsTable(db); err != nil {
return err
}
if err := createTransactionsTable(db); err != nil {
return err
}
if err := createStateBlockTable(db); err != nil {
return err
}
if err := createStateSnapshotTable(db); err != nil {
return err
}
if err := createPrevEventsTable(db); err != nil {
return err
}
if err := createRoomAliasesTable(db); err != nil {
return err
}
if err := createInvitesTable(db); err != nil {
return err
}
if err := createMembershipTable(db); err != nil {
return err
}
if err := createPublishedTable(db); err != nil {
return err
}
if err := createRedactionsTable(db); err != nil {
return err
}
return nil
}
func (d *Database) prepare(db *sql.DB, cache caching.RoomServerCaches) error {
eventStateKeys, err := prepareEventStateKeysTable(db)
if err != nil {
return err
}
eventTypes, err := NewPostgresEventTypesTable(db)
eventTypes, err := prepareEventTypesTable(db)
if err != nil {
return err
}
eventJSON, err := NewPostgresEventJSONTable(db)
eventJSON, err := prepareEventJSONTable(db)
if err != nil {
return err
}
events, err := NewPostgresEventsTable(db)
events, err := prepareEventsTable(db)
if err != nil {
return err
}
rooms, err := NewPostgresRoomsTable(db)
rooms, err := prepareRoomsTable(db)
if err != nil {
return err
}
transactions, err := NewPostgresTransactionsTable(db)
transactions, err := prepareTransactionsTable(db)
if err != nil {
return err
}
stateBlock, err := NewPostgresStateBlockTable(db)
stateBlock, err := prepareStateBlockTable(db)
if err != nil {
return err
}
stateSnapshot, err := NewPostgresStateSnapshotTable(db)
stateSnapshot, err := prepareStateSnapshotTable(db)
if err != nil {
return err
}
roomAliases, err := NewPostgresRoomAliasesTable(db)
prevEvents, err := preparePrevEventsTable(db)
if err != nil {
return err
}
prevEvents, err := NewPostgresPreviousEventsTable(db)
roomAliases, err := prepareRoomAliasesTable(db)
if err != nil {
return err
}
invites, err := NewPostgresInvitesTable(db)
invites, err := prepareInvitesTable(db)
if err != nil {
return err
}
membership, err := NewPostgresMembershipTable(db)
membership, err := prepareMembershipTable(db)
if err != nil {
return err
}
published, err := NewPostgresPublishedTable(db)
published, err := preparePublishedTable(db)
if err != nil {
return err
}
redactions, err := NewPostgresRedactionsTable(db)
redactions, err := prepareRedactionsTable(db)
if err != nil {
return err
}
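Open() now runs in three fixed phases: create every table at the latest schema, run the migrations, and only then prepare statements. Creating first means a migration never trips over a missing table; preparing last means no statement references a column that a migration has yet to add. The skeleton below sketches that ordering only; the three helpers are placeholders, not Dendrite's actual functions.

```go
package example

import "database/sql"

// openRoomserverDB sketches the create -> migrate -> prepare ordering used in
// Open() above. The helpers are placeholders standing in for the real code.
func openRoomserverDB(db *sql.DB) error {
	// 1. Create tables. CREATE TABLE IF NOT EXISTS leaves existing (possibly
	//    old-schema) tables untouched, so this is safe on upgrades too.
	if err := createTables(db); err != nil {
		return err
	}
	// 2. Run migrations, bringing any pre-existing tables up to the latest schema.
	if err := runMigrations(db); err != nil {
		return err
	}
	// 3. Prepare statements last: every column they mention now exists.
	return prepareStatements(db)
}

func createTables(db *sql.DB) error      { return nil } // placeholder
func runMigrations(db *sql.DB) error     { return nil } // placeholder
func prepareStatements(db *sql.DB) error { return nil } // placeholder
```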

View file

@ -54,12 +54,13 @@ type transactionStatements struct {
selectTransactionEventIDStmt *sql.Stmt
}
func NewPostgresTransactionsTable(db *sql.DB) (tables.Transactions, error) {
s := &transactionStatements{}
func createTransactionsTable(db *sql.DB) error {
_, err := db.Exec(transactionsSchema)
if err != nil {
return nil, err
}
return err
}
func prepareTransactionsTable(db *sql.DB) (tables.Transactions, error) {
s := &transactionStatements{}
return s, shared.StatementList{
{&s.insertTransactionStmt, insertTransactionSQL},

View file

@ -118,9 +118,24 @@ func (d *Database) StateEntriesForTuples(
stateBlockNIDs []types.StateBlockNID,
stateKeyTuples []types.StateKeyTuple,
) ([]types.StateEntryList, error) {
return d.StateBlockTable.BulkSelectFilteredStateBlockEntries(
ctx, stateBlockNIDs, stateKeyTuples,
entries, err := d.StateBlockTable.BulkSelectStateBlockEntries(
ctx, stateBlockNIDs,
)
if err != nil {
return nil, fmt.Errorf("d.StateBlockTable.BulkSelectStateBlockEntries: %w", err)
}
lists := []types.StateEntryList{}
for i, entry := range entries {
entries, err := d.EventsTable.BulkSelectStateEventByNID(ctx, entry, stateKeyTuples)
if err != nil {
return nil, fmt.Errorf("d.EventsTable.BulkSelectStateEventByNID: %w", err)
}
lists = append(lists, types.StateEntryList{
StateBlockNID: stateBlockNIDs[i],
StateEntries: entries,
})
}
return lists, nil
}
func (d *Database) RoomInfo(ctx context.Context, roomID string) (*types.RoomInfo, error) {
@ -141,8 +156,28 @@ func (d *Database) AddState(
stateBlockNIDs []types.StateBlockNID,
state []types.StateEntry,
) (stateNID types.StateSnapshotNID, err error) {
if len(stateBlockNIDs) > 0 {
// Check to see if the event already appears in any of the existing state
// blocks. If it does then we should not add it again, as this will just
// result in excess state blocks and snapshots.
// TODO: Investigate why this is happening - probably input_events.go!
blocks, berr := d.StateBlockTable.BulkSelectStateBlockEntries(ctx, stateBlockNIDs)
if berr != nil {
return 0, fmt.Errorf("d.StateBlockTable.BulkSelectStateBlockEntries: %w", berr)
}
for i := len(state) - 1; i >= 0; i-- {
for _, events := range blocks {
for _, event := range events {
if state[i].EventNID == event {
state = append(state[:i], state[i+1:]...)
}
}
}
}
}
err = d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
if len(state) > 0 {
// If there's any state left to add then let's add new blocks.
var stateBlockNID types.StateBlockNID
stateBlockNID, err = d.StateBlockTable.BulkInsertStateData(ctx, txn, state)
if err != nil {
@ -237,7 +272,24 @@ func (d *Database) StateBlockNIDs(
func (d *Database) StateEntries(
ctx context.Context, stateBlockNIDs []types.StateBlockNID,
) ([]types.StateEntryList, error) {
return d.StateBlockTable.BulkSelectStateBlockEntries(ctx, stateBlockNIDs)
entries, err := d.StateBlockTable.BulkSelectStateBlockEntries(
ctx, stateBlockNIDs,
)
if err != nil {
return nil, fmt.Errorf("d.StateBlockTable.BulkSelectStateBlockEntries: %w", err)
}
lists := make([]types.StateEntryList, 0, len(entries))
for i, entry := range entries {
eventNIDs, err := d.EventsTable.BulkSelectStateEventByNID(ctx, entry, nil)
if err != nil {
return nil, fmt.Errorf("d.EventsTable.BulkSelectStateEventByNID: %w", err)
}
lists = append(lists, types.StateEntryList{
StateBlockNID: stateBlockNIDs[i],
StateEntries: eventNIDs,
})
}
return lists, nil
}
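Because state blocks now store only event NIDs, StateEntries and StateEntriesForTuples reassemble full state entries in two steps: fetch the event NIDs per block, then resolve those NIDs through the events table. The sketch below shows that composition with simplified local types standing in for Dendrite's tables and types packages.

```go
package example

import "context"

// Simplified stand-ins for the real storage interfaces (illustrative only).
type StateEntry struct{ EventTypeNID, EventStateKeyNID, EventNID int64 }

type StateEntryList struct {
	StateBlockNID int64
	StateEntries  []StateEntry
}

type stateBlockTable interface {
	// For each requested block NID, returns the event NIDs stored in that block.
	BulkSelectStateBlockEntries(ctx context.Context, blockNIDs []int64) ([][]int64, error)
}

type eventsTable interface {
	// Resolves event NIDs back into (type NID, state key NID, event NID) entries.
	BulkSelectStateEventByNID(ctx context.Context, eventNIDs []int64) ([]StateEntry, error)
}

// stateEntries shows the two-step lookup used above: block -> event NIDs,
// then event NIDs -> state entries.
func stateEntries(ctx context.Context, blocks stateBlockTable, events eventsTable, blockNIDs []int64) ([]StateEntryList, error) {
	nidsPerBlock, err := blocks.BulkSelectStateBlockEntries(ctx, blockNIDs)
	if err != nil {
		return nil, err
	}
	lists := make([]StateEntryList, 0, len(nidsPerBlock))
	for i, eventNIDs := range nidsPerBlock {
		entries, err := events.BulkSelectStateEventByNID(ctx, eventNIDs)
		if err != nil {
			return nil, err
		}
		lists = append(lists, StateEntryList{StateBlockNID: blockNIDs[i], StateEntries: entries})
	}
	return lists, nil
}
```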
func (d *Database) SetRoomAlias(ctx context.Context, alias string, roomID string, creatorUserID string) error {

View file

@ -24,6 +24,7 @@ import (
func LoadFromGoose() {
goose.AddMigration(UpAddForgottenColumn, DownAddForgottenColumn)
goose.AddMigration(UpStateBlocksRefactor, DownStateBlocksRefactor)
}
func LoadAddForgottenColumn(m *sqlutil.Migrations) {

View file

@ -0,0 +1,168 @@
// Copyright 2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package deltas
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/matrix-org/dendrite/roomserver/types"
"github.com/matrix-org/util"
"github.com/sirupsen/logrus"
)
func LoadStateBlocksRefactor(m *sqlutil.Migrations) {
m.AddMigration(UpStateBlocksRefactor, DownStateBlocksRefactor)
}
func UpStateBlocksRefactor(tx *sql.Tx) error {
logrus.Warn("Performing state storage upgrade. Please wait, this may take some time!")
defer logrus.Warn("State storage upgrade complete")
var maxsnapshotid int
var maxblockid int
if err := tx.QueryRow(`SELECT IFNULL(MAX(state_snapshot_nid),0) FROM roomserver_state_snapshots;`).Scan(&maxsnapshotid); err != nil {
return fmt.Errorf("tx.QueryRow.Scan (count snapshots): %w", err)
}
if err := tx.QueryRow(`SELECT IFNULL(MAX(state_block_nid),0) FROM roomserver_state_block;`).Scan(&maxblockid); err != nil {
return fmt.Errorf("tx.QueryRow.Scan (count snapshots): %w", err)
}
maxsnapshotid++
maxblockid++
if _, err := tx.Exec(`ALTER TABLE roomserver_state_block RENAME TO _roomserver_state_block;`); err != nil {
return fmt.Errorf("tx.Exec: %w", err)
}
if _, err := tx.Exec(`ALTER TABLE roomserver_state_snapshots RENAME TO _roomserver_state_snapshots;`); err != nil {
return fmt.Errorf("tx.Exec: %w", err)
}
_, err := tx.Exec(`
CREATE TABLE IF NOT EXISTS roomserver_state_block (
state_block_nid INTEGER PRIMARY KEY AUTOINCREMENT,
state_block_hash BLOB UNIQUE,
event_nids TEXT NOT NULL DEFAULT '[]'
);
`)
if err != nil {
return fmt.Errorf("tx.Exec: %w", err)
}
_, err = tx.Exec(`
CREATE TABLE IF NOT EXISTS roomserver_state_snapshots (
state_snapshot_nid INTEGER PRIMARY KEY AUTOINCREMENT,
state_snapshot_hash BLOB UNIQUE,
room_nid INTEGER NOT NULL,
state_block_nids TEXT NOT NULL DEFAULT '[]'
);
`)
if err != nil {
return fmt.Errorf("tx.Exec: %w", err)
}
snapshotrows, err := tx.Query(`SELECT state_snapshot_nid, room_nid, state_block_nids FROM _roomserver_state_snapshots;`)
if err != nil {
return fmt.Errorf("tx.Query: %w", err)
}
defer internal.CloseAndLogIfError(context.TODO(), snapshotrows, "rows.close() failed")
for snapshotrows.Next() {
var snapshot types.StateSnapshotNID
var room types.RoomNID
var jsonblocks string
var blocks []types.StateBlockNID
if err = snapshotrows.Scan(&snapshot, &room, &jsonblocks); err != nil {
return fmt.Errorf("rows.Scan: %w", err)
}
if err = json.Unmarshal([]byte(jsonblocks), &blocks); err != nil {
return fmt.Errorf("json.Unmarshal: %w", err)
}
var newblocks types.StateBlockNIDs
for _, block := range blocks {
if err = func() error {
blockrows, berr := tx.Query(`SELECT event_nid FROM _roomserver_state_block WHERE state_block_nid = $1`, block)
if berr != nil {
return fmt.Errorf("tx.Query (event nids from old block): %w", berr)
}
defer internal.CloseAndLogIfError(context.TODO(), blockrows, "rows.close() failed")
events := types.EventNIDs{}
for blockrows.Next() {
var event types.EventNID
if err = blockrows.Scan(&event); err != nil {
return fmt.Errorf("rows.Scan: %w", err)
}
events = append(events, event)
}
events = events[:util.SortAndUnique(events)]
eventjson, eerr := json.Marshal(events)
if eerr != nil {
return fmt.Errorf("json.Marshal: %w", eerr)
}
var blocknid types.StateBlockNID
err = tx.QueryRow(`
INSERT INTO roomserver_state_block (state_block_nid, state_block_hash, event_nids)
VALUES ($1, $2, $3)
ON CONFLICT (state_block_hash) DO UPDATE SET event_nids=$3
RETURNING state_block_nid
`, maxblockid, events.Hash(), eventjson).Scan(&blocknid)
if err != nil {
return fmt.Errorf("tx.QueryRow.Scan (insert new block): %w", err)
}
maxblockid++
newblocks = append(newblocks, blocknid)
return nil
}(); err != nil {
return err
}
newblocksjson, jerr := json.Marshal(newblocks)
if jerr != nil {
return fmt.Errorf("json.Marshal (new blocks): %w", jerr)
}
var newsnapshot types.StateSnapshotNID
err = tx.QueryRow(`
INSERT INTO roomserver_state_snapshots (state_snapshot_nid, state_snapshot_hash, room_nid, state_block_nids)
VALUES ($1, $2, $3, $4)
ON CONFLICT (state_snapshot_hash) DO UPDATE SET room_nid=$3
RETURNING state_snapshot_nid
`, maxsnapshotid, newblocks.Hash(), room, newblocksjson).Scan(&newsnapshot)
if err != nil {
return fmt.Errorf("tx.QueryRow.Scan (insert new snapshot): %w", err)
}
maxsnapshotid++
if _, err = tx.Exec(`UPDATE roomserver_events SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newsnapshot, snapshot, maxsnapshotid); err != nil {
return fmt.Errorf("tx.Exec (update events): %w", err)
}
if _, err = tx.Exec(`UPDATE roomserver_rooms SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newsnapshot, snapshot, maxsnapshotid); err != nil {
return fmt.Errorf("tx.Exec (update rooms): %w", err)
}
}
}
if _, err = tx.Exec(`DROP TABLE _roomserver_state_snapshots;`); err != nil {
return fmt.Errorf("tx.Exec (delete old snapshot table): %w", err)
}
if _, err = tx.Exec(`DROP TABLE _roomserver_state_block;`); err != nil {
return fmt.Errorf("tx.Exec (delete old block table): %w", err)
}
return nil
}
func DownStateBlocksRefactor(tx *sql.Tx) error {
panic("Downgrading state storage is not supported")
}
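UpStateBlocksRefactor follows the usual shape for a destructive schema change: rename the old tables out of the way, create the new schema under the original names, walk the old rows re-inserting them in the new format, then drop the renamed tables. The generic skeleton below shows only that shape; the real migration above additionally re-numbers NIDs past the old maximums and rewrites the references in roomserver_events and roomserver_rooms.

```go
package example

import (
	"database/sql"
	"fmt"
)

// renameAndRebuild sketches the rename -> create -> copy -> drop pattern used
// by the state storage migration. Table and column names are illustrative.
func renameAndRebuild(tx *sql.Tx) error {
	// 1. Move the old table aside; its rows stay intact under the new name.
	if _, err := tx.Exec(`ALTER TABLE items RENAME TO _items;`); err != nil {
		return fmt.Errorf("rename old table: %w", err)
	}
	// 2. Create the new schema under the original name.
	if _, err := tx.Exec(`CREATE TABLE items (id INTEGER PRIMARY KEY, payload TEXT NOT NULL);`); err != nil {
		return fmt.Errorf("create new table: %w", err)
	}
	// 3. Copy the data across. When rows need transforming in Go (as with the
	//    state blocks above), this becomes a SELECT loop plus INSERTs instead.
	if _, err := tx.Exec(`INSERT INTO items (id, payload) SELECT id, old_payload FROM _items;`); err != nil {
		return fmt.Errorf("copy rows: %w", err)
	}
	// 4. Drop the renamed table once everything has been migrated.
	if _, err := tx.Exec(`DROP TABLE _items;`); err != nil {
		return fmt.Errorf("drop old table: %w", err)
	}
	return nil
}
```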

View file

@ -53,14 +53,16 @@ type eventJSONStatements struct {
bulkSelectEventJSONStmt *sql.Stmt
}
func NewSqliteEventJSONTable(db *sql.DB) (tables.EventJSON, error) {
func createEventJSONTable(db *sql.DB) error {
_, err := db.Exec(eventJSONSchema)
return err
}
func prepareEventJSONTable(db *sql.DB) (tables.EventJSON, error) {
s := &eventJSONStatements{
db: db,
}
_, err := db.Exec(eventJSONSchema)
if err != nil {
return nil, err
}
return s, shared.StatementList{
{&s.insertEventJSONStmt, insertEventJSONSQL},
{&s.bulkSelectEventJSONStmt, bulkSelectEventJSONSQL},

View file

@ -70,14 +70,16 @@ type eventStateKeyStatements struct {
bulkSelectEventStateKeyStmt *sql.Stmt
}
func NewSqliteEventStateKeysTable(db *sql.DB) (tables.EventStateKeys, error) {
func createEventStateKeysTable(db *sql.DB) error {
_, err := db.Exec(eventStateKeysSchema)
return err
}
func prepareEventStateKeysTable(db *sql.DB) (tables.EventStateKeys, error) {
s := &eventStateKeyStatements{
db: db,
}
_, err := db.Exec(eventStateKeysSchema)
if err != nil {
return nil, err
}
return s, shared.StatementList{
{&s.insertEventStateKeyNIDStmt, insertEventStateKeyNIDSQL},
{&s.selectEventStateKeyNIDStmt, selectEventStateKeyNIDSQL},

View file

@ -85,14 +85,15 @@ type eventTypeStatements struct {
bulkSelectEventTypeNIDStmt *sql.Stmt
}
func NewSqliteEventTypesTable(db *sql.DB) (tables.EventTypes, error) {
func createEventTypesTable(db *sql.DB) error {
_, err := db.Exec(eventTypesSchema)
return err
}
func prepareEventTypesTable(db *sql.DB) (tables.EventTypes, error) {
s := &eventTypeStatements{
db: db,
}
_, err := db.Exec(eventTypesSchema)
if err != nil {
return nil, err
}
return s, shared.StatementList{
{&s.insertEventTypeNIDStmt, insertEventTypeNIDSQL},

View file

@ -20,6 +20,7 @@ import (
"database/sql"
"encoding/json"
"fmt"
"sort"
"strings"
"github.com/matrix-org/dendrite/internal"
@ -63,6 +64,11 @@ const bulkSelectStateEventByIDSQL = "" +
" WHERE event_id IN ($1)" +
" ORDER BY event_type_nid, event_state_key_nid ASC"
const bulkSelectStateEventByNIDSQL = "" +
"SELECT event_type_nid, event_state_key_nid, event_nid FROM roomserver_events" +
" WHERE event_nid IN ($1)"
// Rest of query is built by BulkSelectStateEventByNID
const bulkSelectStateAtEventByIDSQL = "" +
"SELECT event_type_nid, event_state_key_nid, event_nid, state_snapshot_nid, is_rejected FROM roomserver_events" +
" WHERE event_id IN ($1)"
@ -115,14 +121,15 @@ type eventStatements struct {
//selectRoomNIDsForEventNIDsStmt *sql.Stmt
}
func NewSqliteEventsTable(db *sql.DB) (tables.Events, error) {
func createEventsTable(db *sql.DB) error {
_, err := db.Exec(eventsSchema)
return err
}
func prepareEventsTable(db *sql.DB) (tables.Events, error) {
s := &eventStatements{
db: db,
}
_, err := db.Exec(eventsSchema)
if err != nil {
return nil, err
}
return s, shared.StatementList{
{&s.insertEventStmt, insertEventSQL},
@ -232,6 +239,61 @@ func (s *eventStatements) BulkSelectStateEventByID(
return results, err
}
// BulkSelectStateEventByNID looks up a list of state events by event NID.
// If any of the requested events are missing from the database it returns a types.MissingEventError
func (s *eventStatements) BulkSelectStateEventByNID(
ctx context.Context, eventNIDs []types.EventNID,
stateKeyTuples []types.StateKeyTuple,
) ([]types.StateEntry, error) {
tuples := stateKeyTupleSorter(stateKeyTuples)
sort.Sort(tuples)
eventTypeNIDArray, eventStateKeyNIDArray := tuples.typesAndStateKeysAsArrays()
params := make([]interface{}, 0, len(eventNIDs)+len(eventTypeNIDArray)+len(eventStateKeyNIDArray))
selectOrig := strings.Replace(bulkSelectStateEventByNIDSQL, "($1)", sqlutil.QueryVariadic(len(eventNIDs)), 1)
for _, v := range eventNIDs {
params = append(params, v)
}
if len(eventTypeNIDArray) > 0 {
selectOrig += " AND event_type_nid IN " + sqlutil.QueryVariadicOffset(len(eventTypeNIDArray), len(params))
for _, v := range eventTypeNIDArray {
params = append(params, v)
}
}
if len(eventStateKeyNIDArray) > 0 {
selectOrig += " AND event_state_key_nid IN " + sqlutil.QueryVariadicOffset(len(eventStateKeyNIDArray), len(params))
for _, v := range eventStateKeyNIDArray {
params = append(params, v)
}
}
selectOrig += " ORDER BY event_type_nid, event_state_key_nid ASC"
selectStmt, err := s.db.Prepare(selectOrig)
if err != nil {
return nil, fmt.Errorf("s.db.Prepare: %w", err)
}
rows, err := selectStmt.QueryContext(ctx, params...)
if err != nil {
return nil, fmt.Errorf("selectStmt.QueryContext: %w", err)
}
defer internal.CloseAndLogIfError(ctx, rows, "bulkSelectStateEventByID: rows.close() failed")
// We know that we will only get as many results as event NIDs
// because event NIDs are unique in the events table.
// So we can allocate an array of the correct size now.
// We might get fewer results than IDs so we adjust the length of the slice before returning it.
results := make([]types.StateEntry, len(eventNIDs))
i := 0
for ; rows.Next(); i++ {
result := &results[i]
if err = rows.Scan(
&result.EventTypeNID,
&result.EventStateKeyNID,
&result.EventNID,
); err != nil {
return nil, err
}
}
return results[:i], err
}
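SQLite has no array parameters, so BulkSelectStateEventByNID expands the IN ($1) placeholder into the right number of parameters at runtime (via sqlutil.QueryVariadic / QueryVariadicOffset) and prepares the resulting statement on the fly. A self-contained sketch of that expansion is below; the helper shown is an approximation, not the sqlutil implementation.

```go
package example

import (
	"fmt"
	"strings"
)

// queryVariadic builds "($1,$2,...,$n)" placeholder lists, in the spirit of
// sqlutil.QueryVariadic / QueryVariadicOffset (the real helpers may differ).
// offset shifts the numbering so several lists can share one query.
func queryVariadic(count, offset int) string {
	parts := make([]string, count)
	for i := range parts {
		parts[i] = fmt.Sprintf("$%d", offset+i+1)
	}
	return "(" + strings.Join(parts, ",") + ")"
}

// buildBulkSelect mirrors the query construction above: a mandatory IN list of
// event NIDs, plus an optional IN list of event type NIDs.
func buildBulkSelect(eventNIDs, typeNIDs []int64) (string, []interface{}) {
	query := "SELECT event_type_nid, event_state_key_nid, event_nid FROM roomserver_events" +
		" WHERE event_nid IN " + queryVariadic(len(eventNIDs), 0)
	params := make([]interface{}, 0, len(eventNIDs)+len(typeNIDs))
	for _, nid := range eventNIDs {
		params = append(params, nid)
	}
	if len(typeNIDs) > 0 {
		query += " AND event_type_nid IN " + queryVariadic(len(typeNIDs), len(params))
		for _, nid := range typeNIDs {
			params = append(params, nid)
		}
	}
	return query + " ORDER BY event_type_nid, event_state_key_nid ASC", params
}
```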
// bulkSelectStateAtEventByID looks up the state at a list of events by event ID.
// If any of the requested events are missing from the database it returns a types.MissingEventError.
// If we do not have the state for any of the requested events it returns a types.MissingEventError.

View file

@ -70,14 +70,15 @@ type inviteStatements struct {
selectInvitesAboutToRetireStmt *sql.Stmt
}
func NewSqliteInvitesTable(db *sql.DB) (tables.Invites, error) {
func createInvitesTable(db *sql.DB) error {
_, err := db.Exec(inviteSchema)
return err
}
func prepareInvitesTable(db *sql.DB) (tables.Invites, error) {
s := &inviteStatements{
db: db,
}
_, err := db.Exec(inviteSchema)
if err != nil {
return nil, err
}
return s, shared.StatementList{
{&s.insertInviteEventStmt, insertInviteEventSQL},

View file

@ -115,7 +115,12 @@ type membershipStatements struct {
updateMembershipForgetRoomStmt *sql.Stmt
}
func NewSqliteMembershipTable(db *sql.DB) (tables.Membership, error) {
func createMembershipTable(db *sql.DB) error {
_, err := db.Exec(membershipSchema)
return err
}
func prepareMembershipTable(db *sql.DB) (tables.Membership, error) {
s := &membershipStatements{
db: db,
}
@ -135,11 +140,6 @@ func NewSqliteMembershipTable(db *sql.DB) (tables.Membership, error) {
}.Prepare(db)
}
func (s *membershipStatements) execSchema(db *sql.DB) error {
_, err := db.Exec(membershipSchema)
return err
}
func (s *membershipStatements) InsertMembership(
ctx context.Context, txn *sql.Tx,
roomNID types.RoomNID, targetUserNID types.EventStateKeyNID,

View file

@ -71,14 +71,15 @@ type previousEventStatements struct {
selectPreviousEventExistsStmt *sql.Stmt
}
func NewSqlitePrevEventsTable(db *sql.DB) (tables.PreviousEvents, error) {
func createPrevEventsTable(db *sql.DB) error {
_, err := db.Exec(previousEventSchema)
return err
}
func preparePrevEventsTable(db *sql.DB) (tables.PreviousEvents, error) {
s := &previousEventStatements{
db: db,
}
_, err := db.Exec(previousEventSchema)
if err != nil {
return nil, err
}
return s, shared.StatementList{
{&s.insertPreviousEventStmt, insertPreviousEventSQL},

View file

@ -50,14 +50,16 @@ type publishedStatements struct {
selectPublishedStmt *sql.Stmt
}
func NewSqlitePublishedTable(db *sql.DB) (tables.Published, error) {
func createPublishedTable(db *sql.DB) error {
_, err := db.Exec(publishedSchema)
return err
}
func preparePublishedTable(db *sql.DB) (tables.Published, error) {
s := &publishedStatements{
db: db,
}
_, err := db.Exec(publishedSchema)
if err != nil {
return nil, err
}
return s, shared.StatementList{
{&s.upsertPublishedStmt, upsertPublishedSQL},
{&s.selectAllPublishedStmt, selectAllPublishedSQL},

View file

@ -59,14 +59,15 @@ type redactionStatements struct {
markRedactionValidatedStmt *sql.Stmt
}
func NewSqliteRedactionsTable(db *sql.DB) (tables.Redactions, error) {
func createRedactionsTable(db *sql.DB) error {
_, err := db.Exec(redactionsSchema)
return err
}
func prepareRedactionsTable(db *sql.DB) (tables.Redactions, error) {
s := &redactionStatements{
db: db,
}
_, err := db.Exec(redactionsSchema)
if err != nil {
return nil, err
}
return s, shared.StatementList{
{&s.insertRedactionStmt, insertRedactionSQL},

View file

@ -64,14 +64,16 @@ type roomAliasesStatements struct {
deleteRoomAliasStmt *sql.Stmt
}
func NewSqliteRoomAliasesTable(db *sql.DB) (tables.RoomAliases, error) {
func createRoomAliasesTable(db *sql.DB) error {
_, err := db.Exec(roomAliasesSchema)
return err
}
func prepareRoomAliasesTable(db *sql.DB) (tables.RoomAliases, error) {
s := &roomAliasesStatements{
db: db,
}
_, err := db.Exec(roomAliasesSchema)
if err != nil {
return nil, err
}
return s, shared.StatementList{
{&s.insertRoomAliasStmt, insertRoomAliasSQL},
{&s.selectRoomIDFromAliasStmt, selectRoomIDFromAliasSQL},

View file

@ -86,14 +86,16 @@ type roomStatements struct {
selectRoomIDsStmt *sql.Stmt
}
func NewSqliteRoomsTable(db *sql.DB) (tables.Rooms, error) {
func createRoomsTable(db *sql.DB) error {
_, err := db.Exec(roomsSchema)
return err
}
func prepareRoomsTable(db *sql.DB) (tables.Rooms, error) {
s := &roomStatements{
db: db,
}
_, err := db.Exec(roomsSchema)
if err != nil {
return nil, err
}
return s, shared.StatementList{
{&s.insertRoomNIDStmt, insertRoomNIDSQL},
{&s.selectRoomNIDStmt, selectRoomNIDSQL},

View file

@ -18,6 +18,7 @@ package sqlite3
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"sort"
"strings"
@ -32,228 +33,113 @@ import (
const stateDataSchema = `
CREATE TABLE IF NOT EXISTS roomserver_state_block (
state_block_nid INTEGER NOT NULL,
event_type_nid INTEGER NOT NULL,
event_state_key_nid INTEGER NOT NULL,
event_nid INTEGER NOT NULL,
UNIQUE (state_block_nid, event_type_nid, event_state_key_nid)
-- The state block NID that identifies this block.
state_block_nid INTEGER PRIMARY KEY AUTOINCREMENT,
-- The hash of the state block, which is used to enforce uniqueness. The hash is
-- generated in Dendrite and passed through to the database, as a btree index over
-- this column is cheap and fits within the maximum index size.
state_block_hash BLOB UNIQUE,
-- The event NIDs contained within the state block, encoded as JSON.
event_nids TEXT NOT NULL DEFAULT '[]'
);
`
const insertStateDataSQL = "" +
"INSERT INTO roomserver_state_block (state_block_nid, event_type_nid, event_state_key_nid, event_nid)" +
" VALUES ($1, $2, $3, $4)"
const selectNextStateBlockNIDSQL = `
SELECT IFNULL(MAX(state_block_nid), 0) + 1 FROM roomserver_state_block
// Insert a new state block. If we conflict on the hash column then
// we must perform an update so that the RETURNING statement returns the
// ID of the row that we conflicted with, so that we can then refer to
// the original block.
const insertStateDataSQL = `
INSERT INTO roomserver_state_block (state_block_hash, event_nids)
VALUES ($1, $2)
ON CONFLICT (state_block_hash) DO UPDATE SET event_nids=$2
RETURNING state_block_nid
`
// Bulk state lookup by numeric state block ID.
// Sort by the state_block_nid, event_type_nid, event_state_key_nid
// This means that all the entries for a given state_block_nid will appear
// together in the list and those entries will be sorted by event_type_nid
// and event_state_key_nid. This property makes it easier to merge two
// state data blocks together.
const bulkSelectStateBlockEntriesSQL = "" +
"SELECT state_block_nid, event_type_nid, event_state_key_nid, event_nid" +
" FROM roomserver_state_block WHERE state_block_nid IN ($1)" +
" ORDER BY state_block_nid, event_type_nid, event_state_key_nid"
// Bulk state lookup by numeric state block ID.
// Filters the rows in each block to the requested types and state keys.
// We would like to restrict to particular type state key pairs but we are
// restricted by the query language to pull the cross product of a list
// of types and a list of state_keys. So we have to filter the result in the
// application to restrict it to the list of event types and state keys we
// actually wanted.
const bulkSelectFilteredStateBlockEntriesSQL = "" +
"SELECT state_block_nid, event_type_nid, event_state_key_nid, event_nid" +
" FROM roomserver_state_block WHERE state_block_nid IN ($1)" +
" AND event_type_nid IN ($2) AND event_state_key_nid IN ($3)" +
" ORDER BY state_block_nid, event_type_nid, event_state_key_nid"
"SELECT state_block_nid, event_nids" +
" FROM roomserver_state_block WHERE state_block_nid IN ($1)"
type stateBlockStatements struct {
db *sql.DB
insertStateDataStmt *sql.Stmt
selectNextStateBlockNIDStmt *sql.Stmt
bulkSelectStateBlockEntriesStmt *sql.Stmt
bulkSelectFilteredStateBlockEntriesStmt *sql.Stmt
}
func NewSqliteStateBlockTable(db *sql.DB) (tables.StateBlock, error) {
func createStateBlockTable(db *sql.DB) error {
_, err := db.Exec(stateDataSchema)
return err
}
func prepareStateBlockTable(db *sql.DB) (tables.StateBlock, error) {
s := &stateBlockStatements{
db: db,
}
_, err := db.Exec(stateDataSchema)
if err != nil {
return nil, err
}
return s, shared.StatementList{
{&s.insertStateDataStmt, insertStateDataSQL},
{&s.selectNextStateBlockNIDStmt, selectNextStateBlockNIDSQL},
{&s.bulkSelectStateBlockEntriesStmt, bulkSelectStateBlockEntriesSQL},
{&s.bulkSelectFilteredStateBlockEntriesStmt, bulkSelectFilteredStateBlockEntriesSQL},
}.Prepare(db)
}
func (s *stateBlockStatements) BulkInsertStateData(
ctx context.Context, txn *sql.Tx,
entries []types.StateEntry,
) (types.StateBlockNID, error) {
if len(entries) == 0 {
return 0, nil
ctx context.Context,
txn *sql.Tx,
entries types.StateEntries,
) (id types.StateBlockNID, err error) {
entries = entries[:util.SortAndUnique(entries)]
var nids types.EventNIDs
for _, e := range entries {
nids = append(nids, e.EventNID)
}
var stateBlockNID types.StateBlockNID
err := sqlutil.TxStmt(txn, s.selectNextStateBlockNIDStmt).QueryRowContext(ctx).Scan(&stateBlockNID)
js, err := json.Marshal(nids)
if err != nil {
return 0, err
return 0, fmt.Errorf("json.Marshal: %w", err)
}
for _, entry := range entries {
_, err = sqlutil.TxStmt(txn, s.insertStateDataStmt).ExecContext(
ctx,
int64(stateBlockNID),
int64(entry.EventTypeNID),
int64(entry.EventStateKeyNID),
int64(entry.EventNID),
)
if err != nil {
return 0, err
}
}
return stateBlockNID, err
err = s.insertStateDataStmt.QueryRowContext(
ctx, nids.Hash(), js,
).Scan(&id)
return
}
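Postgres stores event_nids as a native bigint[], but SQLite has no array type, so the same list is marshalled to JSON and kept in a TEXT column, then unmarshalled on every read. Below is a small sketch of that round trip, assuming the roomserver_state_block schema shown above; the function itself is only illustrative.

```go
package example

import (
	"context"
	"database/sql"
	"encoding/json"
	"fmt"
)

// writeAndReadNIDs shows the JSON-in-TEXT pattern used for event_nids on
// SQLite: marshal on write, unmarshal on read.
func writeAndReadNIDs(ctx context.Context, db *sql.DB, hash []byte, nids []int64) ([]int64, error) {
	js, err := json.Marshal(nids)
	if err != nil {
		return nil, fmt.Errorf("json.Marshal: %w", err)
	}
	if _, err = db.ExecContext(ctx,
		`INSERT INTO roomserver_state_block (state_block_hash, event_nids) VALUES ($1, $2)`,
		hash, string(js)); err != nil {
		return nil, fmt.Errorf("insert: %w", err)
	}
	var raw string
	if err = db.QueryRowContext(ctx,
		`SELECT event_nids FROM roomserver_state_block WHERE state_block_hash = $1`,
		hash).Scan(&raw); err != nil {
		return nil, fmt.Errorf("select: %w", err)
	}
	var out []int64
	if err = json.Unmarshal([]byte(raw), &out); err != nil {
		return nil, fmt.Errorf("json.Unmarshal: %w", err)
	}
	return out, nil
}
```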
func (s *stateBlockStatements) BulkSelectStateBlockEntries(
ctx context.Context, stateBlockNIDs []types.StateBlockNID,
) ([]types.StateEntryList, error) {
nids := make([]interface{}, len(stateBlockNIDs))
for k, v := range stateBlockNIDs {
nids[k] = v
ctx context.Context, stateBlockNIDs types.StateBlockNIDs,
) ([][]types.EventNID, error) {
intfs := make([]interface{}, len(stateBlockNIDs))
for i := range stateBlockNIDs {
intfs[i] = int64(stateBlockNIDs[i])
}
selectOrig := strings.Replace(bulkSelectStateBlockEntriesSQL, "($1)", sqlutil.QueryVariadic(len(nids)), 1)
selectOrig := strings.Replace(bulkSelectStateBlockEntriesSQL, "($1)", sqlutil.QueryVariadic(len(intfs)), 1)
selectStmt, err := s.db.Prepare(selectOrig)
if err != nil {
return nil, err
}
rows, err := selectStmt.QueryContext(ctx, nids...)
rows, err := selectStmt.QueryContext(ctx, intfs...)
if err != nil {
return nil, err
}
defer internal.CloseAndLogIfError(ctx, rows, "bulkSelectStateBlockEntries: rows.close() failed")
results := make([]types.StateEntryList, len(stateBlockNIDs))
// current is a pointer to the StateEntryList to append the state entries to.
var current *types.StateEntryList
results := make([][]types.EventNID, len(stateBlockNIDs))
i := 0
for rows.Next() {
var (
stateBlockNID int64
eventTypeNID int64
eventStateKeyNID int64
eventNID int64
entry types.StateEntry
)
if err := rows.Scan(
&stateBlockNID, &eventTypeNID, &eventStateKeyNID, &eventNID,
); err != nil {
for ; rows.Next(); i++ {
var stateBlockNID types.StateBlockNID
var result json.RawMessage
if err = rows.Scan(&stateBlockNID, &result); err != nil {
return nil, err
}
entry.EventTypeNID = types.EventTypeNID(eventTypeNID)
entry.EventStateKeyNID = types.EventStateKeyNID(eventStateKeyNID)
entry.EventNID = types.EventNID(eventNID)
if current == nil || types.StateBlockNID(stateBlockNID) != current.StateBlockNID {
// The state entry row is for a different state data block to the current one.
// So we start appending to the next entry in the list.
current = &results[i]
current.StateBlockNID = types.StateBlockNID(stateBlockNID)
i++
r := []types.EventNID{}
if err = json.Unmarshal(result, &r); err != nil {
return nil, fmt.Errorf("json.Unmarshal: %w", err)
}
current.StateEntries = append(current.StateEntries, entry)
results[i] = r
}
if i != len(nids) {
return nil, fmt.Errorf("storage: state data NIDs missing from the database (%d != %d)", i, len(nids))
}
return results, nil
}
func (s *stateBlockStatements) BulkSelectFilteredStateBlockEntries(
ctx context.Context,
stateBlockNIDs []types.StateBlockNID,
stateKeyTuples []types.StateKeyTuple,
) ([]types.StateEntryList, error) {
tuples := stateKeyTupleSorter(stateKeyTuples)
// Sort the tuples so that we can run binary search against them as we filter the rows returned by the db.
sort.Sort(tuples)
eventTypeNIDArray, eventStateKeyNIDArray := tuples.typesAndStateKeysAsArrays()
sqlStatement := strings.Replace(bulkSelectFilteredStateBlockEntriesSQL, "($1)", sqlutil.QueryVariadic(len(stateBlockNIDs)), 1)
sqlStatement = strings.Replace(sqlStatement, "($2)", sqlutil.QueryVariadicOffset(len(eventTypeNIDArray), len(stateBlockNIDs)), 1)
sqlStatement = strings.Replace(sqlStatement, "($3)", sqlutil.QueryVariadicOffset(len(eventStateKeyNIDArray), len(stateBlockNIDs)+len(eventTypeNIDArray)), 1)
var params []interface{}
for _, val := range stateBlockNIDs {
params = append(params, int64(val))
}
for _, val := range eventTypeNIDArray {
params = append(params, val)
}
for _, val := range eventStateKeyNIDArray {
params = append(params, val)
}
rows, err := s.db.QueryContext(
ctx,
sqlStatement,
params...,
)
if err != nil {
if err = rows.Err(); err != nil {
return nil, err
}
defer internal.CloseAndLogIfError(ctx, rows, "bulkSelectFilteredStateBlockEntries: rows.close() failed")
var results []types.StateEntryList
var current types.StateEntryList
for rows.Next() {
var (
stateBlockNID int64
eventTypeNID int64
eventStateKeyNID int64
eventNID int64
entry types.StateEntry
)
if err := rows.Scan(
&stateBlockNID, &eventTypeNID, &eventStateKeyNID, &eventNID,
); err != nil {
return nil, err
if i != len(stateBlockNIDs) {
return nil, fmt.Errorf("storage: state data NIDs missing from the database (%d != %d)", len(results), len(stateBlockNIDs))
}
entry.EventTypeNID = types.EventTypeNID(eventTypeNID)
entry.EventStateKeyNID = types.EventStateKeyNID(eventStateKeyNID)
entry.EventNID = types.EventNID(eventNID)
// We can use binary search here because we sorted the tuples earlier
if !tuples.contains(entry.StateKeyTuple) {
// The select will return the cross product of types and state keys.
// So we need to check if type of the entry is in the list.
continue
}
if types.StateBlockNID(stateBlockNID) != current.StateBlockNID {
// The state entry row is for a different state data block to the current one.
// So we append the current entry to the results and start adding to a new one.
// The first time through the loop current will be empty.
if current.StateEntries != nil {
results = append(results, current)
}
current = types.StateEntryList{StateBlockNID: types.StateBlockNID(stateBlockNID)}
}
current.StateEntries = append(current.StateEntries, entry)
}
// Add the last entry to the list if it is not empty.
if current.StateEntries != nil {
results = append(results, current)
}
return results, nil
return results, err
}
type stateKeyTupleSorter []types.StateKeyTuple

View file

@ -27,19 +27,34 @@ import (
"github.com/matrix-org/dendrite/roomserver/storage/shared"
"github.com/matrix-org/dendrite/roomserver/storage/tables"
"github.com/matrix-org/dendrite/roomserver/types"
"github.com/matrix-org/util"
)
const stateSnapshotSchema = `
CREATE TABLE IF NOT EXISTS roomserver_state_snapshots (
-- The state snapshot NID that identifies this snapshot.
state_snapshot_nid INTEGER PRIMARY KEY AUTOINCREMENT,
-- The hash of the state snapshot, which is used to enforce uniqueness. The hash is
-- generated in Dendrite and passed through to the database, as a btree index over
-- this column is cheap and fits within the maximum index size.
state_snapshot_hash BLOB UNIQUE,
-- The room NID that the snapshot belongs to.
room_nid INTEGER NOT NULL,
-- The state blocks contained within this snapshot, encoded as JSON.
state_block_nids TEXT NOT NULL DEFAULT '[]'
);
`
// Insert a new state snapshot. If we conflict on the hash column then
// we must perform an update so that the RETURNING statement returns the
// ID of the row that we conflicted with, so that we can then refer to
// the original snapshot.
const insertStateSQL = `
INSERT INTO roomserver_state_snapshots (room_nid, state_block_nids)
VALUES ($1, $2);`
INSERT INTO roomserver_state_snapshots (state_snapshot_hash, room_nid, state_block_nids)
VALUES ($1, $2, $3)
ON CONFLICT (state_snapshot_hash) DO UPDATE SET room_nid=$2
RETURNING state_snapshot_nid
`
// Bulk state data NID lookup.
// Sorting by state_snapshot_nid means we can use binary search over the result
@ -54,14 +69,15 @@ type stateSnapshotStatements struct {
bulkSelectStateBlockNIDsStmt *sql.Stmt
}
func NewSqliteStateSnapshotTable(db *sql.DB) (tables.StateSnapshot, error) {
func createStateSnapshotTable(db *sql.DB) error {
_, err := db.Exec(stateSnapshotSchema)
return err
}
func prepareStateSnapshotTable(db *sql.DB) (tables.StateSnapshot, error) {
s := &stateSnapshotStatements{
db: db,
}
_, err := db.Exec(stateSnapshotSchema)
if err != nil {
return nil, err
}
return s, shared.StatementList{
{&s.insertStateStmt, insertStateSQL},
@ -70,22 +86,20 @@ func NewSqliteStateSnapshotTable(db *sql.DB) (tables.StateSnapshot, error) {
}
func (s *stateSnapshotStatements) InsertState(
ctx context.Context, txn *sql.Tx, roomNID types.RoomNID, stateBlockNIDs []types.StateBlockNID,
ctx context.Context, txn *sql.Tx, roomNID types.RoomNID, stateBlockNIDs types.StateBlockNIDs,
) (stateNID types.StateSnapshotNID, err error) {
stateBlockNIDs = stateBlockNIDs[:util.SortAndUnique(stateBlockNIDs)]
stateBlockNIDsJSON, err := json.Marshal(stateBlockNIDs)
if err != nil {
return
}
insertStmt := sqlutil.TxStmt(txn, s.insertStateStmt)
res, err := insertStmt.ExecContext(ctx, int64(roomNID), string(stateBlockNIDsJSON))
var id int64
err = insertStmt.QueryRowContext(ctx, stateBlockNIDs.Hash(), int64(roomNID), string(stateBlockNIDsJSON)).Scan(&id)
if err != nil {
return 0, err
}
lastRowID, err := res.LastInsertId()
if err != nil {
return 0, err
}
stateNID = types.StateSnapshotNID(lastRowID)
stateNID = types.StateSnapshotNID(id)
return
}

View file

@ -53,17 +53,22 @@ func Open(dbProperties *config.DatabaseOptions, cache caching.RoomServerCaches)
// which it will never obtain.
db.SetMaxOpenConns(20)
// Create tables before executing migrations so we don't fail if the table is missing,
// and THEN prepare statements so we don't fail due to referencing new columns
ms := membershipStatements{}
if err := ms.execSchema(db); err != nil {
// Create the tables.
if err := d.create(db); err != nil {
return nil, err
}
// Then execute the migrations. By this point the tables are created with the latest
// schemas.
m := sqlutil.NewMigrations()
deltas.LoadAddForgottenColumn(m)
deltas.LoadStateBlocksRefactor(m)
if err := m.RunDeltas(db, dbProperties); err != nil {
return nil, err
}
// Then prepare the statements. Now that the migrations have run, any columns referred
// to in the database code should now exist.
if err := d.prepare(db, cache); err != nil {
return nil, err
}
@ -71,62 +76,107 @@ func Open(dbProperties *config.DatabaseOptions, cache caching.RoomServerCaches)
return &d, nil
}
// nolint: gocyclo
func (d *Database) create(db *sql.DB) error {
if err := createEventStateKeysTable(db); err != nil {
return err
}
if err := createEventTypesTable(db); err != nil {
return err
}
if err := createEventJSONTable(db); err != nil {
return err
}
if err := createEventsTable(db); err != nil {
return err
}
if err := createRoomsTable(db); err != nil {
return err
}
if err := createTransactionsTable(db); err != nil {
return err
}
if err := createStateBlockTable(db); err != nil {
return err
}
if err := createStateSnapshotTable(db); err != nil {
return err
}
if err := createPrevEventsTable(db); err != nil {
return err
}
if err := createRoomAliasesTable(db); err != nil {
return err
}
if err := createInvitesTable(db); err != nil {
return err
}
if err := createMembershipTable(db); err != nil {
return err
}
if err := createPublishedTable(db); err != nil {
return err
}
if err := createRedactionsTable(db); err != nil {
return err
}
return nil
}
func (d *Database) prepare(db *sql.DB, cache caching.RoomServerCaches) error {
var err error
eventStateKeys, err := NewSqliteEventStateKeysTable(db)
eventStateKeys, err := prepareEventStateKeysTable(db)
if err != nil {
return err
}
eventTypes, err := NewSqliteEventTypesTable(db)
eventTypes, err := prepareEventTypesTable(db)
if err != nil {
return err
}
eventJSON, err := NewSqliteEventJSONTable(db)
eventJSON, err := prepareEventJSONTable(db)
if err != nil {
return err
}
events, err := NewSqliteEventsTable(db)
events, err := prepareEventsTable(db)
if err != nil {
return err
}
rooms, err := NewSqliteRoomsTable(db)
rooms, err := prepareRoomsTable(db)
if err != nil {
return err
}
transactions, err := NewSqliteTransactionsTable(db)
transactions, err := prepareTransactionsTable(db)
if err != nil {
return err
}
stateBlock, err := NewSqliteStateBlockTable(db)
stateBlock, err := prepareStateBlockTable(db)
if err != nil {
return err
}
stateSnapshot, err := NewSqliteStateSnapshotTable(db)
stateSnapshot, err := prepareStateSnapshotTable(db)
if err != nil {
return err
}
prevEvents, err := NewSqlitePrevEventsTable(db)
prevEvents, err := preparePrevEventsTable(db)
if err != nil {
return err
}
roomAliases, err := NewSqliteRoomAliasesTable(db)
roomAliases, err := prepareRoomAliasesTable(db)
if err != nil {
return err
}
invites, err := NewSqliteInvitesTable(db)
invites, err := prepareInvitesTable(db)
if err != nil {
return err
}
membership, err := NewSqliteMembershipTable(db)
membership, err := prepareMembershipTable(db)
if err != nil {
return err
}
published, err := NewSqlitePublishedTable(db)
published, err := preparePublishedTable(db)
if err != nil {
return err
}
redactions, err := NewSqliteRedactionsTable(db)
redactions, err := prepareRedactionsTable(db)
if err != nil {
return err
}

View file

@ -49,14 +49,15 @@ type transactionStatements struct {
selectTransactionEventIDStmt *sql.Stmt
}
func NewSqliteTransactionsTable(db *sql.DB) (tables.Transactions, error) {
func createTransactionsTable(db *sql.DB) error {
_, err := db.Exec(transactionsSchema)
return err
}
func prepareTransactionsTable(db *sql.DB) (tables.Transactions, error) {
s := &transactionStatements{
db: db,
}
_, err := db.Exec(transactionsSchema)
if err != nil {
return nil, err
}
return s, shared.StatementList{
{&s.insertTransactionStmt, insertTransactionSQL},

View file

@ -43,6 +43,7 @@ type Events interface {
// bulkSelectStateEventByID looks up a list of state events by event ID.
// If any of the requested events are missing from the database it returns a types.MissingEventError
BulkSelectStateEventByID(ctx context.Context, eventIDs []string) ([]types.StateEntry, error)
BulkSelectStateEventByNID(ctx context.Context, eventNIDs []types.EventNID, stateKeyTuples []types.StateKeyTuple) ([]types.StateEntry, error)
// BulkSelectStateAtEventByID lookups the state at a list of events by event ID.
// If any of the requested events are missing from the database it returns a types.MissingEventError.
// If we do not have the state for any of the requested events it returns a types.MissingEventError.
@ -81,14 +82,14 @@ type Transactions interface {
}
type StateSnapshot interface {
InsertState(ctx context.Context, txn *sql.Tx, roomNID types.RoomNID, stateBlockNIDs []types.StateBlockNID) (stateNID types.StateSnapshotNID, err error)
InsertState(ctx context.Context, txn *sql.Tx, roomNID types.RoomNID, stateBlockNIDs types.StateBlockNIDs) (stateNID types.StateSnapshotNID, err error)
BulkSelectStateBlockNIDs(ctx context.Context, stateNIDs []types.StateSnapshotNID) ([]types.StateBlockNIDList, error)
}
type StateBlock interface {
BulkInsertStateData(ctx context.Context, txn *sql.Tx, entries []types.StateEntry) (types.StateBlockNID, error)
BulkSelectStateBlockEntries(ctx context.Context, stateBlockNIDs []types.StateBlockNID) ([]types.StateEntryList, error)
BulkSelectFilteredStateBlockEntries(ctx context.Context, stateBlockNIDs []types.StateBlockNID, stateKeyTuples []types.StateKeyTuple) ([]types.StateEntryList, error)
BulkInsertStateData(ctx context.Context, txn *sql.Tx, entries types.StateEntries) (types.StateBlockNID, error)
BulkSelectStateBlockEntries(ctx context.Context, stateBlockNIDs types.StateBlockNIDs) ([][]types.EventNID, error)
//BulkSelectFilteredStateBlockEntries(ctx context.Context, stateBlockNIDs []types.StateBlockNID, stateKeyTuples []types.StateKeyTuple) ([]types.StateEntryList, error)
}
type RoomAliases interface {

View file

@ -16,9 +16,11 @@
package types
import (
"encoding/json"
"sort"
"github.com/matrix-org/gomatrixserverlib"
"golang.org/x/crypto/blake2b"
)
// EventTypeNID is a numeric ID for an event type.
@ -40,6 +42,38 @@ type StateSnapshotNID int64
// These blocks of state data are combined to form the actual state.
type StateBlockNID int64
// EventNIDs is used to sort and dedupe event NIDs.
type EventNIDs []EventNID
func (a EventNIDs) Len() int { return len(a) }
func (a EventNIDs) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a EventNIDs) Less(i, j int) bool { return a[i] < a[j] }
func (a EventNIDs) Hash() []byte {
j, err := json.Marshal(a)
if err != nil {
return nil
}
h := blake2b.Sum256(j)
return h[:]
}
// StateBlockNIDs is used to sort and dedupe state block NIDs.
type StateBlockNIDs []StateBlockNID
func (a StateBlockNIDs) Len() int { return len(a) }
func (a StateBlockNIDs) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a StateBlockNIDs) Less(i, j int) bool { return a[i] < a[j] }
func (a StateBlockNIDs) Hash() []byte {
j, err := json.Marshal(a)
if err != nil {
return nil
}
h := blake2b.Sum256(j)
return h[:]
}
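The Hash() methods make block and snapshot identity content-addressed: callers sort and deduplicate first, so two orderings of the same NIDs serialise to the same JSON and hash to the same blake2b-256 digest, which is what the UNIQUE hash columns key on. The short demonstration below folds the sort step into the helper for clarity; in the change itself sorting happens in the caller via util.SortAndUnique before Hash() runs.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"sort"

	"golang.org/x/crypto/blake2b"
)

// hashNIDs canonicalises (sorts) a copy of the input, marshals it to JSON and
// takes a blake2b-256 digest, mirroring the Hash() helpers above.
func hashNIDs(nids []int64) []byte {
	sorted := append([]int64(nil), nids...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
	js, err := json.Marshal(sorted)
	if err != nil {
		return nil
	}
	sum := blake2b.Sum256(js)
	return sum[:]
}

func main() {
	a := hashNIDs([]int64{3, 1, 2})
	b := hashNIDs([]int64{2, 3, 1})
	// Same members in a different order hash identically, so the hash-keyed
	// upserts collapse them onto a single row.
	fmt.Println(bytes.Equal(a, b)) // true
}
```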
// A StateKeyTuple is a pair of a numeric event type and a numeric state key.
// It is used to lookup state entries.
type StateKeyTuple struct {
@ -65,6 +99,12 @@ type StateEntry struct {
EventNID EventNID
}
type StateEntries []StateEntry
func (a StateEntries) Len() int { return len(a) }
func (a StateEntries) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a StateEntries) Less(i, j int) bool { return a[i].EventNID < a[j].EventNID }
// LessThan returns true if this state entry is less than the other state entry.
// The ordering is arbitrary and is used to implement binary search and to efficiently deduplicate entries.
func (a StateEntry) LessThan(b StateEntry) bool {

View file

@ -280,7 +280,7 @@ func (b *BaseDendrite) KeyServerHTTPClient() keyserverAPI.KeyInternalAPI {
// CreateAccountsDB creates a new instance of the accounts database. Should only
// be called once per component.
func (b *BaseDendrite) CreateAccountsDB() accounts.Database {
db, err := accounts.NewDatabase(&b.Cfg.UserAPI.AccountDatabase, b.Cfg.Global.ServerName, b.Cfg.UserAPI.BCryptCost)
db, err := accounts.NewDatabase(&b.Cfg.UserAPI.AccountDatabase, b.Cfg.Global.ServerName, b.Cfg.UserAPI.BCryptCost, b.Cfg.UserAPI.OpenIDTokenLifetimeMS)
if err != nil {
logrus.WithError(err).Panicf("failed to connect to accounts db")
}

View file

@ -10,6 +10,9 @@ type UserAPI struct {
// The cost when hashing passwords.
BCryptCost int `yaml:"bcrypt_cost"`
// The length of time an OpenID token is considered valid in milliseconds
OpenIDTokenLifetimeMS int64 `yaml:"openid_token_lifetime_ms"`
// The Account database stores the login details and account information
// for local users. It is accessed by the UserAPI.
AccountDatabase DatabaseOptions `yaml:"account_database"`
@ -18,6 +21,8 @@ type UserAPI struct {
DeviceDatabase DatabaseOptions `yaml:"device_database"`
}
const DefaultOpenIDTokenLifetimeMS = 3600000 // 60 minutes
func (c *UserAPI) Defaults() {
c.InternalAPI.Listen = "http://localhost:7781"
c.InternalAPI.Connect = "http://localhost:7781"
@ -26,6 +31,7 @@ func (c *UserAPI) Defaults() {
c.AccountDatabase.ConnectionString = "file:userapi_accounts.db"
c.DeviceDatabase.ConnectionString = "file:userapi_devices.db"
c.BCryptCost = bcrypt.DefaultCost
c.OpenIDTokenLifetimeMS = DefaultOpenIDTokenLifetimeMS
}
func (c *UserAPI) Verify(configErrs *ConfigErrors, isMonolith bool) {
@ -33,4 +39,5 @@ func (c *UserAPI) Verify(configErrs *ConfigErrors, isMonolith bool) {
checkURL(configErrs, "user_api.internal_api.connect", string(c.InternalAPI.Connect))
checkNotEmpty(configErrs, "user_api.account_database.connection_string", string(c.AccountDatabase.ConnectionString))
checkNotEmpty(configErrs, "user_api.device_database.connection_string", string(c.DeviceDatabase.ConnectionString))
checkPositive(configErrs, "user_api.openid_token_lifetime_ms", c.OpenIDTokenLifetimeMS)
}
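// exampleShortOpenIDLifetime is an illustrative sketch only, not part of this
// change; the helper is hypothetical. It shows a deployment-specific lifetime
// replacing the 60-minute default set above before the config is validated,
// using the Defaults and Verify methods shown in this file (ConfigErrors is
// the error-collecting type Verify already takes).
func exampleShortOpenIDLifetime() (UserAPI, ConfigErrors) {
	var c UserAPI
	c.Defaults()
	c.OpenIDTokenLifetimeMS = 15 * 60 * 1000 // 15 minutes instead of DefaultOpenIDTokenLifetimeMS
	var errs ConfigErrors
	c.Verify(&errs, true) // true = monolith mode, matching the Verify signature above
	return c, errs
}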

View file

@ -524,6 +524,9 @@ func (u *testUserAPI) PerformLastSeenUpdate(ctx context.Context, req *userapi.Pe
func (u *testUserAPI) PerformAccountDeactivation(ctx context.Context, req *userapi.PerformAccountDeactivationRequest, res *userapi.PerformAccountDeactivationResponse) error {
return nil
}
func (u *testUserAPI) PerformOpenIDTokenCreation(ctx context.Context, req *userapi.PerformOpenIDTokenCreationRequest, res *userapi.PerformOpenIDTokenCreationResponse) error {
return nil
}
func (u *testUserAPI) QueryProfile(ctx context.Context, req *userapi.QueryProfileRequest, res *userapi.QueryProfileResponse) error {
return nil
}
@ -548,6 +551,9 @@ func (u *testUserAPI) QueryDeviceInfos(ctx context.Context, req *userapi.QueryDe
func (u *testUserAPI) QuerySearchProfiles(ctx context.Context, req *userapi.QuerySearchProfilesRequest, res *userapi.QuerySearchProfilesResponse) error {
return nil
}
func (u *testUserAPI) QueryOpenIDToken(ctx context.Context, req *userapi.QueryOpenIDTokenRequest, res *userapi.QueryOpenIDTokenResponse) error {
return nil
}
type testRoomserverAPI struct {
// use a trace API as it implements method stubs so we don't need to have them here.

View file

@ -367,6 +367,9 @@ func (u *testUserAPI) PerformLastSeenUpdate(ctx context.Context, req *userapi.Pe
func (u *testUserAPI) PerformAccountDeactivation(ctx context.Context, req *userapi.PerformAccountDeactivationRequest, res *userapi.PerformAccountDeactivationResponse) error {
return nil
}
func (u *testUserAPI) PerformOpenIDTokenCreation(ctx context.Context, req *userapi.PerformOpenIDTokenCreationRequest, res *userapi.PerformOpenIDTokenCreationResponse) error {
return nil
}
func (u *testUserAPI) QueryProfile(ctx context.Context, req *userapi.QueryProfileRequest, res *userapi.QueryProfileResponse) error {
return nil
}
@ -391,6 +394,9 @@ func (u *testUserAPI) QueryDeviceInfos(ctx context.Context, req *userapi.QueryDe
func (u *testUserAPI) QuerySearchProfiles(ctx context.Context, req *userapi.QuerySearchProfilesRequest, res *userapi.QuerySearchProfilesResponse) error {
return nil
}
func (u *testUserAPI) QueryOpenIDToken(ctx context.Context, req *userapi.QueryOpenIDTokenRequest, res *userapi.QueryOpenIDTokenResponse) error {
return nil
}
type testRoomserverAPI struct {
// use a trace API as it implements method stubs so we don't need to have them here.

View file

@ -156,7 +156,7 @@ func (rp *RequestPool) OnIncomingSyncRequest(req *http.Request, device *userapi.
currentPos := rp.Notifier.CurrentPosition()
if !rp.shouldReturnImmediately(syncReq) {
if !rp.shouldReturnImmediately(syncReq, currentPos) {
timer := time.NewTimer(syncReq.Timeout) // case of timeout=0 is handled above
defer timer.Stop()
@ -303,8 +303,8 @@ func (rp *RequestPool) OnIncomingKeyChangeRequest(req *http.Request, device *use
// shouldReturnImmediately returns whether the /sync request should be answered
// straight away: either the notifier's current position is already ahead of the
// client's since token (which also covers initial syncs), or timeout=0, or
// full_state=true was requested.
func (rp *RequestPool) shouldReturnImmediately(syncReq *types.SyncRequest) bool {
if syncReq.Since.IsEmpty() || syncReq.Timeout == 0 || syncReq.WantFullState {
func (rp *RequestPool) shouldReturnImmediately(syncReq *types.SyncRequest, currentPos types.StreamingToken) bool {
if currentPos.IsAfter(syncReq.Since) || syncReq.Timeout == 0 || syncReq.WantFullState {
return true
}
return false

View file

@ -517,3 +517,6 @@ AS can set avatar for ghosted users
AS can set displayname for ghosted users
Ghost user must register before joining room
Inviting an AS-hosted user asks the AS server
Can generate a openid access_token that can be exchanged for information about a user
Invalid openid access tokens are rejected
Requests to userinfo without access tokens are rejected

View file

@ -32,12 +32,14 @@ type UserInternalAPI interface {
PerformLastSeenUpdate(ctx context.Context, req *PerformLastSeenUpdateRequest, res *PerformLastSeenUpdateResponse) error
PerformDeviceUpdate(ctx context.Context, req *PerformDeviceUpdateRequest, res *PerformDeviceUpdateResponse) error
PerformAccountDeactivation(ctx context.Context, req *PerformAccountDeactivationRequest, res *PerformAccountDeactivationResponse) error
PerformOpenIDTokenCreation(ctx context.Context, req *PerformOpenIDTokenCreationRequest, res *PerformOpenIDTokenCreationResponse) error
QueryProfile(ctx context.Context, req *QueryProfileRequest, res *QueryProfileResponse) error
QueryAccessToken(ctx context.Context, req *QueryAccessTokenRequest, res *QueryAccessTokenResponse) error
QueryDevices(ctx context.Context, req *QueryDevicesRequest, res *QueryDevicesResponse) error
QueryAccountData(ctx context.Context, req *QueryAccountDataRequest, res *QueryAccountDataResponse) error
QueryDeviceInfos(ctx context.Context, req *QueryDeviceInfosRequest, res *QueryDeviceInfosResponse) error
QuerySearchProfiles(ctx context.Context, req *QuerySearchProfilesRequest, res *QuerySearchProfilesResponse) error
QueryOpenIDToken(ctx context.Context, req *QueryOpenIDTokenRequest, res *QueryOpenIDTokenResponse) error
}
// InputAccountDataRequest is the request for InputAccountData
@ -226,6 +228,27 @@ type PerformAccountDeactivationResponse struct {
AccountDeactivated bool
}
// PerformOpenIDTokenCreationRequest is the request for PerformOpenIDTokenCreation
type PerformOpenIDTokenCreationRequest struct {
UserID string
}
// PerformOpenIDTokenCreationResponse is the response for PerformOpenIDTokenCreation
type PerformOpenIDTokenCreationResponse struct {
Token OpenIDToken
}
// QueryOpenIDTokenRequest is the request for QueryOpenIDToken
type QueryOpenIDTokenRequest struct {
Token string
}
// QueryOpenIDTokenResponse is the response for QueryOpenIDToken
type QueryOpenIDTokenResponse struct {
Sub string // The Matrix user ID the token was issued for
ExpiresAtMS int64
}
// Device represents a client's device (mobile, web, etc)
type Device struct {
ID string
@ -256,6 +279,24 @@ type Account struct {
// TODO: Associations (e.g. with application services)
}
// OpenIDToken represents an OpenID token
type OpenIDToken struct {
Token string
UserID string
ExpiresAtMS int64
}
// OpenIDTokenAttributes represents the attributes associated with an issued OpenID token
type OpenIDTokenAttributes struct {
UserID string
ExpiresAtMS int64
}
// UserInfo is for returning information about the user an OpenID token was issued for
type UserInfo struct {
Sub string // The Matrix user ID the token was issued for
}
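// exampleOpenIDRoundTrip is an illustrative sketch, not part of this change;
// the function name and the user ID below are hypothetical. It exercises the
// two new interface methods end to end: mint an OpenID token for a local user,
// then resolve it back to the user it was issued for, which is the round trip
// a relying party depends on.
func exampleOpenIDRoundTrip(ctx context.Context, userAPI UserInternalAPI) (string, error) {
	var createRes PerformOpenIDTokenCreationResponse
	if err := userAPI.PerformOpenIDTokenCreation(ctx, &PerformOpenIDTokenCreationRequest{
		UserID: "@alice:example.com", // hypothetical Matrix user ID
	}, &createRes); err != nil {
		return "", err
	}
	var queryRes QueryOpenIDTokenResponse
	if err := userAPI.QueryOpenIDToken(ctx, &QueryOpenIDTokenRequest{
		Token: createRes.Token.Token,
	}, &queryRes); err != nil {
		return "", err
	}
	return queryRes.Sub, nil // the user the token was issued for; ExpiresAtMS gives its expiry
}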
// ErrorForbidden is an error indicating that the supplied access token is forbidden
type ErrorForbidden struct {
Message string

View file

@ -414,3 +414,31 @@ func (a *UserInternalAPI) PerformAccountDeactivation(ctx context.Context, req *a
res.AccountDeactivated = err == nil
return err
}
// PerformOpenIDTokenCreation creates a new token that a relying party uses to authenticate a user
func (a *UserInternalAPI) PerformOpenIDTokenCreation(ctx context.Context, req *api.PerformOpenIDTokenCreationRequest, res *api.PerformOpenIDTokenCreationResponse) error {
token := util.RandomString(24)
exp, err := a.AccountDB.CreateOpenIDToken(ctx, token, req.UserID)
res.Token = api.OpenIDToken{
Token: token,
UserID: req.UserID,
ExpiresAtMS: exp,
}
return err
}
// QueryOpenIDToken returns the user an OpenID token was issued for; the relying party uses this to validate the token
func (a *UserInternalAPI) QueryOpenIDToken(ctx context.Context, req *api.QueryOpenIDTokenRequest, res *api.QueryOpenIDTokenResponse) error {
openIDTokenAttrs, err := a.AccountDB.GetOpenIDTokenAttributes(ctx, req.Token)
if err != nil {
return err
}
res.Sub = openIDTokenAttrs.UserID
res.ExpiresAtMS = openIDTokenAttrs.ExpiresAtMS
return nil
}

View file

@ -35,6 +35,7 @@ const (
PerformLastSeenUpdatePath = "/userapi/performLastSeenUpdate"
PerformDeviceUpdatePath = "/userapi/performDeviceUpdate"
PerformAccountDeactivationPath = "/userapi/performAccountDeactivation"
PerformOpenIDTokenCreationPath = "/userapi/performOpenIDTokenCreation"
QueryProfilePath = "/userapi/queryProfile"
QueryAccessTokenPath = "/userapi/queryAccessToken"
@ -42,6 +43,7 @@ const (
QueryAccountDataPath = "/userapi/queryAccountData"
QueryDeviceInfosPath = "/userapi/queryDeviceInfos"
QuerySearchProfilesPath = "/userapi/querySearchProfiles"
QueryOpenIDTokenPath = "/userapi/queryOpenIDToken"
)
// NewUserAPIClient creates a UserInternalAPI implemented by talking to an HTTP POST API.
@ -148,6 +150,14 @@ func (h *httpUserInternalAPI) PerformAccountDeactivation(ctx context.Context, re
return httputil.PostJSON(ctx, span, h.httpClient, apiURL, req, res)
}
func (h *httpUserInternalAPI) PerformOpenIDTokenCreation(ctx context.Context, request *api.PerformOpenIDTokenCreationRequest, response *api.PerformOpenIDTokenCreationResponse) error {
span, ctx := opentracing.StartSpanFromContext(ctx, "PerformOpenIDTokenCreation")
defer span.Finish()
apiURL := h.apiURL + PerformOpenIDTokenCreationPath
return httputil.PostJSON(ctx, span, h.httpClient, apiURL, request, response)
}
func (h *httpUserInternalAPI) QueryProfile(
ctx context.Context,
request *api.QueryProfileRequest,
@ -207,3 +217,11 @@ func (h *httpUserInternalAPI) QuerySearchProfiles(ctx context.Context, req *api.
apiURL := h.apiURL + QuerySearchProfilesPath
return httputil.PostJSON(ctx, span, h.httpClient, apiURL, req, res)
}
func (h *httpUserInternalAPI) QueryOpenIDToken(ctx context.Context, req *api.QueryOpenIDTokenRequest, res *api.QueryOpenIDTokenResponse) error {
span, ctx := opentracing.StartSpanFromContext(ctx, "QueryOpenIDToken")
defer span.Finish()
apiURL := h.apiURL + QueryOpenIDTokenPath
return httputil.PostJSON(ctx, span, h.httpClient, apiURL, req, res)
}

View file

@ -117,6 +117,19 @@ func AddRoutes(internalAPIMux *mux.Router, s api.UserInternalAPI) {
return util.JSONResponse{Code: http.StatusOK, JSON: &response}
}),
)
internalAPIMux.Handle(PerformOpenIDTokenCreationPath,
httputil.MakeInternalAPI("performOpenIDTokenCreation", func(req *http.Request) util.JSONResponse {
request := api.PerformOpenIDTokenCreationRequest{}
response := api.PerformOpenIDTokenCreationResponse{}
if err := json.NewDecoder(req.Body).Decode(&request); err != nil {
return util.MessageResponse(http.StatusBadRequest, err.Error())
}
if err := s.PerformOpenIDTokenCreation(req.Context(), &request, &response); err != nil {
return util.ErrorResponse(err)
}
return util.JSONResponse{Code: http.StatusOK, JSON: &response}
}),
)
internalAPIMux.Handle(QueryProfilePath,
httputil.MakeInternalAPI("queryProfile", func(req *http.Request) util.JSONResponse {
request := api.QueryProfileRequest{}
@ -195,6 +208,19 @@ func AddRoutes(internalAPIMux *mux.Router, s api.UserInternalAPI) {
return util.JSONResponse{Code: http.StatusOK, JSON: &response}
}),
)
internalAPIMux.Handle(QueryOpenIDTokenPath,
httputil.MakeInternalAPI("queryOpenIDToken", func(req *http.Request) util.JSONResponse {
request := api.QueryOpenIDTokenRequest{}
response := api.QueryOpenIDTokenResponse{}
if err := json.NewDecoder(req.Body).Decode(&request); err != nil {
return util.MessageResponse(http.StatusBadRequest, err.Error())
}
if err := s.QueryOpenIDToken(req.Context(), &request, &response); err != nil {
return util.ErrorResponse(err)
}
return util.JSONResponse{Code: http.StatusOK, JSON: &response}
}),
)
internalAPIMux.Handle(InputAccountDataPath,
httputil.MakeInternalAPI("inputAccountDataPath", func(req *http.Request) util.JSONResponse {
request := api.InputAccountDataRequest{}

View file

@ -52,6 +52,8 @@ type Database interface {
GetAccountByLocalpart(ctx context.Context, localpart string) (*api.Account, error)
SearchProfiles(ctx context.Context, searchString string, limit int) ([]authtypes.Profile, error)
DeactivateAccount(ctx context.Context, localpart string) (err error)
CreateOpenIDToken(ctx context.Context, token, localpart string) (exp int64, err error)
GetOpenIDTokenAttributes(ctx context.Context, token string) (*api.OpenIDTokenAttributes, error)
}
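// exampleOpenIDStorageRoundTrip is an illustrative sketch, not part of this
// change; the function, token value and localpart are hypothetical. It spells
// out the contract of the two methods added above: CreateOpenIDToken returns
// the computed expiry timestamp, and GetOpenIDTokenAttributes resolves the
// token back to the attributes it was stored with.
func exampleOpenIDStorageRoundTrip(ctx context.Context, db Database) (int64, error) {
	exp, err := db.CreateOpenIDToken(ctx, "hypothetical-token", "alice")
	if err != nil {
		return 0, err
	}
	if _, err := db.GetOpenIDTokenAttributes(ctx, "hypothetical-token"); err != nil {
		return 0, err
	}
	return exp, nil // unix timestamp in milliseconds at which the token expires
}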
// Err3PIDInUse is the error returned when trying to save an association involving

View file

@ -0,0 +1,84 @@
package postgres
import (
"context"
"database/sql"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/gomatrixserverlib"
log "github.com/sirupsen/logrus"
)
const openIDTokenSchema = `
-- Stores data about openid tokens issued for accounts.
CREATE TABLE IF NOT EXISTS open_id_tokens (
-- The value of the token issued to a user
token TEXT NOT NULL PRIMARY KEY,
-- The Matrix user ID for this account
localpart TEXT NOT NULL,
-- When the token expires, as a unix timestamp (ms resolution).
token_expires_at_ms BIGINT NOT NULL
);
`
const insertTokenSQL = "" +
"INSERT INTO open_id_tokens(token, localpart, token_expires_at_ms) VALUES ($1, $2, $3)"
const selectTokenSQL = "" +
"SELECT localpart, token_expires_at_ms FROM open_id_tokens WHERE token = $1"
type tokenStatements struct {
insertTokenStmt *sql.Stmt
selectTokenStmt *sql.Stmt
serverName gomatrixserverlib.ServerName
}
func (s *tokenStatements) prepare(db *sql.DB, server gomatrixserverlib.ServerName) (err error) {
_, err = db.Exec(openIDTokenSchema)
if err != nil {
return
}
if s.insertTokenStmt, err = db.Prepare(insertTokenSQL); err != nil {
return
}
if s.selectTokenStmt, err = db.Prepare(selectTokenSQL); err != nil {
return
}
s.serverName = server
return
}
// insertToken inserts a new OpenID Connect token into the DB.
// Returns an error if the token already exists.
func (s *tokenStatements) insertToken(
ctx context.Context,
txn *sql.Tx,
token, localpart string,
expiresAtMS int64,
) (err error) {
stmt := sqlutil.TxStmt(txn, s.insertTokenStmt)
_, err = stmt.ExecContext(ctx, token, localpart, expiresAtMS)
return
}
// selectOpenIDTokenAtrributes gets the attributes associated with an OpenID token from the DB
// Returns the existing token's attributes, or err if no token is found
func (s *tokenStatements) selectOpenIDTokenAtrributes(
ctx context.Context,
token string,
) (*api.OpenIDTokenAttributes, error) {
var openIDTokenAttrs api.OpenIDTokenAttributes
err := s.selectTokenStmt.QueryRowContext(ctx, token).Scan(
&openIDTokenAttrs.UserID,
&openIDTokenAttrs.ExpiresAtMS,
)
if err != nil {
if err != sql.ErrNoRows {
log.WithError(err).Error("Unable to retrieve token from the db")
}
return nil, err
}
return &openIDTokenAttrs, nil
}

View file

@ -20,6 +20,7 @@ import (
"encoding/json"
"errors"
"strconv"
"time"
"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
"github.com/matrix-org/dendrite/internal/sqlutil"
@ -43,12 +44,14 @@ type Database struct {
profiles profilesStatements
accountDatas accountDataStatements
threepids threepidStatements
openIDTokens tokenStatements
serverName gomatrixserverlib.ServerName
bcryptCost int
openIDTokenLifetimeMS int64
}
// NewDatabase creates a new accounts and profiles database
func NewDatabase(dbProperties *config.DatabaseOptions, serverName gomatrixserverlib.ServerName, bcryptCost int) (*Database, error) {
func NewDatabase(dbProperties *config.DatabaseOptions, serverName gomatrixserverlib.ServerName, bcryptCost int, openIDTokenLifetimeMS int64) (*Database, error) {
db, err := sqlutil.Open(dbProperties)
if err != nil {
return nil, err
@ -58,6 +61,7 @@ func NewDatabase(dbProperties *config.DatabaseOptions, serverName gomatrixserver
db: db,
writer: sqlutil.NewDummyWriter(),
bcryptCost: bcryptCost,
openIDTokenLifetimeMS: openIDTokenLifetimeMS,
}
// Create tables before executing migrations so we don't fail if the table is missing,
@ -86,6 +90,9 @@ func NewDatabase(dbProperties *config.DatabaseOptions, serverName gomatrixserver
if err = d.threepids.prepare(db); err != nil {
return nil, err
}
if err = d.openIDTokens.prepare(db, serverName); err != nil {
return nil, err
}
return d, nil
}
@ -341,3 +348,23 @@ func (d *Database) SearchProfiles(ctx context.Context, searchString string, limi
func (d *Database) DeactivateAccount(ctx context.Context, localpart string) (err error) {
return d.accounts.deactivateAccount(ctx, localpart)
}
// CreateOpenIDToken persists a new token that was issued through OpenID Connect
func (d *Database) CreateOpenIDToken(
ctx context.Context,
token, localpart string,
) (int64, error) {
expiresAtMS := time.Now().UnixNano()/int64(time.Millisecond) + d.openIDTokenLifetimeMS
err := sqlutil.WithTransaction(d.db, func(txn *sql.Tx) error {
return d.openIDTokens.insertToken(ctx, txn, token, localpart, expiresAtMS)
})
return expiresAtMS, err
}
// GetOpenIDTokenAttributes gets the attributes of an issued OIDC auth token
func (d *Database) GetOpenIDTokenAttributes(
ctx context.Context,
token string,
) (*api.OpenIDTokenAttributes, error) {
return d.openIDTokens.selectOpenIDTokenAtrributes(ctx, token)
}
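// openIDTokenIsExpiredExample is an illustrative helper, not part of this
// change; the function name is hypothetical. It shows how a caller can check
// the attributes returned above against the wall clock, mirroring the way
// CreateOpenIDToken computes the expiry in milliseconds.
func openIDTokenIsExpiredExample(attrs *api.OpenIDTokenAttributes) bool {
	nowMS := time.Now().UnixNano() / int64(time.Millisecond)
	return attrs.ExpiresAtMS <= nowMS
}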

View file

@ -0,0 +1,86 @@
package sqlite3
import (
"context"
"database/sql"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/gomatrixserverlib"
log "github.com/sirupsen/logrus"
)
const openIDTokenSchema = `
-- Stores data about openid tokens issued for accounts.
CREATE TABLE IF NOT EXISTS open_id_tokens (
-- The value of the token issued to a user
token TEXT NOT NULL PRIMARY KEY,
-- The Matrix user ID for this account
localpart TEXT NOT NULL,
-- When the token expires, as a unix timestamp (ms resolution).
token_expires_at_ms BIGINT NOT NULL
);
`
const insertTokenSQL = "" +
"INSERT INTO open_id_tokens(token, localpart, token_expires_at_ms) VALUES ($1, $2, $3)"
const selectTokenSQL = "" +
"SELECT localpart, token_expires_at_ms FROM open_id_tokens WHERE token = $1"
type tokenStatements struct {
db *sql.DB
insertTokenStmt *sql.Stmt
selectTokenStmt *sql.Stmt
serverName gomatrixserverlib.ServerName
}
func (s *tokenStatements) prepare(db *sql.DB, server gomatrixserverlib.ServerName) (err error) {
s.db = db
_, err = db.Exec(openIDTokenSchema)
if err != nil {
return err
}
if s.insertTokenStmt, err = db.Prepare(insertTokenSQL); err != nil {
return
}
if s.selectTokenStmt, err = db.Prepare(selectTokenSQL); err != nil {
return
}
s.serverName = server
return
}
// insertToken inserts a new OpenID Connect token into the DB.
// Returns an error if the token already exists.
func (s *tokenStatements) insertToken(
ctx context.Context,
txn *sql.Tx,
token, localpart string,
expiresAtMS int64,
) (err error) {
stmt := sqlutil.TxStmt(txn, s.insertTokenStmt)
_, err = stmt.ExecContext(ctx, token, localpart, expiresAtMS)
return
}
// selectOpenIDTokenAtrributes gets the attributes associated with an OpenID token from the DB
// Returns the existing token's attributes, or err if no token is found
func (s *tokenStatements) selectOpenIDTokenAtrributes(
ctx context.Context,
token string,
) (*api.OpenIDTokenAttributes, error) {
var openIDTokenAttrs api.OpenIDTokenAttributes
err := s.selectTokenStmt.QueryRowContext(ctx, token).Scan(
&openIDTokenAttrs.UserID,
&openIDTokenAttrs.ExpiresAtMS,
)
if err != nil {
if err != sql.ErrNoRows {
log.WithError(err).Error("Unable to retrieve token from the db")
}
return nil, err
}
return &openIDTokenAttrs, nil
}

View file

@ -21,6 +21,7 @@ import (
"errors"
"strconv"
"sync"
"time"
"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
"github.com/matrix-org/dendrite/internal/sqlutil"
@ -41,8 +42,10 @@ type Database struct {
profiles profilesStatements
accountDatas accountDataStatements
threepids threepidStatements
openIDTokens tokenStatements
serverName gomatrixserverlib.ServerName
bcryptCost int
openIDTokenLifetimeMS int64
accountsMu sync.Mutex
profilesMu sync.Mutex
@ -51,7 +54,7 @@ type Database struct {
}
// NewDatabase creates a new accounts and profiles database
func NewDatabase(dbProperties *config.DatabaseOptions, serverName gomatrixserverlib.ServerName, bcryptCost int) (*Database, error) {
func NewDatabase(dbProperties *config.DatabaseOptions, serverName gomatrixserverlib.ServerName, bcryptCost int, openIDTokenLifetimeMS int64) (*Database, error) {
db, err := sqlutil.Open(dbProperties)
if err != nil {
return nil, err
@ -61,6 +64,7 @@ func NewDatabase(dbProperties *config.DatabaseOptions, serverName gomatrixserver
db: db,
writer: sqlutil.NewExclusiveWriter(),
bcryptCost: bcryptCost,
openIDTokenLifetimeMS: openIDTokenLifetimeMS,
}
// Create tables before executing migrations so we don't fail if the table is missing,
@ -90,6 +94,9 @@ func NewDatabase(dbProperties *config.DatabaseOptions, serverName gomatrixserver
if err = d.threepids.prepare(db); err != nil {
return nil, err
}
if err = d.openIDTokens.prepare(db, serverName); err != nil {
return nil, err
}
return d, nil
}
@ -379,3 +386,23 @@ func (d *Database) SearchProfiles(ctx context.Context, searchString string, limi
func (d *Database) DeactivateAccount(ctx context.Context, localpart string) (err error) {
return d.accounts.deactivateAccount(ctx, localpart)
}
// CreateOpenIDToken persists a new token that was issued for OpenID Connect
func (d *Database) CreateOpenIDToken(
ctx context.Context,
token, localpart string,
) (int64, error) {
expiresAtMS := time.Now().UnixNano()/int64(time.Millisecond) + d.openIDTokenLifetimeMS
err := d.writer.Do(d.db, nil, func(txn *sql.Tx) error {
return d.openIDTokens.insertToken(ctx, txn, token, localpart, expiresAtMS)
})
return expiresAtMS, err
}
// GetOpenIDTokenAttributes gets the attributes of an issued OIDC auth token
func (d *Database) GetOpenIDTokenAttributes(
ctx context.Context,
token string,
) (*api.OpenIDTokenAttributes, error) {
return d.openIDTokens.selectOpenIDTokenAtrributes(ctx, token)
}

View file

@ -27,12 +27,12 @@ import (
// NewDatabase opens a new Postgres or Sqlite database (based on dataSourceName scheme)
// and sets postgres connection parameters
func NewDatabase(dbProperties *config.DatabaseOptions, serverName gomatrixserverlib.ServerName, bcryptCost int) (Database, error) {
func NewDatabase(dbProperties *config.DatabaseOptions, serverName gomatrixserverlib.ServerName, bcryptCost int, openIDTokenLifetimeMS int64) (Database, error) {
switch {
case dbProperties.ConnectionString.IsSQLite():
return sqlite3.NewDatabase(dbProperties, serverName, bcryptCost)
return sqlite3.NewDatabase(dbProperties, serverName, bcryptCost, openIDTokenLifetimeMS)
case dbProperties.ConnectionString.IsPostgres():
return postgres.NewDatabase(dbProperties, serverName, bcryptCost)
return postgres.NewDatabase(dbProperties, serverName, bcryptCost, openIDTokenLifetimeMS)
default:
return nil, fmt.Errorf("unexpected database type")
}

View file

@ -26,10 +26,11 @@ func NewDatabase(
dbProperties *config.DatabaseOptions,
serverName gomatrixserverlib.ServerName,
bcryptCost int,
openIDTokenLifetimeMS int64,
) (Database, error) {
switch {
case dbProperties.ConnectionString.IsSQLite():
return sqlite3.NewDatabase(dbProperties, serverName, bcryptCost)
return sqlite3.NewDatabase(dbProperties, serverName, bcryptCost, openIDTokenLifetimeMS)
case dbProperties.ConnectionString.IsPostgres():
return nil, fmt.Errorf("can't use Postgres implementation")
default:

View file

@ -26,7 +26,7 @@ const (
func MustMakeInternalAPI(t *testing.T) (api.UserInternalAPI, accounts.Database) {
accountDB, err := accounts.NewDatabase(&config.DatabaseOptions{
ConnectionString: "file::memory:",
}, serverName, bcrypt.MinCost)
}, serverName, bcrypt.MinCost, config.DefaultOpenIDTokenLifetimeMS)
if err != nil {
t.Fatalf("failed to create account DB: %s", err)
}