Merge branch 'master' into feature/room_messages_filtering

Commit bde1b634e3
.gitignore (vendored), 1 line changed:

@@ -19,6 +19,7 @@
/_test
/vendor/bin
/docker/build
/logs

# Architecture specific extensions/prefixes
*.[568vq]
@@ -1,7 +1,7 @@
# Dendrite [](https://buildkite.com/matrix-dot-org/dendrite) [](https://matrix.to/#/#dendrite:matrix.org) [](https://matrix.to/#/#dendrite-dev:matrix.org)

Dendrite is a second-generation Matrix homeserver written in Go.
It intends to provide an **efficient**, **reliable** and **scalable** alternative to Synapse:
It intends to provide an **efficient**, **reliable** and **scalable** alternative to [Synapse](https://github.com/matrix-org/synapse):
- Efficient: A small memory footprint with better baseline performance than an out-of-the-box Synapse.
- Reliable: Implements the Matrix specification as written, using the
  [same test suite](https://github.com/matrix-org/sytest) as Synapse as well as
@@ -32,14 +32,18 @@ INSERT OR IGNORE INTO appservice_counters (name, last_id) VALUES('txn_id', 1);
`

const selectTxnIDSQL = `
	SELECT last_id FROM appservice_counters WHERE name='txn_id';
	UPDATE appservice_counters SET last_id=last_id+1 WHERE name='txn_id';
	SELECT last_id FROM appservice_counters WHERE name='txn_id'
`

const updateTxnIDSQL = `
	UPDATE appservice_counters SET last_id=last_id+1 WHERE name='txn_id'
`

type txnStatements struct {
	db              *sql.DB
	writer          sqlutil.Writer
	selectTxnIDStmt *sql.Stmt
	updateTxnIDStmt *sql.Stmt
}

func (s *txnStatements) prepare(db *sql.DB, writer sqlutil.Writer) (err error) {

@@ -54,6 +58,10 @@ func (s *txnStatements) prepare(db *sql.DB, writer sqlutil.Writer) (err error) {
		return
	}

	if s.updateTxnIDStmt, err = db.Prepare(updateTxnIDSQL); err != nil {
		return
	}

	return
}

@@ -63,6 +71,11 @@ func (s *txnStatements) selectTxnID(
) (txnID int, err error) {
	err = s.writer.Do(s.db, nil, func(txn *sql.Tx) error {
		err := s.selectTxnIDStmt.QueryRowContext(ctx).Scan(&txnID)
		if err != nil {
			return err
		}

		_, err = s.updateTxnIDStmt.ExecContext(ctx)
		return err
	})
	return
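The statements above implement a simple persistent counter: read the current txn_id, then bump it, with both steps kept together by the writer. The sketch below shows the same pattern written directly against database/sql with an explicit transaction. It is illustrative only; the table and column names are taken from the SQL above, but the helper name nextTxnID is made up and is not part of this commit.

package example

import (
	"context"
	"database/sql"
)

// nextTxnID returns the current value of the txn_id counter and then
// increments it, mirroring the selectTxnIDSQL/updateTxnIDSQL pair above,
// inside a single transaction.
func nextTxnID(ctx context.Context, db *sql.DB) (int, error) {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return 0, err
	}
	defer tx.Rollback() // harmless no-op once Commit has succeeded

	var id int
	if err := tx.QueryRowContext(ctx,
		"SELECT last_id FROM appservice_counters WHERE name='txn_id'",
	).Scan(&id); err != nil {
		return 0, err
	}
	if _, err := tx.ExecContext(ctx,
		"UPDATE appservice_counters SET last_id=last_id+1 WHERE name='txn_id'",
	); err != nil {
		return 0, err
	}
	return id, tx.Commit()
}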
build/gobind-pinecone/build.sh (new file, 13 lines):

@@ -0,0 +1,13 @@
#!/bin/sh

TARGET=""

while getopts "ai" option
do
    case "$option"
    in
    a) gomobile bind -v -target android -trimpath -ldflags="-s -w" github.com/matrix-org/dendrite/build/gobind-pinecone ;;
    i) gomobile bind -v -target ios -trimpath -ldflags="" github.com/matrix-org/dendrite/build/gobind-pinecone ;;
    *) echo "No target specified, specify -a or -i"; exit 1 ;;
    esac
done
build/gobind-pinecone/monolith.go (new file, 421 lines):
@ -0,0 +1,421 @@
|
|||
package gobind
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ed25519"
|
||||
"crypto/rand"
|
||||
"crypto/tls"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/matrix-org/dendrite/appservice"
|
||||
"github.com/matrix-org/dendrite/clientapi/userutil"
|
||||
"github.com/matrix-org/dendrite/cmd/dendrite-demo-pinecone/conn"
|
||||
"github.com/matrix-org/dendrite/cmd/dendrite-demo-pinecone/rooms"
|
||||
"github.com/matrix-org/dendrite/cmd/dendrite-demo-yggdrasil/signing"
|
||||
"github.com/matrix-org/dendrite/eduserver"
|
||||
"github.com/matrix-org/dendrite/eduserver/cache"
|
||||
"github.com/matrix-org/dendrite/federationsender"
|
||||
"github.com/matrix-org/dendrite/federationsender/api"
|
||||
"github.com/matrix-org/dendrite/internal/httputil"
|
||||
"github.com/matrix-org/dendrite/keyserver"
|
||||
"github.com/matrix-org/dendrite/roomserver"
|
||||
"github.com/matrix-org/dendrite/setup"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
"github.com/matrix-org/dendrite/setup/process"
|
||||
"github.com/matrix-org/dendrite/userapi"
|
||||
userapiAPI "github.com/matrix-org/dendrite/userapi/api"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.uber.org/atomic"
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/h2c"
|
||||
|
||||
pineconeMulticast "github.com/matrix-org/pinecone/multicast"
|
||||
pineconeRouter "github.com/matrix-org/pinecone/router"
|
||||
pineconeSessions "github.com/matrix-org/pinecone/sessions"
|
||||
"github.com/matrix-org/pinecone/types"
|
||||
pineconeTypes "github.com/matrix-org/pinecone/types"
|
||||
|
||||
_ "golang.org/x/mobile/bind"
|
||||
)
|
||||
|
||||
const (
|
||||
PeerTypeRemote = pineconeRouter.PeerTypeRemote
|
||||
PeerTypeMulticast = pineconeRouter.PeerTypeMulticast
|
||||
PeerTypeBluetooth = pineconeRouter.PeerTypeBluetooth
|
||||
)
|
||||
|
||||
type DendriteMonolith struct {
|
||||
logger logrus.Logger
|
||||
PineconeRouter *pineconeRouter.Router
|
||||
PineconeMulticast *pineconeMulticast.Multicast
|
||||
PineconeQUIC *pineconeSessions.Sessions
|
||||
StorageDirectory string
|
||||
CacheDirectory string
|
||||
staticPeerURI string
|
||||
staticPeerMutex sync.RWMutex
|
||||
staticPeerAttempts atomic.Uint32
|
||||
listener net.Listener
|
||||
httpServer *http.Server
|
||||
processContext *process.ProcessContext
|
||||
userAPI userapiAPI.UserInternalAPI
|
||||
}
|
||||
|
||||
func (m *DendriteMonolith) BaseURL() string {
|
||||
return fmt.Sprintf("http://%s", m.listener.Addr().String())
|
||||
}
|
||||
|
||||
func (m *DendriteMonolith) PeerCount(peertype int) int {
|
||||
return m.PineconeRouter.PeerCount(peertype)
|
||||
}
|
||||
|
||||
func (m *DendriteMonolith) SessionCount() int {
|
||||
return len(m.PineconeQUIC.Sessions())
|
||||
}
|
||||
|
||||
func (m *DendriteMonolith) SetMulticastEnabled(enabled bool) {
|
||||
if enabled {
|
||||
m.PineconeMulticast.Start()
|
||||
} else {
|
||||
m.PineconeMulticast.Stop()
|
||||
m.DisconnectType(pineconeRouter.PeerTypeMulticast)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *DendriteMonolith) SetStaticPeer(uri string) {
|
||||
m.staticPeerMutex.Lock()
|
||||
m.staticPeerURI = uri
|
||||
m.staticPeerMutex.Unlock()
|
||||
m.DisconnectType(pineconeRouter.PeerTypeRemote)
|
||||
if uri != "" {
|
||||
go m.staticPeerConnect()
|
||||
}
|
||||
}
|
||||
|
||||
func (m *DendriteMonolith) DisconnectType(peertype int) {
|
||||
for _, p := range m.PineconeRouter.Peers() {
|
||||
if peertype == p.PeerType {
|
||||
_ = m.PineconeRouter.Disconnect(types.SwitchPortID(p.Port), nil)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *DendriteMonolith) DisconnectZone(zone string) {
|
||||
for _, p := range m.PineconeRouter.Peers() {
|
||||
if zone == p.Zone {
|
||||
_ = m.PineconeRouter.Disconnect(types.SwitchPortID(p.Port), nil)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *DendriteMonolith) DisconnectPort(port int) error {
|
||||
return m.PineconeRouter.Disconnect(types.SwitchPortID(port), nil)
|
||||
}
|
||||
|
||||
func (m *DendriteMonolith) Conduit(zone string, peertype int) (*Conduit, error) {
|
||||
l, r := net.Pipe()
|
||||
conduit := &Conduit{conn: r, port: 0}
|
||||
go func() {
|
||||
conduit.portMutex.Lock()
|
||||
defer conduit.portMutex.Unlock()
|
||||
loop:
|
||||
for i := 1; i <= 10; i++ {
|
||||
logrus.Errorf("Attempting authenticated connect (attempt %d)", i)
|
||||
var err error
|
||||
conduit.port, err = m.PineconeRouter.AuthenticatedConnect(l, zone, peertype)
|
||||
switch err {
|
||||
case io.ErrClosedPipe:
|
||||
logrus.Errorf("Authenticated connect failed due to closed pipe (attempt %d)", i)
|
||||
return
|
||||
case io.EOF:
|
||||
logrus.Errorf("Authenticated connect failed due to EOF (attempt %d)", i)
|
||||
break loop
|
||||
case nil:
|
||||
logrus.Errorf("Authenticated connect succeeded, connected to port %d (attempt %d)", conduit.port, i)
|
||||
return
|
||||
default:
|
||||
logrus.WithError(err).Errorf("Authenticated connect failed (attempt %d)", i)
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
}
|
||||
_ = l.Close()
|
||||
_ = r.Close()
|
||||
}()
|
||||
return conduit, nil
|
||||
}
|
||||
|
||||
func (m *DendriteMonolith) RegisterUser(localpart, password string) (string, error) {
|
||||
pubkey := m.PineconeRouter.PublicKey()
|
||||
userID := userutil.MakeUserID(
|
||||
localpart,
|
||||
gomatrixserverlib.ServerName(hex.EncodeToString(pubkey[:])),
|
||||
)
|
||||
userReq := &userapiAPI.PerformAccountCreationRequest{
|
||||
AccountType: userapiAPI.AccountTypeUser,
|
||||
Localpart: localpart,
|
||||
Password: password,
|
||||
}
|
||||
userRes := &userapiAPI.PerformAccountCreationResponse{}
|
||||
if err := m.userAPI.PerformAccountCreation(context.Background(), userReq, userRes); err != nil {
|
||||
return userID, fmt.Errorf("userAPI.PerformAccountCreation: %w", err)
|
||||
}
|
||||
return userID, nil
|
||||
}
|
||||
|
||||
func (m *DendriteMonolith) RegisterDevice(localpart, deviceID string) (string, error) {
|
||||
accessTokenBytes := make([]byte, 16)
|
||||
n, err := rand.Read(accessTokenBytes)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("rand.Read: %w", err)
|
||||
}
|
||||
loginReq := &userapiAPI.PerformDeviceCreationRequest{
|
||||
Localpart: localpart,
|
||||
DeviceID: &deviceID,
|
||||
AccessToken: hex.EncodeToString(accessTokenBytes[:n]),
|
||||
}
|
||||
loginRes := &userapiAPI.PerformDeviceCreationResponse{}
|
||||
if err := m.userAPI.PerformDeviceCreation(context.Background(), loginReq, loginRes); err != nil {
|
||||
return "", fmt.Errorf("userAPI.PerformDeviceCreation: %w", err)
|
||||
}
|
||||
if !loginRes.DeviceCreated {
|
||||
return "", fmt.Errorf("device was not created")
|
||||
}
|
||||
return loginRes.Device.AccessToken, nil
|
||||
}
|
||||
|
||||
func (m *DendriteMonolith) staticPeerConnect() {
|
||||
m.staticPeerMutex.RLock()
|
||||
uri := m.staticPeerURI
|
||||
m.staticPeerMutex.RUnlock()
|
||||
if uri == "" {
|
||||
return
|
||||
}
|
||||
if err := conn.ConnectToPeer(m.PineconeRouter, uri); err != nil {
|
||||
exp := time.Second * time.Duration(math.Exp2(float64(m.staticPeerAttempts.Inc())))
|
||||
time.AfterFunc(exp, m.staticPeerConnect)
|
||||
} else {
|
||||
m.staticPeerAttempts.Store(0)
|
||||
}
|
||||
}
|
||||
|
||||
// nolint:gocyclo
|
||||
func (m *DendriteMonolith) Start() {
|
||||
var err error
|
||||
var sk ed25519.PrivateKey
|
||||
var pk ed25519.PublicKey
|
||||
keyfile := fmt.Sprintf("%s/p2p.key", m.StorageDirectory)
|
||||
if _, err = os.Stat(keyfile); os.IsNotExist(err) {
|
||||
if pk, sk, err = ed25519.GenerateKey(nil); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err = ioutil.WriteFile(keyfile, sk, 0644); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
} else if err == nil {
|
||||
if sk, err = ioutil.ReadFile(keyfile); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if len(sk) != ed25519.PrivateKeySize {
|
||||
panic("the private key is not long enough")
|
||||
}
|
||||
pk = sk.Public().(ed25519.PublicKey)
|
||||
}
|
||||
|
||||
m.listener, err = net.Listen("tcp", "localhost:65432")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
m.logger = logrus.Logger{
|
||||
Out: BindLogger{},
|
||||
}
|
||||
m.logger.SetOutput(BindLogger{})
|
||||
logrus.SetOutput(BindLogger{})
|
||||
|
||||
logger := log.New(os.Stdout, "PINECONE: ", 0)
|
||||
m.PineconeRouter = pineconeRouter.NewRouter(logger, "dendrite", sk, pk, nil)
|
||||
m.PineconeQUIC = pineconeSessions.NewSessions(logger, m.PineconeRouter)
|
||||
m.PineconeMulticast = pineconeMulticast.NewMulticast(logger, m.PineconeRouter)
|
||||
|
||||
m.PineconeRouter.SetDisconnectedCallback(func(port pineconeTypes.SwitchPortID, public pineconeTypes.PublicKey, peertype int, err error) {
|
||||
if peertype == pineconeRouter.PeerTypeRemote {
|
||||
m.staticPeerAttempts.Store(0)
|
||||
time.AfterFunc(time.Second, m.staticPeerConnect)
|
||||
}
|
||||
})
|
||||
|
||||
prefix := hex.EncodeToString(pk)
|
||||
cfg := &config.Dendrite{}
|
||||
cfg.Defaults()
|
||||
cfg.Global.ServerName = gomatrixserverlib.ServerName(hex.EncodeToString(pk))
|
||||
cfg.Global.PrivateKey = sk
|
||||
cfg.Global.KeyID = gomatrixserverlib.KeyID(signing.KeyID)
|
||||
cfg.Global.Kafka.UseNaffka = true
|
||||
cfg.Global.Kafka.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-naffka.db", m.StorageDirectory, prefix))
|
||||
cfg.UserAPI.AccountDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-account.db", m.StorageDirectory, prefix))
|
||||
cfg.UserAPI.DeviceDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-device.db", m.StorageDirectory, prefix))
|
||||
cfg.MediaAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-mediaapi.db", m.CacheDirectory, prefix))
|
||||
cfg.SyncAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-syncapi.db", m.StorageDirectory, prefix))
|
||||
cfg.RoomServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-roomserver.db", m.StorageDirectory, prefix))
|
||||
cfg.SigningKeyServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-signingkeyserver.db", m.StorageDirectory, prefix))
|
||||
cfg.KeyServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-keyserver.db", m.StorageDirectory, prefix))
|
||||
cfg.FederationSender.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-federationsender.db", m.StorageDirectory, prefix))
|
||||
cfg.AppServiceAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-appservice.db", m.StorageDirectory, prefix))
|
||||
cfg.MediaAPI.BasePath = config.Path(fmt.Sprintf("%s/media", m.CacheDirectory))
|
||||
cfg.MediaAPI.AbsBasePath = config.Path(fmt.Sprintf("%s/media", m.CacheDirectory))
|
||||
cfg.MSCs.MSCs = []string{"msc2836", "msc2946"}
|
||||
if err := cfg.Derive(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
base := setup.NewBaseDendrite(cfg, "Monolith", false)
|
||||
defer base.Close() // nolint: errcheck
|
||||
|
||||
accountDB := base.CreateAccountsDB()
|
||||
federation := conn.CreateFederationClient(base, m.PineconeQUIC)
|
||||
|
||||
serverKeyAPI := &signing.YggdrasilKeys{}
|
||||
keyRing := serverKeyAPI.KeyRing()
|
||||
|
||||
rsAPI := roomserver.NewInternalAPI(
|
||||
base, keyRing,
|
||||
)
|
||||
|
||||
fsAPI := federationsender.NewInternalAPI(
|
||||
base, federation, rsAPI, keyRing, true,
|
||||
)
|
||||
|
||||
keyAPI := keyserver.NewInternalAPI(&base.Cfg.KeyServer, fsAPI)
|
||||
m.userAPI = userapi.NewInternalAPI(accountDB, &cfg.UserAPI, cfg.Derived.ApplicationServices, keyAPI)
|
||||
keyAPI.SetUserAPI(m.userAPI)
|
||||
|
||||
eduInputAPI := eduserver.NewInternalAPI(
|
||||
base, cache.New(), m.userAPI,
|
||||
)
|
||||
|
||||
asAPI := appservice.NewInternalAPI(base, m.userAPI, rsAPI)
|
||||
|
||||
// The underlying roomserver implementation needs to be able to call the fedsender.
|
||||
// This is different to rsAPI which can be the http client which doesn't need this dependency
|
||||
rsAPI.SetFederationSenderAPI(fsAPI)
|
||||
|
||||
monolith := setup.Monolith{
|
||||
Config: base.Cfg,
|
||||
AccountDB: accountDB,
|
||||
Client: conn.CreateClient(base, m.PineconeQUIC),
|
||||
FedClient: federation,
|
||||
KeyRing: keyRing,
|
||||
|
||||
AppserviceAPI: asAPI,
|
||||
EDUInternalAPI: eduInputAPI,
|
||||
FederationSenderAPI: fsAPI,
|
||||
RoomserverAPI: rsAPI,
|
||||
UserAPI: m.userAPI,
|
||||
KeyAPI: keyAPI,
|
||||
ExtPublicRoomsProvider: rooms.NewPineconeRoomProvider(m.PineconeRouter, m.PineconeQUIC, fsAPI, federation),
|
||||
}
|
||||
monolith.AddAllPublicRoutes(
|
||||
base.ProcessContext,
|
||||
base.PublicClientAPIMux,
|
||||
base.PublicFederationAPIMux,
|
||||
base.PublicKeyAPIMux,
|
||||
base.PublicMediaAPIMux,
|
||||
)
|
||||
|
||||
httpRouter := mux.NewRouter().SkipClean(true).UseEncodedPath()
|
||||
httpRouter.PathPrefix(httputil.InternalPathPrefix).Handler(base.InternalAPIMux)
|
||||
httpRouter.PathPrefix(httputil.PublicClientPathPrefix).Handler(base.PublicClientAPIMux)
|
||||
httpRouter.PathPrefix(httputil.PublicMediaPathPrefix).Handler(base.PublicMediaAPIMux)
|
||||
|
||||
pMux := mux.NewRouter().SkipClean(true).UseEncodedPath()
|
||||
pMux.PathPrefix(httputil.PublicFederationPathPrefix).Handler(base.PublicFederationAPIMux)
|
||||
pMux.PathPrefix(httputil.PublicMediaPathPrefix).Handler(base.PublicMediaAPIMux)
|
||||
|
||||
pHTTP := m.PineconeQUIC.HTTP()
|
||||
pHTTP.Mux().Handle(httputil.PublicFederationPathPrefix, pMux)
|
||||
pHTTP.Mux().Handle(httputil.PublicMediaPathPrefix, pMux)
|
||||
|
||||
// Build both ends of a HTTP multiplex.
|
||||
h2s := &http2.Server{}
|
||||
m.httpServer = &http.Server{
|
||||
Addr: ":0",
|
||||
TLSNextProto: map[string]func(*http.Server, *tls.Conn, http.Handler){},
|
||||
ReadTimeout: 10 * time.Second,
|
||||
WriteTimeout: 10 * time.Second,
|
||||
IdleTimeout: 30 * time.Second,
|
||||
BaseContext: func(_ net.Listener) context.Context {
|
||||
return context.Background()
|
||||
},
|
||||
Handler: h2c.NewHandler(pMux, h2s),
|
||||
}
|
||||
m.processContext = base.ProcessContext
|
||||
|
||||
go func() {
|
||||
m.logger.Info("Listening on ", cfg.Global.ServerName)
|
||||
m.logger.Fatal(m.httpServer.Serve(m.PineconeQUIC))
|
||||
}()
|
||||
go func() {
|
||||
logrus.Info("Listening on ", m.listener.Addr())
|
||||
logrus.Fatal(http.Serve(m.listener, httpRouter))
|
||||
}()
|
||||
go func() {
|
||||
logrus.Info("Sending wake-up message to known nodes")
|
||||
req := &api.PerformBroadcastEDURequest{}
|
||||
res := &api.PerformBroadcastEDUResponse{}
|
||||
if err := fsAPI.PerformBroadcastEDU(context.TODO(), req, res); err != nil {
|
||||
logrus.WithError(err).Error("Failed to send wake-up message to known nodes")
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (m *DendriteMonolith) Stop() {
|
||||
_ = m.listener.Close()
|
||||
m.PineconeMulticast.Stop()
|
||||
_ = m.PineconeQUIC.Close()
|
||||
m.processContext.ShutdownDendrite()
|
||||
_ = m.PineconeRouter.Close()
|
||||
}
|
||||
|
||||
type Conduit struct {
|
||||
conn net.Conn
|
||||
port types.SwitchPortID
|
||||
portMutex sync.Mutex
|
||||
}
|
||||
|
||||
func (c *Conduit) Port() int {
|
||||
c.portMutex.Lock()
|
||||
defer c.portMutex.Unlock()
|
||||
return int(c.port)
|
||||
}
|
||||
|
||||
func (c *Conduit) Read(b []byte) (int, error) {
|
||||
return c.conn.Read(b)
|
||||
}
|
||||
|
||||
func (c *Conduit) ReadCopy() ([]byte, error) {
|
||||
var buf [65535 * 2]byte
|
||||
n, err := c.conn.Read(buf[:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf[:n], nil
|
||||
}
|
||||
|
||||
func (c *Conduit) Write(b []byte) (int, error) {
|
||||
return c.conn.Write(b)
|
||||
}
|
||||
|
||||
func (c *Conduit) Close() error {
|
||||
return c.conn.Close()
|
||||
}
|
||||
build/gobind-yggdrasil/build.sh (new file, 25 lines):

@@ -0,0 +1,25 @@
#!/bin/sh

#!/bin/sh

TARGET=""

while getopts "ai" option
do
    case "$option"
    in
    a) TARGET="android";;
    i) TARGET="ios";;
    esac
done

if [[ $TARGET = "" ]];
then
    echo "No target specified, specify -a or -i"
    exit 1
fi

gomobile bind -v \
    -target $TARGET \
    -ldflags "-X github.com/yggdrasil-network/yggdrasil-go/src/version.buildName=dendrite" \
    github.com/matrix-org/dendrite/build/gobind-pinecone
@@ -25,6 +25,8 @@ import (
	"github.com/matrix-org/dendrite/userapi"
	"github.com/matrix-org/gomatrixserverlib"
	"github.com/sirupsen/logrus"

	_ "golang.org/x/mobile/bind"
)

type DendriteMonolith struct {

@@ -118,7 +120,7 @@ func (m *DendriteMonolith) Start() {
	)

	fsAPI := federationsender.NewInternalAPI(
		base, federation, rsAPI, keyRing,
		base, federation, rsAPI, keyRing, true,
	)

	keyAPI := keyserver.NewInternalAPI(&base.Cfg.KeyServer, federation)
build/gobind-yggdrasil/platform_ios.go (new file, 25 lines):

@@ -0,0 +1,25 @@
// +build ios

package gobind

/*
#cgo CFLAGS: -x objective-c
#cgo LDFLAGS: -framework Foundation
#import <Foundation/Foundation.h>
void Log(const char *text) {
  NSString *nss = [NSString stringWithUTF8String:text];
  NSLog(@"%@", nss);
}
*/
import "C"
import "unsafe"

type BindLogger struct {
}

func (nsl BindLogger) Write(p []byte) (n int, err error) {
	p = append(p, 0)
	cstr := (*C.char)(unsafe.Pointer(&p[0]))
	C.Log(cstr)
	return len(p), nil
}
build/gobind-yggdrasil/platform_other.go (new file, 12 lines):

@@ -0,0 +1,12 @@
// +build !ios

package gobind

import "log"

type BindLogger struct{}

func (nsl BindLogger) Write(p []byte) (n int, err error) {
	log.Println(string(p))
	return len(p), nil
}
Removed (the old gobind build script, 6 lines):

@@ -1,6 +0,0 @@
#!/bin/sh

gomobile bind -v \
    -ldflags "-X github.com/yggdrasil-network/yggdrasil-go/src/version.buildName=dendrite" \
    -target ios \
    github.com/matrix-org/dendrite/build/gobind
clientapi/routing/openid.go (new file, 70 lines):

@@ -0,0 +1,70 @@
// Copyright 2021 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package routing

import (
	"net/http"

	"github.com/matrix-org/dendrite/clientapi/jsonerror"
	"github.com/matrix-org/dendrite/setup/config"
	"github.com/matrix-org/dendrite/userapi/api"
	"github.com/matrix-org/util"
)

type openIDTokenResponse struct {
	AccessToken      string `json:"access_token"`
	TokenType        string `json:"token_type"`
	MatrixServerName string `json:"matrix_server_name"`
	ExpiresIn        int64  `json:"expires_in"`
}

// CreateOpenIDToken creates a new OpenID Connect (OIDC) token that a Matrix user
// can supply to an OpenID Relying Party to verify their identity
func CreateOpenIDToken(
	req *http.Request,
	userAPI api.UserInternalAPI,
	device *api.Device,
	userID string,
	cfg *config.ClientAPI,
) util.JSONResponse {
	// does the incoming user ID match the user that the token was issued for?
	if userID != device.UserID {
		return util.JSONResponse{
			Code: http.StatusForbidden,
			JSON: jsonerror.Forbidden("Cannot request tokens for other users"),
		}
	}

	request := api.PerformOpenIDTokenCreationRequest{
		UserID: userID, // this is the user ID from the incoming path
	}
	response := api.PerformOpenIDTokenCreationResponse{}

	err := userAPI.PerformOpenIDTokenCreation(req.Context(), &request, &response)
	if err != nil {
		util.GetLogger(req.Context()).WithError(err).Error("userAPI.CreateOpenIDToken failed")
		return jsonerror.InternalServerError()
	}

	return util.JSONResponse{
		Code: http.StatusOK,
		JSON: openIDTokenResponse{
			AccessToken:      response.Token.Token,
			TokenType:        "Bearer",
			MatrixServerName: string(cfg.Matrix.ServerName),
			ExpiresIn:        response.Token.ExpiresAtMS / 1000, // convert ms to s
		},
	}
}
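For context, the handler above is registered on POST /_matrix/client/r0/user/{userID}/openid/request_token (see the routing changes further down). A minimal client-side sketch of calling it follows; the homeserver address, user ID and access token are placeholders, and this example program is not part of the commit.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

// openIDTokenResponse mirrors the JSON fields returned by CreateOpenIDToken above.
type openIDTokenResponse struct {
	AccessToken      string `json:"access_token"`
	TokenType        string `json:"token_type"`
	MatrixServerName string `json:"matrix_server_name"`
	ExpiresIn        int64  `json:"expires_in"`
}

func main() {
	homeserver := "http://localhost:8008" // placeholder
	userID := "@alice:example.com"        // placeholder
	accessToken := "MDAx..."              // placeholder client access token

	endpoint := fmt.Sprintf("%s/_matrix/client/r0/user/%s/openid/request_token",
		homeserver, url.PathEscape(userID))

	req, err := http.NewRequest(http.MethodPost, endpoint, strings.NewReader("{}"))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+accessToken)
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var token openIDTokenResponse
	if err := json.NewDecoder(resp.Body).Decode(&token); err != nil {
		panic(err)
	}
	fmt.Printf("token=%s server=%s expires_in=%ds\n",
		token.AccessToken, token.MatrixServerName, token.ExpiresIn)
}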
@@ -161,15 +161,6 @@ type userInteractiveResponse struct {
	Session string `json:"session"`
}

// legacyRegisterRequest represents the submitted registration request for v1 API.
type legacyRegisterRequest struct {
	Password string                      `json:"password"`
	Username string                      `json:"user"`
	Admin    bool                        `json:"admin"`
	Type     authtypes.LoginType         `json:"type"`
	Mac      gomatrixserverlib.HexString `json:"mac"`
}

// newUserInteractiveResponse will return a struct to be sent back to the client
// during registration.
func newUserInteractiveResponse(

@@ -623,7 +614,10 @@ func handleRegistrationFlow(
	}

	if cfg.RegistrationDisabled && r.Auth.Type != authtypes.LoginTypeSharedSecret {
		return util.MessageResponse(http.StatusForbidden, "Registration has been disabled")
		return util.JSONResponse{
			Code: http.StatusForbidden,
			JSON: jsonerror.Forbidden("Registration is disabled"),
		}
	}

	// Make sure normal user isn't registering under an exclusive application
@ -757,85 +751,6 @@ func checkAndCompleteFlow(
|
|||
}
|
||||
}
|
||||
|
||||
// LegacyRegister process register requests from the legacy v1 API
|
||||
func LegacyRegister(
|
||||
req *http.Request,
|
||||
userAPI userapi.UserInternalAPI,
|
||||
cfg *config.ClientAPI,
|
||||
) util.JSONResponse {
|
||||
var r legacyRegisterRequest
|
||||
resErr := parseAndValidateLegacyLogin(req, &r)
|
||||
if resErr != nil {
|
||||
return *resErr
|
||||
}
|
||||
|
||||
logger := util.GetLogger(req.Context())
|
||||
logger.WithFields(log.Fields{
|
||||
"username": r.Username,
|
||||
"auth.type": r.Type,
|
||||
}).Info("Processing registration request")
|
||||
|
||||
if cfg.RegistrationDisabled && r.Type != authtypes.LoginTypeSharedSecret {
|
||||
return util.MessageResponse(http.StatusForbidden, "Registration has been disabled")
|
||||
}
|
||||
|
||||
switch r.Type {
|
||||
case authtypes.LoginTypeSharedSecret:
|
||||
if cfg.RegistrationSharedSecret == "" {
|
||||
return util.MessageResponse(http.StatusBadRequest, "Shared secret registration is disabled")
|
||||
}
|
||||
|
||||
valid, err := isValidMacLogin(cfg, r.Username, r.Password, r.Admin, r.Mac)
|
||||
if err != nil {
|
||||
util.GetLogger(req.Context()).WithError(err).Error("isValidMacLogin failed")
|
||||
return jsonerror.InternalServerError()
|
||||
}
|
||||
|
||||
if !valid {
|
||||
return util.MessageResponse(http.StatusForbidden, "HMAC incorrect")
|
||||
}
|
||||
|
||||
return completeRegistration(req.Context(), userAPI, r.Username, r.Password, "", req.RemoteAddr, req.UserAgent(), false, nil, nil)
|
||||
case authtypes.LoginTypeDummy:
|
||||
// there is nothing to do
|
||||
return completeRegistration(req.Context(), userAPI, r.Username, r.Password, "", req.RemoteAddr, req.UserAgent(), false, nil, nil)
|
||||
default:
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusNotImplemented,
|
||||
JSON: jsonerror.Unknown("unknown/unimplemented auth type"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// parseAndValidateLegacyLogin parses the request into r and checks that the
|
||||
// request is valid (e.g. valid user names, etc)
|
||||
func parseAndValidateLegacyLogin(req *http.Request, r *legacyRegisterRequest) *util.JSONResponse {
|
||||
resErr := httputil.UnmarshalJSONRequest(req, &r)
|
||||
if resErr != nil {
|
||||
return resErr
|
||||
}
|
||||
|
||||
// Squash username to all lowercase letters
|
||||
r.Username = strings.ToLower(r.Username)
|
||||
|
||||
if resErr = validateUsername(r.Username); resErr != nil {
|
||||
return resErr
|
||||
}
|
||||
if resErr = validatePassword(r.Password); resErr != nil {
|
||||
return resErr
|
||||
}
|
||||
|
||||
// All registration requests must specify what auth they are using to perform this request
|
||||
if r.Type == "" {
|
||||
return &util.JSONResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
JSON: jsonerror.BadJSON("invalid type"),
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// completeRegistration runs some rudimentary checks against the submitted
|
||||
// input, then if successful creates an account and a newly associated device
|
||||
// We pass in each individual part of the request here instead of just passing a
|
||||
|
|
|
|||
|
|
@@ -89,7 +89,6 @@ func Setup(
	).Methods(http.MethodGet, http.MethodOptions)

	r0mux := publicAPIMux.PathPrefix("/r0").Subrouter()
	v1mux := publicAPIMux.PathPrefix("/api/v1").Subrouter()
	unstableMux := publicAPIMux.PathPrefix("/unstable").Subrouter()

	r0mux.Handle("/createRoom",

@@ -306,13 +305,6 @@ func Setup(
		return Register(req, userAPI, accountDB, cfg)
	})).Methods(http.MethodPost, http.MethodOptions)

	v1mux.Handle("/register", httputil.MakeExternalAPI("register", func(req *http.Request) util.JSONResponse {
		if r := rateLimits.rateLimit(req); r != nil {
			return *r
		}
		return LegacyRegister(req, userAPI, cfg)
	})).Methods(http.MethodPost, http.MethodOptions)

	r0mux.Handle("/register/available", httputil.MakeExternalAPI("registerAvailable", func(req *http.Request) util.JSONResponse {
		if r := rateLimits.rateLimit(req); r != nil {
			return *r

@@ -469,7 +461,7 @@ func Setup(
		}),
	).Methods(http.MethodPost, http.MethodOptions)

	// Stub endpoints required by Riot
	// Stub endpoints required by Element

	r0mux.Handle("/login",
		httputil.MakeExternalAPI("login", func(req *http.Request) util.JSONResponse {

@@ -506,7 +498,7 @@ func Setup(
		}),
	).Methods(http.MethodGet, http.MethodOptions)

	// Riot user settings
	// Element user settings

	r0mux.Handle("/profile/{userID}",
		httputil.MakeExternalAPI("profile", func(req *http.Request) util.JSONResponse {

@@ -592,7 +584,7 @@ func Setup(
		}),
	).Methods(http.MethodPost, http.MethodOptions)

	// Riot logs get flooded unless this is handled
	// Element logs get flooded unless this is handled
	r0mux.Handle("/presence/{userID}/status",
		httputil.MakeExternalAPI("presence", func(req *http.Request) util.JSONResponse {
			if r := rateLimits.rateLimit(req); r != nil {

@@ -685,6 +677,19 @@ func Setup(
		}),
	).Methods(http.MethodGet)

	r0mux.Handle("/user/{userID}/openid/request_token",
		httputil.MakeAuthAPI("openid_request_token", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
			if r := rateLimits.rateLimit(req); r != nil {
				return *r
			}
			vars, err := httputil.URLDecodeMapValues(mux.Vars(req))
			if err != nil {
				return util.ErrorResponse(err)
			}
			return CreateOpenIDToken(req, userAPI, device, vars["userID"], cfg)
		}),
	).Methods(http.MethodPost, http.MethodOptions)

	r0mux.Handle("/user_directory/search",
		httputil.MakeAuthAPI("userdirectory_search", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
			if r := rateLimits.rateLimit(req); r != nil {
|
||||
|
|
|
|||
|
|
@@ -58,7 +58,7 @@ func main() {

	accountDB, err := accounts.NewDatabase(&config.DatabaseOptions{
		ConnectionString: cfg.UserAPI.AccountDatabase.ConnectionString,
	}, cfg.Global.ServerName, bcrypt.DefaultCost)
	}, cfg.Global.ServerName, bcrypt.DefaultCost, cfg.UserAPI.OpenIDTokenLifetimeMS)
	if err != nil {
		logrus.Fatalln("Failed to connect to the database:", err.Error())
	}

@@ -166,7 +166,7 @@ func main() {
	asAPI := appservice.NewInternalAPI(&base.Base, userAPI, rsAPI)
	rsAPI.SetAppserviceAPI(asAPI)
	fsAPI := federationsender.NewInternalAPI(
		&base.Base, federation, rsAPI, keyRing,
		&base.Base, federation, rsAPI, keyRing, true,
	)
	rsAPI.SetFederationSenderAPI(fsAPI)
	provider := newPublicRoomsProvider(base.LibP2PPubsub, rsAPI)
|
||||
|
|
|
|||
cmd/dendrite-demo-pinecone/conn/client.go (new file, 81 lines):

@@ -0,0 +1,81 @@
package conn

import (
	"fmt"
	"net"
	"net/http"
	"strings"

	"github.com/gorilla/websocket"
	"github.com/matrix-org/dendrite/setup"
	"github.com/matrix-org/gomatrixserverlib"

	pineconeRouter "github.com/matrix-org/pinecone/router"
	pineconeSessions "github.com/matrix-org/pinecone/sessions"
)

func ConnectToPeer(pRouter *pineconeRouter.Router, peer string) error {
	var parent net.Conn
	if strings.HasPrefix(peer, "ws://") || strings.HasPrefix(peer, "wss://") {
		c, _, err := websocket.DefaultDialer.Dial(peer, nil)
		if err != nil {
			return fmt.Errorf("websocket.DefaultDialer.Dial: %w", err)
		}
		parent = WrapWebSocketConn(c)
	} else {
		var err error
		parent, err = net.Dial("tcp", peer)
		if err != nil {
			return fmt.Errorf("net.Dial: %w", err)
		}
	}
	if parent == nil {
		return fmt.Errorf("failed to wrap connection")
	}
	_, err := pRouter.AuthenticatedConnect(parent, "static", pineconeRouter.PeerTypeRemote)
	return err
}

type RoundTripper struct {
	inner *http.Transport
}

func (y *RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	req.URL.Scheme = "http"
	return y.inner.RoundTrip(req)
}

func createTransport(s *pineconeSessions.Sessions) *http.Transport {
	tr := &http.Transport{}
	tr.RegisterProtocol(
		"matrix", &RoundTripper{
			inner: &http.Transport{
				DisableKeepAlives: false,
				Dial:              s.Dial,
				DialContext:       s.DialContext,
				DialTLS:           s.DialTLS,
				DialTLSContext:    s.DialTLSContext,
			},
		},
	)
	return tr
}

func CreateClient(
	base *setup.BaseDendrite, s *pineconeSessions.Sessions,
) *gomatrixserverlib.Client {
	return gomatrixserverlib.NewClient(
		gomatrixserverlib.WithTransport(createTransport(s)),
	)
}

func CreateFederationClient(
	base *setup.BaseDendrite, s *pineconeSessions.Sessions,
) *gomatrixserverlib.FederationClient {
	return gomatrixserverlib.NewFederationClient(
		base.Cfg.Global.ServerName,
		base.Cfg.Global.KeyID,
		base.Cfg.Global.PrivateKey,
		gomatrixserverlib.WithTransport(createTransport(s)),
	)
}
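Because createTransport registers the custom RoundTripper for the "matrix" URL scheme, any plain net/http client built on it routes matrix:// URLs over the Pinecone session dialer rather than the normal network stack. A hedged sketch of that idea, assuming it were added as another file in the conn package above; the FetchOverPinecone helper and the server name argument are illustrative and not part of this commit.

package conn

import (
	"fmt"
	"io/ioutil"
	"net/http"

	pineconeSessions "github.com/matrix-org/pinecone/sessions"
)

// FetchOverPinecone issues a request to a "matrix://" URL; the RoundTripper
// registered in createTransport rewrites the scheme to "http" and dials the
// remote side through the Pinecone sessions.
func FetchOverPinecone(s *pineconeSessions.Sessions, serverName string) (string, error) {
	client := &http.Client{Transport: createTransport(s)}
	resp, err := client.Get(fmt.Sprintf("matrix://%s/_matrix/federation/v1/version", serverName))
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	return string(body), nil
}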
|
||||
cmd/dendrite-demo-pinecone/conn/ws.go (new file, 81 lines):
|
|
@ -0,0 +1,81 @@
|
|||
package conn
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/websocket"
|
||||
)
|
||||
|
||||
func WrapWebSocketConn(c *websocket.Conn) *WebSocketConn {
|
||||
return &WebSocketConn{c: c}
|
||||
}
|
||||
|
||||
type WebSocketConn struct {
|
||||
r io.Reader
|
||||
c *websocket.Conn
|
||||
}
|
||||
|
||||
func (c *WebSocketConn) Write(p []byte) (int, error) {
|
||||
err := c.c.WriteMessage(websocket.BinaryMessage, p)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func (c *WebSocketConn) Read(p []byte) (int, error) {
|
||||
for {
|
||||
if c.r == nil {
|
||||
// Advance to next message.
|
||||
var err error
|
||||
_, c.r, err = c.c.NextReader()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
n, err := c.r.Read(p)
|
||||
if err == io.EOF {
|
||||
// At end of message.
|
||||
c.r = nil
|
||||
if n > 0 {
|
||||
return n, nil
|
||||
} else {
|
||||
// No data read, continue to next message.
|
||||
continue
|
||||
}
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
|
||||
func (c *WebSocketConn) Close() error {
|
||||
return c.c.Close()
|
||||
}
|
||||
|
||||
func (c *WebSocketConn) LocalAddr() net.Addr {
|
||||
return c.c.LocalAddr()
|
||||
}
|
||||
|
||||
func (c *WebSocketConn) RemoteAddr() net.Addr {
|
||||
return c.c.RemoteAddr()
|
||||
}
|
||||
|
||||
func (c *WebSocketConn) SetDeadline(t time.Time) error {
|
||||
if err := c.SetReadDeadline(t); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := c.SetWriteDeadline(t); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *WebSocketConn) SetReadDeadline(t time.Time) error {
|
||||
return c.c.SetReadDeadline(t)
|
||||
}
|
||||
|
||||
func (c *WebSocketConn) SetWriteDeadline(t time.Time) error {
|
||||
return c.c.SetWriteDeadline(t)
|
||||
}
|
||||
cmd/dendrite-demo-pinecone/embed/embed_other.go (new file, 9 lines):

@@ -0,0 +1,9 @@
// +build !riotweb

package embed

import "github.com/gorilla/mux"

func Embed(_ *mux.Router, _ int, _ string) {

}
|
||||
cmd/dendrite-demo-pinecone/embed/embed_riotweb.go (new file, 83 lines):
|
|
@ -0,0 +1,83 @@
|
|||
// +build riotweb
|
||||
|
||||
package embed
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"regexp"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/tidwall/sjson"
|
||||
)
|
||||
|
||||
// From within the Riot Web directory:
|
||||
// go run github.com/mjibson/esc -o /path/to/dendrite/internal/embed/fs_riotweb.go -private -pkg embed .
|
||||
|
||||
var cssFile = regexp.MustCompile("\\.css$")
|
||||
var jsFile = regexp.MustCompile("\\.js$")
|
||||
|
||||
type mimeFixingHandler struct {
|
||||
fs http.Handler
|
||||
}
|
||||
|
||||
func (h mimeFixingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
ruri := r.RequestURI
|
||||
fmt.Println(ruri)
|
||||
switch {
|
||||
case cssFile.MatchString(ruri):
|
||||
w.Header().Set("Content-Type", "text/css")
|
||||
case jsFile.MatchString(ruri):
|
||||
w.Header().Set("Content-Type", "application/javascript")
|
||||
default:
|
||||
}
|
||||
h.fs.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
func Embed(rootMux *mux.Router, listenPort int, serverName string) {
|
||||
embeddedFS := _escFS(false)
|
||||
embeddedServ := mimeFixingHandler{http.FileServer(embeddedFS)}
|
||||
|
||||
rootMux.NotFoundHandler = embeddedServ
|
||||
rootMux.HandleFunc("/config.json", func(w http.ResponseWriter, r *http.Request) {
|
||||
url := fmt.Sprintf("http://%s:%d", r.Header("Host"), listenPort)
|
||||
configFile, err := embeddedFS.Open("/config.sample.json")
|
||||
if err != nil {
|
||||
w.WriteHeader(500)
|
||||
io.WriteString(w, "Couldn't open the file: "+err.Error())
|
||||
return
|
||||
}
|
||||
configFileInfo, err := configFile.Stat()
|
||||
if err != nil {
|
||||
w.WriteHeader(500)
|
||||
io.WriteString(w, "Couldn't stat the file: "+err.Error())
|
||||
return
|
||||
}
|
||||
buf := make([]byte, configFileInfo.Size())
|
||||
n, err := configFile.Read(buf)
|
||||
if err != nil {
|
||||
w.WriteHeader(500)
|
||||
io.WriteString(w, "Couldn't read the file: "+err.Error())
|
||||
return
|
||||
}
|
||||
if int64(n) != configFileInfo.Size() {
|
||||
w.WriteHeader(500)
|
||||
io.WriteString(w, "The returned file size didn't match what we expected")
|
||||
return
|
||||
}
|
||||
js, _ := sjson.SetBytes(buf, "default_server_config.m\\.homeserver.base_url", url)
|
||||
js, _ = sjson.SetBytes(js, "default_server_config.m\\.homeserver.server_name", serverName)
|
||||
js, _ = sjson.SetBytes(js, "brand", fmt.Sprintf("Riot %s", serverName))
|
||||
js, _ = sjson.SetBytes(js, "disable_guests", true)
|
||||
js, _ = sjson.SetBytes(js, "disable_3pid_login", true)
|
||||
js, _ = sjson.DeleteBytes(js, "welcomeUserId")
|
||||
_, _ = w.Write(js)
|
||||
})
|
||||
|
||||
fmt.Println("*-------------------------------*")
|
||||
fmt.Println("| This build includes Riot Web! |")
|
||||
fmt.Println("*-------------------------------*")
|
||||
fmt.Println("Point your browser to:", url)
|
||||
fmt.Println()
|
||||
}
|
||||
cmd/dendrite-demo-pinecone/main.go (new file, 280 lines):
|
|
@ -0,0 +1,280 @@
|
|||
// Copyright 2020 The Matrix.org Foundation C.I.C.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ed25519"
|
||||
"crypto/tls"
|
||||
"encoding/hex"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/gorilla/websocket"
|
||||
"github.com/matrix-org/dendrite/appservice"
|
||||
"github.com/matrix-org/dendrite/cmd/dendrite-demo-pinecone/conn"
|
||||
"github.com/matrix-org/dendrite/cmd/dendrite-demo-pinecone/embed"
|
||||
"github.com/matrix-org/dendrite/cmd/dendrite-demo-pinecone/rooms"
|
||||
"github.com/matrix-org/dendrite/cmd/dendrite-demo-yggdrasil/signing"
|
||||
"github.com/matrix-org/dendrite/eduserver"
|
||||
"github.com/matrix-org/dendrite/eduserver/cache"
|
||||
"github.com/matrix-org/dendrite/federationsender"
|
||||
"github.com/matrix-org/dendrite/federationsender/api"
|
||||
"github.com/matrix-org/dendrite/internal"
|
||||
"github.com/matrix-org/dendrite/internal/httputil"
|
||||
"github.com/matrix-org/dendrite/keyserver"
|
||||
"github.com/matrix-org/dendrite/roomserver"
|
||||
"github.com/matrix-org/dendrite/setup"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
"github.com/matrix-org/dendrite/userapi"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"go.uber.org/atomic"
|
||||
|
||||
pineconeMulticast "github.com/matrix-org/pinecone/multicast"
|
||||
pineconeRouter "github.com/matrix-org/pinecone/router"
|
||||
pineconeSessions "github.com/matrix-org/pinecone/sessions"
|
||||
pineconeTypes "github.com/matrix-org/pinecone/types"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
instanceName = flag.String("name", "dendrite-p2p-pinecone", "the name of this P2P demo instance")
|
||||
instancePort = flag.Int("port", 8008, "the port that the client API will listen on")
|
||||
instancePeer = flag.String("peer", "", "the static Pinecone peer to connect to")
|
||||
instanceListen = flag.String("listen", ":0", "the port Pinecone peers can connect to")
|
||||
)
|
||||
|
||||
// nolint:gocyclo
|
||||
func main() {
|
||||
flag.Parse()
|
||||
internal.SetupPprof()
|
||||
|
||||
var pk ed25519.PublicKey
|
||||
var sk ed25519.PrivateKey
|
||||
|
||||
keyfile := *instanceName + ".key"
|
||||
if _, err := os.Stat(keyfile); os.IsNotExist(err) {
|
||||
if pk, sk, err = ed25519.GenerateKey(nil); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err = ioutil.WriteFile(keyfile, sk, 0644); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
} else if err == nil {
|
||||
if sk, err = ioutil.ReadFile(keyfile); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if len(sk) != ed25519.PrivateKeySize {
|
||||
panic("the private key is not long enough")
|
||||
}
|
||||
pk = sk.Public().(ed25519.PublicKey)
|
||||
}
|
||||
|
||||
logger := log.New(os.Stdout, "", 0)
|
||||
pRouter := pineconeRouter.NewRouter(logger, "dendrite", sk, pk, nil)
|
||||
|
||||
go func() {
|
||||
listener, err := net.Listen("tcp", *instanceListen)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
fmt.Println("Listening on", listener.Addr())
|
||||
|
||||
for {
|
||||
conn, err := listener.Accept()
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("listener.Accept failed")
|
||||
continue
|
||||
}
|
||||
|
||||
port, err := pRouter.AuthenticatedConnect(conn, "", pineconeRouter.PeerTypeRemote)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("pSwitch.AuthenticatedConnect failed")
|
||||
continue
|
||||
}
|
||||
|
||||
fmt.Println("Inbound connection", conn.RemoteAddr(), "is connected to port", port)
|
||||
}
|
||||
}()
|
||||
|
||||
pQUIC := pineconeSessions.NewSessions(logger, pRouter)
|
||||
pMulticast := pineconeMulticast.NewMulticast(logger, pRouter)
|
||||
pMulticast.Start()
|
||||
|
||||
var staticPeerAttempts atomic.Uint32
|
||||
var connectToStaticPeer func()
|
||||
connectToStaticPeer = func() {
|
||||
uri := *instancePeer
|
||||
if uri == "" {
|
||||
return
|
||||
}
|
||||
if err := conn.ConnectToPeer(pRouter, uri); err != nil {
|
||||
exp := time.Second * time.Duration(math.Exp2(float64(staticPeerAttempts.Inc())))
|
||||
time.AfterFunc(exp, connectToStaticPeer)
|
||||
} else {
|
||||
staticPeerAttempts.Store(0)
|
||||
}
|
||||
}
|
||||
pRouter.SetDisconnectedCallback(func(port pineconeTypes.SwitchPortID, public pineconeTypes.PublicKey, peertype int, err error) {
|
||||
if peertype == pineconeRouter.PeerTypeRemote && err != nil {
|
||||
staticPeerAttempts.Store(0)
|
||||
time.AfterFunc(time.Second, connectToStaticPeer)
|
||||
}
|
||||
})
|
||||
go connectToStaticPeer()
|
||||
|
||||
cfg := &config.Dendrite{}
|
||||
cfg.Defaults()
|
||||
cfg.Global.ServerName = gomatrixserverlib.ServerName(hex.EncodeToString(pk))
|
||||
cfg.Global.PrivateKey = sk
|
||||
cfg.Global.KeyID = gomatrixserverlib.KeyID(signing.KeyID)
|
||||
cfg.Global.Kafka.UseNaffka = true
|
||||
cfg.UserAPI.AccountDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-account.db", *instanceName))
|
||||
cfg.UserAPI.DeviceDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-device.db", *instanceName))
|
||||
cfg.MediaAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-mediaapi.db", *instanceName))
|
||||
cfg.SyncAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-syncapi.db", *instanceName))
|
||||
cfg.RoomServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-roomserver.db", *instanceName))
|
||||
cfg.SigningKeyServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-signingkeyserver.db", *instanceName))
|
||||
cfg.KeyServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-keyserver.db", *instanceName))
|
||||
cfg.FederationSender.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-federationsender.db", *instanceName))
|
||||
cfg.AppServiceAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-appservice.db", *instanceName))
|
||||
cfg.Global.Kafka.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-naffka.db", *instanceName))
|
||||
cfg.MSCs.MSCs = []string{"msc2836", "msc2946"}
|
||||
if err := cfg.Derive(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
base := setup.NewBaseDendrite(cfg, "Monolith", false)
|
||||
defer base.Close() // nolint: errcheck
|
||||
|
||||
accountDB := base.CreateAccountsDB()
|
||||
federation := conn.CreateFederationClient(base, pQUIC)
|
||||
|
||||
serverKeyAPI := &signing.YggdrasilKeys{}
|
||||
keyRing := serverKeyAPI.KeyRing()
|
||||
|
||||
rsComponent := roomserver.NewInternalAPI(
|
||||
base, keyRing,
|
||||
)
|
||||
rsAPI := rsComponent
|
||||
fsAPI := federationsender.NewInternalAPI(
|
||||
base, federation, rsAPI, keyRing, true,
|
||||
)
|
||||
|
||||
keyAPI := keyserver.NewInternalAPI(&base.Cfg.KeyServer, fsAPI)
|
||||
userAPI := userapi.NewInternalAPI(accountDB, &cfg.UserAPI, nil, keyAPI)
|
||||
keyAPI.SetUserAPI(userAPI)
|
||||
|
||||
eduInputAPI := eduserver.NewInternalAPI(
|
||||
base, cache.New(), userAPI,
|
||||
)
|
||||
|
||||
asAPI := appservice.NewInternalAPI(base, userAPI, rsAPI)
|
||||
|
||||
rsComponent.SetFederationSenderAPI(fsAPI)
|
||||
|
||||
monolith := setup.Monolith{
|
||||
Config: base.Cfg,
|
||||
AccountDB: accountDB,
|
||||
Client: conn.CreateClient(base, pQUIC),
|
||||
FedClient: federation,
|
||||
KeyRing: keyRing,
|
||||
|
||||
AppserviceAPI: asAPI,
|
||||
EDUInternalAPI: eduInputAPI,
|
||||
FederationSenderAPI: fsAPI,
|
||||
RoomserverAPI: rsAPI,
|
||||
UserAPI: userAPI,
|
||||
KeyAPI: keyAPI,
|
||||
ExtPublicRoomsProvider: rooms.NewPineconeRoomProvider(pRouter, pQUIC, fsAPI, federation),
|
||||
}
|
||||
monolith.AddAllPublicRoutes(
|
||||
base.ProcessContext,
|
||||
base.PublicClientAPIMux,
|
||||
base.PublicFederationAPIMux,
|
||||
base.PublicKeyAPIMux,
|
||||
base.PublicMediaAPIMux,
|
||||
)
|
||||
|
||||
wsUpgrader := websocket.Upgrader{}
|
||||
httpRouter := mux.NewRouter().SkipClean(true).UseEncodedPath()
|
||||
httpRouter.PathPrefix(httputil.InternalPathPrefix).Handler(base.InternalAPIMux)
|
||||
httpRouter.PathPrefix(httputil.PublicClientPathPrefix).Handler(base.PublicClientAPIMux)
|
||||
httpRouter.PathPrefix(httputil.PublicMediaPathPrefix).Handler(base.PublicMediaAPIMux)
|
||||
httpRouter.HandleFunc("/ws", func(w http.ResponseWriter, r *http.Request) {
|
||||
c, err := wsUpgrader.Upgrade(w, r, nil)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("Failed to upgrade WebSocket connection")
|
||||
return
|
||||
}
|
||||
conn := conn.WrapWebSocketConn(c)
|
||||
if _, err = pRouter.AuthenticatedConnect(conn, "websocket", pineconeRouter.PeerTypeRemote); err != nil {
|
||||
logrus.WithError(err).Error("Failed to connect WebSocket peer to Pinecone switch")
|
||||
}
|
||||
})
|
||||
embed.Embed(httpRouter, *instancePort, "Pinecone Demo")
|
||||
|
||||
pMux := mux.NewRouter().SkipClean(true).UseEncodedPath()
|
||||
pMux.PathPrefix(httputil.PublicFederationPathPrefix).Handler(base.PublicFederationAPIMux)
|
||||
pMux.PathPrefix(httputil.PublicMediaPathPrefix).Handler(base.PublicMediaAPIMux)
|
||||
|
||||
pHTTP := pQUIC.HTTP()
|
||||
pHTTP.Mux().Handle(httputil.PublicFederationPathPrefix, pMux)
|
||||
pHTTP.Mux().Handle(httputil.PublicMediaPathPrefix, pMux)
|
||||
|
||||
// Build both ends of a HTTP multiplex.
|
||||
httpServer := &http.Server{
|
||||
Addr: ":0",
|
||||
TLSNextProto: map[string]func(*http.Server, *tls.Conn, http.Handler){},
|
||||
ReadTimeout: 10 * time.Second,
|
||||
WriteTimeout: 10 * time.Second,
|
||||
IdleTimeout: 60 * time.Second,
|
||||
BaseContext: func(_ net.Listener) context.Context {
|
||||
return context.Background()
|
||||
},
|
||||
Handler: pMux,
|
||||
}
|
||||
|
||||
go func() {
|
||||
pubkey := pRouter.PublicKey()
|
||||
logrus.Info("Listening on ", hex.EncodeToString(pubkey[:]))
|
||||
logrus.Fatal(httpServer.Serve(pQUIC))
|
||||
}()
|
||||
go func() {
|
||||
httpBindAddr := fmt.Sprintf(":%d", *instancePort)
|
||||
logrus.Info("Listening on ", httpBindAddr)
|
||||
logrus.Fatal(http.ListenAndServe(httpBindAddr, httpRouter))
|
||||
}()
|
||||
go func() {
|
||||
logrus.Info("Sending wake-up message to known nodes")
|
||||
req := &api.PerformBroadcastEDURequest{}
|
||||
res := &api.PerformBroadcastEDUResponse{}
|
||||
if err := fsAPI.PerformBroadcastEDU(context.TODO(), req, res); err != nil {
|
||||
logrus.WithError(err).Error("Failed to send wake-up message to known nodes")
|
||||
}
|
||||
}()
|
||||
|
||||
base.WaitForShutdown()
|
||||
}
|
||||
cmd/dendrite-demo-pinecone/rooms/rooms.go (new file, 117 lines):
|
|
@ -0,0 +1,117 @@
|
|||
// Copyright 2020 The Matrix.org Foundation C.I.C.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package rooms
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/matrix-org/dendrite/federationsender/api"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/matrix-org/util"
|
||||
|
||||
pineconeRouter "github.com/matrix-org/pinecone/router"
|
||||
pineconeSessions "github.com/matrix-org/pinecone/sessions"
|
||||
)
|
||||
|
||||
type PineconeRoomProvider struct {
|
||||
r *pineconeRouter.Router
|
||||
s *pineconeSessions.Sessions
|
||||
fedSender api.FederationSenderInternalAPI
|
||||
fedClient *gomatrixserverlib.FederationClient
|
||||
}
|
||||
|
||||
func NewPineconeRoomProvider(
|
||||
r *pineconeRouter.Router,
|
||||
s *pineconeSessions.Sessions,
|
||||
fedSender api.FederationSenderInternalAPI,
|
||||
fedClient *gomatrixserverlib.FederationClient,
|
||||
) *PineconeRoomProvider {
|
||||
p := &PineconeRoomProvider{
|
||||
r: r,
|
||||
s: s,
|
||||
fedSender: fedSender,
|
||||
fedClient: fedClient,
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *PineconeRoomProvider) Rooms() []gomatrixserverlib.PublicRoom {
|
||||
known := p.r.KnownNodes()
|
||||
//known = append(known, p.s.Sessions()...)
|
||||
list := []gomatrixserverlib.ServerName{}
|
||||
for _, k := range known {
|
||||
list = append(list, gomatrixserverlib.ServerName(k.String()))
|
||||
}
|
||||
return bulkFetchPublicRoomsFromServers(context.Background(), p.fedClient, list)
|
||||
}
|
||||
|
||||
// bulkFetchPublicRoomsFromServers fetches public rooms from the list of homeservers.
|
||||
// Returns a list of public rooms.
|
||||
func bulkFetchPublicRoomsFromServers(
|
||||
ctx context.Context, fedClient *gomatrixserverlib.FederationClient,
|
||||
homeservers []gomatrixserverlib.ServerName,
|
||||
) (publicRooms []gomatrixserverlib.PublicRoom) {
|
||||
limit := 200
|
||||
// follow pipeline semantics, see https://blog.golang.org/pipelines for more info.
|
||||
// goroutines send rooms to this channel
|
||||
roomCh := make(chan gomatrixserverlib.PublicRoom, int(limit))
|
||||
// signalling channel to tell goroutines to stop sending rooms and quit
|
||||
done := make(chan bool)
|
||||
// signalling to say when we can close the room channel
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(homeservers))
|
||||
// concurrently query for public rooms
|
||||
reqctx, reqcancel := context.WithTimeout(ctx, time.Second*5)
|
||||
for _, hs := range homeservers {
|
||||
go func(homeserverDomain gomatrixserverlib.ServerName) {
|
||||
defer wg.Done()
|
||||
util.GetLogger(reqctx).WithField("hs", homeserverDomain).Info("Querying HS for public rooms")
|
||||
fres, err := fedClient.GetPublicRooms(reqctx, homeserverDomain, int(limit), "", false, "")
|
||||
if err != nil {
|
||||
util.GetLogger(reqctx).WithError(err).WithField("hs", homeserverDomain).Warn(
|
||||
"bulkFetchPublicRoomsFromServers: failed to query hs",
|
||||
)
|
||||
return
|
||||
}
|
||||
for _, room := range fres.Chunk {
|
||||
// atomically send a room or stop
|
||||
select {
|
||||
case roomCh <- room:
|
||||
case <-done:
|
||||
case <-reqctx.Done():
|
||||
util.GetLogger(reqctx).WithError(err).WithField("hs", homeserverDomain).Info("Interrupted whilst sending rooms")
|
||||
return
|
||||
}
|
||||
}
|
||||
}(hs)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-time.After(5 * time.Second):
|
||||
default:
|
||||
wg.Wait()
|
||||
}
|
||||
reqcancel()
|
||||
close(done)
|
||||
close(roomCh)
|
||||
|
||||
for room := range roomCh {
|
||||
publicRooms = append(publicRooms, room)
|
||||
}
|
||||
|
||||
return publicRooms
|
||||
}
|
||||
|
|
@@ -1,6 +1,6 @@
# Yggdrasil Demo

This is the Dendrite Yggdrasil demo! It's easy to get started - all you need is Go 1.13 or later.
This is the Dendrite Yggdrasil demo! It's easy to get started - all you need is Go 1.14 or later.

To run the homeserver, start at the root of the Dendrite repository and run:
|
||||
|
||||
|
|
|
|||
|
|
@ -114,7 +114,7 @@ func main() {
|
|||
asAPI := appservice.NewInternalAPI(base, userAPI, rsAPI)
|
||||
rsAPI.SetAppserviceAPI(asAPI)
|
||||
fsAPI := federationsender.NewInternalAPI(
|
||||
base, federation, rsAPI, keyRing,
|
||||
base, federation, rsAPI, keyRing, true,
|
||||
)
|
||||
|
||||
ygg.SetSessionFunc(func(address string) {
|
||||
|
|
|
|||
|
|
@ -99,7 +99,7 @@ func main() {
|
|||
}
|
||||
|
||||
fsAPI := federationsender.NewInternalAPI(
|
||||
base, federation, rsAPI, keyRing,
|
||||
base, federation, rsAPI, keyRing, false,
|
||||
)
|
||||
if base.UseHTTPAPIs {
|
||||
federationsender.AddInternalRoutes(base.InternalAPIMux, fsAPI)
|
||||
|
|
|
|||
|
|
@ -28,7 +28,7 @@ func FederationSender(base *setup.BaseDendrite, cfg *config.Dendrite) {
|
|||
|
||||
rsAPI := base.RoomserverHTTPClient()
|
||||
fsAPI := federationsender.NewInternalAPI(
|
||||
base, federation, rsAPI, keyRing,
|
||||
base, federation, rsAPI, keyRing, false,
|
||||
)
|
||||
federationsender.AddInternalRoutes(base.InternalAPIMux, fsAPI)
|
||||
|
||||
|
|
|
|||
|
|
@ -210,7 +210,7 @@ func main() {
|
|||
base, userAPI, rsAPI,
|
||||
)
|
||||
rsAPI.SetAppserviceAPI(asQuery)
|
||||
fedSenderAPI := federationsender.NewInternalAPI(base, federation, rsAPI, &keyRing)
|
||||
fedSenderAPI := federationsender.NewInternalAPI(base, federation, rsAPI, &keyRing, true)
|
||||
rsAPI.SetFederationSenderAPI(fedSenderAPI)
|
||||
p2pPublicRoomProvider := NewLibP2PPublicRoomsProvider(node, fedSenderAPI, federation)
|
||||
|
||||
|
|
|
|||
|
|
@ -248,7 +248,8 @@ media_api:
|
|||
base_path: ./media_store
|
||||
|
||||
# The maximum allowed file size (in bytes) for media uploads to this homeserver
|
||||
# (0 = unlimited).
|
||||
# (0 = unlimited). If using a reverse proxy, ensure it allows requests at
|
||||
# least this large (e.g. client_max_body_size in nginx.)
|
||||
max_file_size_bytes: 10485760
|
||||
|
||||
# Whether to dynamically generate thumbnails if needed.
|
||||
|
|
@ -360,6 +361,11 @@ user_api:
|
|||
max_open_conns: 10
|
||||
max_idle_conns: 2
|
||||
conn_max_lifetime: -1
|
||||
# The length of time that a token issued for a relying party from
|
||||
# /_matrix/client/r0/user/{userId}/openid/request_token endpoint
|
||||
# is considered to be valid in milliseconds.
|
||||
# The default lifetime is 3600000ms (60 minutes).
|
||||
# openid_token_lifetime_ms: 3600000
|
||||
|
||||
# Configuration for Opentracing.
|
||||
# See https://github.com/matrix-org/dendrite/tree/master/docs/tracing for information on
|
||||
|
|
@ -383,4 +389,4 @@ logging:
|
|||
- type: file
|
||||
level: info
|
||||
params:
|
||||
path: /var/log/dendrite
|
||||
path: ./logs
|
||||
|
|
|
|||
|
|
@ -25,7 +25,7 @@ use in production environments just yet!
|
|||
|
||||
Dendrite requires:
|
||||
|
||||
* Go 1.13 or higher
|
||||
* Go 1.14 or higher
|
||||
* Postgres 9.6 or higher (if using Postgres databases, not needed for SQLite)
|
||||
|
||||
If you want to run a polylith deployment, you also need:
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
server {
|
||||
listen 443 ssl; # IPv4
|
||||
listen [::]:443; # IPv6
|
||||
listen [::]:443 ssl; # IPv6
|
||||
server_name my.hostname.com;
|
||||
|
||||
ssl_certificate /path/to/fullchain.pem;
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
server {
|
||||
listen 443 ssl; # IPv4
|
||||
listen [::]:443; # IPv6
|
||||
listen [::]:443 ssl; # IPv6
|
||||
server_name my.hostname.com;
|
||||
|
||||
ssl_certificate /path/to/fullchain.pem;
|
||||
|
|
|
65
federationapi/routing/openid.go
Normal file

@ -0,0 +1,65 @@
// Copyright 2021 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package routing

import (
    "net/http"
    "time"

    "github.com/matrix-org/dendrite/clientapi/jsonerror"
    userapi "github.com/matrix-org/dendrite/userapi/api"
    "github.com/matrix-org/util"
)

type openIDUserInfoResponse struct {
    Sub string `json:"sub"`
}

// GetOpenIDUserInfo implements GET /_matrix/federation/v1/openid/userinfo
func GetOpenIDUserInfo(
    httpReq *http.Request,
    userAPI userapi.UserInternalAPI,
) util.JSONResponse {
    token := httpReq.URL.Query().Get("access_token")
    if len(token) == 0 {
        return util.JSONResponse{
            Code: http.StatusUnauthorized,
            JSON: jsonerror.MissingArgument("access_token is missing"),
        }
    }

    req := userapi.QueryOpenIDTokenRequest{
        Token: token,
    }

    var openIDTokenAttrResponse userapi.QueryOpenIDTokenResponse
    err := userAPI.QueryOpenIDToken(httpReq.Context(), &req, &openIDTokenAttrResponse)
    if err != nil {
        util.GetLogger(httpReq.Context()).WithError(err).Error("userAPI.QueryOpenIDToken failed")
    }

    var res interface{} = openIDUserInfoResponse{Sub: openIDTokenAttrResponse.Sub}
    code := http.StatusOK
    nowMS := time.Now().UnixNano() / int64(time.Millisecond)
    if openIDTokenAttrResponse.Sub == "" || nowMS > openIDTokenAttrResponse.ExpiresAtMS {
        code = http.StatusUnauthorized
        res = jsonerror.UnknownToken("Access Token unknown or expired")
    }

    return util.JSONResponse{
        Code: code,
        JSON: res,
    }
}
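For reference, a relying party that has obtained an OpenID access token could query the new endpoint roughly as follows. The homeserver URL and token are placeholders; the response shape follows the openIDUserInfoResponse struct above.

```go
package main

import (
    "encoding/json"
    "fmt"
    "net/http"
    "net/url"
)

// userInfo mirrors the openIDUserInfoResponse struct served by the endpoint.
type userInfo struct {
    Sub string `json:"sub"`
}

func main() {
    // Placeholder homeserver and token - substitute real values.
    base := "https://matrix.example.com"
    token := "SOME_OPENID_ACCESS_TOKEN"

    u := base + "/_matrix/federation/v1/openid/userinfo?access_token=" + url.QueryEscape(token)
    resp, err := http.Get(u)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        fmt.Println("token unknown or expired:", resp.Status)
        return
    }

    var info userInfo
    if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
        panic(err)
    }
    fmt.Println("token belongs to", info.Sub) // e.g. @alice:example.com
}
```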
@ -21,6 +21,7 @@ import (
    "github.com/matrix-org/dendrite/clientapi/jsonerror"
    eduserverAPI "github.com/matrix-org/dendrite/eduserver/api"
    federationSenderAPI "github.com/matrix-org/dendrite/federationsender/api"
    "github.com/matrix-org/dendrite/internal"
    "github.com/matrix-org/dendrite/internal/httputil"
    keyserverAPI "github.com/matrix-org/dendrite/keyserver/api"
    roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"

@ -92,12 +93,13 @@ func Setup(
    v2keysmux.Handle("/query", notaryKeys).Methods(http.MethodPost)
    v2keysmux.Handle("/query/{serverName}/{keyID}", notaryKeys).Methods(http.MethodGet)

    mu := internal.NewMutexByRoom()
    v1fedmux.Handle("/send/{txnID}", httputil.MakeFedAPI(
        "federation_send", cfg.Matrix.ServerName, keys, wakeup,
        func(httpReq *http.Request, request *gomatrixserverlib.FederationRequest, vars map[string]string) util.JSONResponse {
            return Send(
                httpReq, request, gomatrixserverlib.TransactionID(vars["txnID"]),
                cfg, rsAPI, eduAPI, keyAPI, keys, federation,
                cfg, rsAPI, eduAPI, keyAPI, keys, federation, mu,
            )
        },
    )).Methods(http.MethodPut, http.MethodOptions)
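The internal.NewMutexByRoom() helper referenced above is not part of this diff. Assuming it behaves as a keyed mutex (one lock per room ID, as the Lock(roomID)/Unlock(roomID) calls later in this changeset suggest), a minimal sketch might look like this:

```go
package internal

import "sync"

// MutexByRoom is a sketch of a per-room lock: callers Lock/Unlock with a
// room ID and only contend with other callers touching the same room.
// This is an assumption about the helper's shape, not the real implementation.
type MutexByRoom struct {
    mu    sync.Mutex
    locks map[string]*sync.Mutex
}

func NewMutexByRoom() *MutexByRoom {
    return &MutexByRoom{locks: make(map[string]*sync.Mutex)}
}

func (m *MutexByRoom) forRoom(roomID string) *sync.Mutex {
    m.mu.Lock()
    defer m.mu.Unlock()
    if _, ok := m.locks[roomID]; !ok {
        m.locks[roomID] = &sync.Mutex{}
    }
    return m.locks[roomID]
}

func (m *MutexByRoom) Lock(roomID string)   { m.forRoom(roomID).Lock() }
func (m *MutexByRoom) Unlock(roomID string) { m.forRoom(roomID).Unlock() }
```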
@ -460,4 +462,10 @@ func Setup(
            return QueryDeviceKeys(httpReq, request, keyAPI, cfg.Matrix.ServerName)
        },
    )).Methods(http.MethodPost)

    v1fedmux.Handle("/openid/userinfo",
        httputil.MakeExternalAPI("federation_openid_userinfo", func(req *http.Request) util.JSONResponse {
            return GetOpenIDUserInfo(req, userAPI)
        }),
    ).Methods(http.MethodGet)
}
@ -26,6 +26,7 @@ import (
    "github.com/getsentry/sentry-go"
    "github.com/matrix-org/dendrite/clientapi/jsonerror"
    eduserverAPI "github.com/matrix-org/dendrite/eduserver/api"
    "github.com/matrix-org/dendrite/internal"
    keyapi "github.com/matrix-org/dendrite/keyserver/api"
    "github.com/matrix-org/dendrite/roomserver/api"
    "github.com/matrix-org/dendrite/setup/config"

@ -98,6 +99,7 @@ func Send(
    keyAPI keyapi.KeyInternalAPI,
    keys gomatrixserverlib.JSONVerifier,
    federation *gomatrixserverlib.FederationClient,
    mu *internal.MutexByRoom,
) util.JSONResponse {
    t := txnReq{
        rsAPI: rsAPI,

@ -107,6 +109,7 @@ func Send(
        haveEvents: make(map[string]*gomatrixserverlib.HeaderedEvent),
        newEvents:  make(map[string]bool),
        keyAPI:     keyAPI,
        roomsMu:    mu,
    }

    var txnEvents struct {

@ -163,6 +166,7 @@ type txnReq struct {
    federation   txnFederationClient
    servers      []gomatrixserverlib.ServerName
    serversMutex sync.RWMutex
    roomsMu      *internal.MutexByRoom
    // local cache of events for auth checks, etc - this may include events
    // which the roomserver is unaware of.
    haveEvents map[string]*gomatrixserverlib.HeaderedEvent

@ -614,7 +618,9 @@ func checkAllowedByState(e *gomatrixserverlib.Event, stateEvents []*gomatrixserv
    return gomatrixserverlib.Allowed(e, &authUsingState)
}

func (t *txnReq) processEventWithMissingState(ctx context.Context, e *gomatrixserverlib.Event, roomVersion gomatrixserverlib.RoomVersion) error {
func (t *txnReq) processEventWithMissingState(
    ctx context.Context, e *gomatrixserverlib.Event, roomVersion gomatrixserverlib.RoomVersion,
) error {
    // Do this with a fresh context, so that we keep working even if the
    // original request times out. With any luck, by the time the remote
    // side retries, we'll have fetched the missing state.

@ -710,7 +716,9 @@ func (t *txnReq) processEventWithMissingState(ctx context.Context, e *gomatrixse
        respStates[i] = states[i].RespState
    }
    // There's more than one previous state - run them all through state res
    t.roomsMu.Lock(e.RoomID())
    resolvedState, err = t.resolveStatesAndCheck(gmectx, roomVersion, respStates, backwardsExtremity)
    t.roomsMu.Unlock(e.RoomID())
    if err != nil {
        util.GetLogger(ctx).WithError(err).Errorf("Failed to resolve state conflicts for event %s", backwardsExtremity.EventID())
        return err

@ -778,7 +786,7 @@ func (t *txnReq) lookupStateAfterEvent(ctx context.Context, roomVersion gomatrix
    default:
        return nil, false, fmt.Errorf("t.lookupEvent: %w", err)
    }
    t.haveEvents[h.EventID()] = h
    t.cacheAndReturn(h)
    if h.StateKey() != nil {
        addedToState := false
        for i := range respState.StateEvents {

@ -797,6 +805,14 @@ func (t *txnReq) lookupStateAfterEvent(ctx context.Context, roomVersion gomatrix
    return respState, false, nil
}

func (t *txnReq) cacheAndReturn(ev *gomatrixserverlib.HeaderedEvent) *gomatrixserverlib.HeaderedEvent {
    if cached, exists := t.haveEvents[ev.EventID()]; exists {
        return cached
    }
    t.haveEvents[ev.EventID()] = ev
    return ev
}

func (t *txnReq) lookupStateAfterEventLocally(ctx context.Context, roomID, eventID string) *gomatrixserverlib.RespState {
    var res api.QueryStateAfterEventsResponse
    err := t.rsAPI.QueryStateAfterEvents(ctx, &api.QueryStateAfterEventsRequest{
@ -804,15 +820,21 @@ func (t *txnReq) lookupStateAfterEventLocally(ctx context.Context, roomID, event
        PrevEventIDs: []string{eventID},
    }, &res)
    if err != nil || !res.PrevEventsExist {
        util.GetLogger(ctx).WithError(err).Warnf("failed to query state after %s locally", eventID)
        util.GetLogger(ctx).WithField("room_id", roomID).WithError(err).Warnf("failed to query state after %s locally, prev exists=%v", eventID, res.PrevEventsExist)
        return nil
    }
    stateEvents := make([]*gomatrixserverlib.HeaderedEvent, len(res.StateEvents))
    for i, ev := range res.StateEvents {
        t.haveEvents[ev.EventID()] = res.StateEvents[i]
        // set the event from the haveEvents cache - this means we will share pointers with other prev_event branches for this
        // processEvent request, which is better for memory.
        stateEvents[i] = t.cacheAndReturn(ev)
    }
    // we should never access res.StateEvents again so we delete it here to make GC faster
    res.StateEvents = nil

    var authEvents []*gomatrixserverlib.Event
    missingAuthEvents := map[string]bool{}
    for _, ev := range res.StateEvents {
    for _, ev := range stateEvents {
        for _, ae := range ev.AuthEventIDs() {
            if aev, ok := t.haveEvents[ae]; ok {
                authEvents = append(authEvents, aev.Unwrap())

@ -837,14 +859,13 @@ func (t *txnReq) lookupStateAfterEventLocally(ctx context.Context, roomID, event
                return nil
            }
            for i := range queryRes.Events {
                evID := queryRes.Events[i].EventID()
                t.haveEvents[evID] = queryRes.Events[i]
                authEvents = append(authEvents, queryRes.Events[i].Unwrap())
                authEvents = append(authEvents, t.cacheAndReturn(queryRes.Events[i]).Unwrap())
            }
            queryRes.Events = nil
        }

    return &gomatrixserverlib.RespState{
        StateEvents: gomatrixserverlib.UnwrapEventHeaders(res.StateEvents),
        StateEvents: gomatrixserverlib.UnwrapEventHeaders(stateEvents),
        AuthEvents:  authEvents,
    }
}

@ -854,8 +875,6 @@ func (t *txnReq) lookupStateAfterEventLocally(ctx context.Context, roomID, event
func (t *txnReq) lookupStateBeforeEvent(ctx context.Context, roomVersion gomatrixserverlib.RoomVersion, roomID, eventID string) (
    *gomatrixserverlib.RespState, error) {

    util.GetLogger(ctx).Infof("lookupStateBeforeEvent %s", eventID)

    // Attempt to fetch the missing state using /state_ids and /events
    return t.lookupMissingStateViaStateIDs(ctx, roomID, eventID, roomVersion)
}

@ -986,7 +1005,6 @@ Event:
        }
    }

    // we processed everything!
    return newEvents, nil
}

@ -1005,7 +1023,7 @@ func (t *txnReq) lookupMissingStateViaState(ctx context.Context, roomID, eventID
func (t *txnReq) lookupMissingStateViaStateIDs(ctx context.Context, roomID, eventID string, roomVersion gomatrixserverlib.RoomVersion) (
    *gomatrixserverlib.RespState, error) {
    util.GetLogger(ctx).Infof("lookupMissingStateViaStateIDs %s", eventID)
    util.GetLogger(ctx).WithField("room_id", roomID).Infof("lookupMissingStateViaStateIDs %s", eventID)
    // fetch the state event IDs at the time of the event
    stateIDs, err := t.federation.LookupStateIDs(ctx, t.Origin, roomID, eventID)
    if err != nil {

@ -1034,14 +1052,16 @@ func (t *txnReq) lookupMissingStateViaStateIDs(ctx context.Context, roomID, even
    }
    for i := range queryRes.Events {
        evID := queryRes.Events[i].EventID()
        t.haveEvents[evID] = queryRes.Events[i]
        t.cacheAndReturn(queryRes.Events[i])
        if missing[evID] {
            delete(missing, evID)
        }
    }
    queryRes.Events = nil // allow it to be GCed

    concurrentRequests := 8
    missingCount := len(missing)
    util.GetLogger(ctx).WithField("room_id", roomID).WithField("event_id", eventID).Infof("lookupMissingStateViaStateIDs missing %d/%d events", missingCount, len(wantIDs))

    // If over 50% of the auth/state events from /state_ids are missing
    // then we'll just call /state instead, otherwise we'll just end up

@ -1106,7 +1126,7 @@ func (t *txnReq) lookupMissingStateViaStateIDs(ctx context.Context, roomID, even
            return
        }
        haveEventsMutex.Lock()
        t.haveEvents[h.EventID()] = h
        t.cacheAndReturn(h)
        haveEventsMutex.Unlock()
    }
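The new cacheAndReturn helper threaded through the hunks above is a straightforward de-duplicating cache. The toy example below, with a made-up event type standing in for the real headered event, shows the pointer-sharing behaviour it relies on:

```go
package main

import "fmt"

// event is a stand-in for *gomatrixserverlib.HeaderedEvent.
type event struct{ id string }

type cache struct{ have map[string]*event }

// cacheAndReturn returns the already-cached pointer if one exists, so every
// code path ends up holding the same copy of a given event in memory.
func (c *cache) cacheAndReturn(ev *event) *event {
    if cached, ok := c.have[ev.id]; ok {
        return cached
    }
    c.have[ev.id] = ev
    return ev
}

func main() {
    c := &cache{have: map[string]*event{}}
    a := c.cacheAndReturn(&event{id: "$abc"})
    b := c.cacheAndReturn(&event{id: "$abc"}) // second copy is dropped
    fmt.Println(a == b)                       // true: both branches share one pointer
}
```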
@ -9,6 +9,7 @@ import (
    "time"

    eduAPI "github.com/matrix-org/dendrite/eduserver/api"
    "github.com/matrix-org/dendrite/internal"
    "github.com/matrix-org/dendrite/internal/test"
    "github.com/matrix-org/dendrite/roomserver/api"
    "github.com/matrix-org/gomatrixserverlib"

@ -370,6 +371,7 @@ func mustCreateTransaction(rsAPI api.RoomserverInternalAPI, fedClient txnFederat
        federation: fedClient,
        haveEvents: make(map[string]*gomatrixserverlib.HeaderedEvent),
        newEvents:  make(map[string]bool),
        roomsMu:    internal.NewMutexByRoom(),
    }
    t.PDUs = pdus
    t.Origin = testOrigin
@ -43,6 +43,7 @@ func NewInternalAPI(
    federation *gomatrixserverlib.FederationClient,
    rsAPI roomserverAPI.RoomserverInternalAPI,
    keyRing *gomatrixserverlib.KeyRing,
    resetBlacklist bool,
) api.FederationSenderInternalAPI {
    cfg := &base.Cfg.FederationSender

@ -51,6 +52,10 @@ func NewInternalAPI(
        logrus.WithError(err).Panic("failed to connect to federation sender db")
    }

    if resetBlacklist {
        _ = federationSenderDB.RemoveAllServersFromBlacklist()
    }

    stats := &statistics.Statistics{
        DB:                     federationSenderDB,
        FailuresUntilBlacklist: cfg.FederationMaxRetries,

@ -572,6 +572,7 @@ func (r *FederationSenderInternalAPI) PerformServersAlive(
    response *api.PerformServersAliveResponse,
) (err error) {
    for _, srv := range request.Servers {
        _ = r.db.RemoveServerFromBlacklist(srv)
        r.queues.RetryServer(srv)
    }
@ -54,6 +54,7 @@ type Database interface {
    // these don't have contexts passed in as we want things to happen regardless of the request context
    AddServerToBlacklist(serverName gomatrixserverlib.ServerName) error
    RemoveServerFromBlacklist(serverName gomatrixserverlib.ServerName) error
    RemoveAllServersFromBlacklist() error
    IsServerBlacklisted(serverName gomatrixserverlib.ServerName) (bool, error)

    AddOutboundPeek(ctx context.Context, serverName gomatrixserverlib.ServerName, roomID, peekID string, renewalInterval int64) error
@ -40,11 +40,15 @@ const selectBlacklistSQL = "" +
const deleteBlacklistSQL = "" +
    "DELETE FROM federationsender_blacklist WHERE server_name = $1"

const deleteAllBlacklistSQL = "" +
    "TRUNCATE federationsender_blacklist"

type blacklistStatements struct {
    db                  *sql.DB
    insertBlacklistStmt *sql.Stmt
    selectBlacklistStmt *sql.Stmt
    deleteBlacklistStmt *sql.Stmt
    db                     *sql.DB
    insertBlacklistStmt    *sql.Stmt
    selectBlacklistStmt    *sql.Stmt
    deleteBlacklistStmt    *sql.Stmt
    deleteAllBlacklistStmt *sql.Stmt
}

func NewPostgresBlacklistTable(db *sql.DB) (s *blacklistStatements, err error) {

@ -65,11 +69,12 @@ func NewPostgresBlacklistTable(db *sql.DB) (s *blacklistStatements, err error) {
    if s.deleteBlacklistStmt, err = db.Prepare(deleteBlacklistSQL); err != nil {
        return
    }
    if s.deleteAllBlacklistStmt, err = db.Prepare(deleteAllBlacklistSQL); err != nil {
        return
    }
    return
}

// insertRoom inserts the room if it didn't already exist.
// If the room didn't exist then last_event_id is set to the empty string.
func (s *blacklistStatements) InsertBlacklist(
    ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName,
) error {

@ -78,9 +83,6 @@ func (s *blacklistStatements) InsertBlacklist(
    return err
}

// selectRoomForUpdate locks the row for the room and returns the last_event_id.
// The row must already exist in the table. Callers can ensure that the row
// exists by calling insertRoom first.
func (s *blacklistStatements) SelectBlacklist(
    ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName,
) (bool, error) {

@ -96,8 +98,6 @@ func (s *blacklistStatements) SelectBlacklist(
    return res.Next(), nil
}

// updateRoom updates the last_event_id for the room. selectRoomForUpdate should
// have already been called earlier within the transaction.
func (s *blacklistStatements) DeleteBlacklist(
    ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName,
) error {

@ -105,3 +105,11 @@ func (s *blacklistStatements) DeleteBlacklist(
    _, err := stmt.ExecContext(ctx, serverName)
    return err
}

func (s *blacklistStatements) DeleteAllBlacklist(
    ctx context.Context, txn *sql.Tx,
) error {
    stmt := sqlutil.TxStmt(txn, s.deleteAllBlacklistStmt)
    _, err := stmt.ExecContext(ctx)
    return err
}

@ -48,7 +48,7 @@ CREATE INDEX IF NOT EXISTS federatonsender_joined_hosts_room_id_idx

const insertJoinedHostsSQL = "" +
    "INSERT INTO federationsender_joined_hosts (room_id, event_id, server_name)" +
    " VALUES ($1, $2, $3)"
    " VALUES ($1, $2, $3) ON CONFLICT DO NOTHING"

const deleteJoinedHostsSQL = "" +
    "DELETE FROM federationsender_joined_hosts WHERE event_id = ANY($1)"

@ -148,6 +148,12 @@ func (d *Database) RemoveServerFromBlacklist(serverName gomatrixserverlib.Server
    })
}

func (d *Database) RemoveAllServersFromBlacklist() error {
    return d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
        return d.FederationSenderBlacklist.DeleteAllBlacklist(context.TODO(), txn)
    })
}

func (d *Database) IsServerBlacklisted(serverName gomatrixserverlib.ServerName) (bool, error) {
    return d.FederationSenderBlacklist.SelectBlacklist(context.TODO(), nil, serverName)
}
@ -40,11 +40,15 @@ const selectBlacklistSQL = "" +
const deleteBlacklistSQL = "" +
    "DELETE FROM federationsender_blacklist WHERE server_name = $1"

const deleteAllBlacklistSQL = "" +
    "DELETE FROM federationsender_blacklist"

type blacklistStatements struct {
    db                  *sql.DB
    insertBlacklistStmt *sql.Stmt
    selectBlacklistStmt *sql.Stmt
    deleteBlacklistStmt *sql.Stmt
    db                     *sql.DB
    insertBlacklistStmt    *sql.Stmt
    selectBlacklistStmt    *sql.Stmt
    deleteBlacklistStmt    *sql.Stmt
    deleteAllBlacklistStmt *sql.Stmt
}

func NewSQLiteBlacklistTable(db *sql.DB) (s *blacklistStatements, err error) {

@ -65,11 +69,12 @@ func NewSQLiteBlacklistTable(db *sql.DB) (s *blacklistStatements, err error) {
    if s.deleteBlacklistStmt, err = db.Prepare(deleteBlacklistSQL); err != nil {
        return
    }
    if s.deleteAllBlacklistStmt, err = db.Prepare(deleteAllBlacklistSQL); err != nil {
        return
    }
    return
}

// insertRoom inserts the room if it didn't already exist.
// If the room didn't exist then last_event_id is set to the empty string.
func (s *blacklistStatements) InsertBlacklist(
    ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName,
) error {

@ -78,9 +83,6 @@ func (s *blacklistStatements) InsertBlacklist(
    return err
}

// selectRoomForUpdate locks the row for the room and returns the last_event_id.
// The row must already exist in the table. Callers can ensure that the row
// exists by calling insertRoom first.
func (s *blacklistStatements) SelectBlacklist(
    ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName,
) (bool, error) {

@ -96,8 +98,6 @@ func (s *blacklistStatements) SelectBlacklist(
    return res.Next(), nil
}

// updateRoom updates the last_event_id for the room. selectRoomForUpdate should
// have already been called earlier within the transaction.
func (s *blacklistStatements) DeleteBlacklist(
    ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName,
) error {

@ -105,3 +105,11 @@ func (s *blacklistStatements) DeleteBlacklist(
    _, err := stmt.ExecContext(ctx, serverName)
    return err
}

func (s *blacklistStatements) DeleteAllBlacklist(
    ctx context.Context, txn *sql.Tx,
) error {
    stmt := sqlutil.TxStmt(txn, s.deleteAllBlacklistStmt)
    _, err := stmt.ExecContext(ctx)
    return err
}

@ -47,7 +47,7 @@ CREATE INDEX IF NOT EXISTS federatonsender_joined_hosts_room_id_idx
`

const insertJoinedHostsSQL = "" +
    "INSERT INTO federationsender_joined_hosts (room_id, event_id, server_name)" +
    "INSERT OR IGNORE INTO federationsender_joined_hosts (room_id, event_id, server_name)" +
    " VALUES ($1, $2, $3)"

const deleteJoinedHostsSQL = "" +

@ -60,6 +60,7 @@ type FederationSenderBlacklist interface {
    InsertBlacklist(ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName) error
    SelectBlacklist(ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName) (bool, error)
    DeleteBlacklist(ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName) error
    DeleteAllBlacklist(ctx context.Context, txn *sql.Tx) error
}

type FederationSenderOutboundPeeks interface {
11
go.mod

@ -7,6 +7,7 @@ require (
    github.com/getsentry/sentry-go v0.10.0
    github.com/gologme/log v1.2.0
    github.com/gorilla/mux v1.8.0
    github.com/gorilla/websocket v1.4.2
    github.com/hashicorp/golang-lru v0.5.4
    github.com/lib/pq v1.9.0
    github.com/libp2p/go-libp2p v0.13.0

@ -17,15 +18,16 @@ require (
    github.com/libp2p/go-libp2p-kad-dht v0.11.1
    github.com/libp2p/go-libp2p-pubsub v0.4.1
    github.com/libp2p/go-libp2p-record v0.1.3
    github.com/lucas-clemente/quic-go v0.17.3
    github.com/lucas-clemente/quic-go v0.19.3
    github.com/matrix-org/dugong v0.0.0-20180820122854-51a565b5666b
    github.com/matrix-org/go-http-js-libp2p v0.0.0-20200518170932-783164aeeda4
    github.com/matrix-org/go-sqlite3-js v0.0.0-20200522092705-bc8506ccbcf3
    github.com/matrix-org/gomatrix v0.0.0-20200827122206-7dd5e2a05bcd
    github.com/matrix-org/gomatrixserverlib v0.0.0-20210302161955-6142fe3f8c2c
    github.com/matrix-org/gomatrixserverlib v0.0.0-20210525110027-8cb7699aa64a
    github.com/matrix-org/naffka v0.0.0-20201009174903-d26a3b9cb161
    github.com/matrix-org/pinecone v0.0.0-20210602111459-5cb0e6aa1a6a
    github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4
    github.com/mattn/go-sqlite3 v1.14.6
    github.com/mattn/go-sqlite3 v1.14.7-0.20210414154423-1157a4212dcb
    github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646
    github.com/ngrok/sqlmw v0.0.0-20200129213757-d5c93a81bec6
    github.com/opentracing/opentracing-go v1.2.0

@ -40,9 +42,10 @@ require (
    github.com/yggdrasil-network/yggdrasil-go v0.3.15-0.20210218094457-e77ca8019daa
    go.uber.org/atomic v1.7.0
    golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83
    golang.org/x/mobile v0.0.0-20210220033013-bdb1ca9a1e08
    golang.org/x/net v0.0.0-20210226172049-e18ecbb05110
    gopkg.in/h2non/bimg.v1 v1.1.5
    gopkg.in/yaml.v2 v2.4.0
)

go 1.13
go 1.14
|||
111
go.sum
111
go.sum
|
|
@ -13,6 +13,7 @@ github.com/Arceliar/phony v0.0.0-20191006174943-d0c68492aca0 h1:p3puK8Sl2xK+2Fnn
|
|||
github.com/Arceliar/phony v0.0.0-20191006174943-d0c68492aca0/go.mod h1:6Lkn+/zJilRMsKmbmG1RPoamiArC6HS73xbwRyp3UyI=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno=
|
||||
github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
|
||||
|
|
@ -24,6 +25,9 @@ github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go
|
|||
github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
|
||||
github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w=
|
||||
github.com/RyanCarrier/dijkstra v1.0.0/go.mod h1:5agGUBNEtUAGIANmbw09fuO3a2htPEkc1jNH01qxCWA=
|
||||
github.com/RyanCarrier/dijkstra-1 v0.0.0-20170512020943-0e5801a26345/go.mod h1:OK4EvWJ441LQqGzed5NGB6vKBAE34n3z7iayPcEwr30=
|
||||
github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
|
||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/sarama v1.26.1/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU=
|
||||
|
|
@ -36,11 +40,27 @@ github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/
|
|||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
|
||||
github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
|
||||
github.com/albertorestifo/dijkstra v0.0.0-20160910063646-aba76f725f72/go.mod h1:o+JdB7VetTHjLhU0N57x18B9voDBQe0paApdEAEoEfw=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c=
|
||||
github.com/anacrolix/envpprof v1.0.0/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c=
|
||||
github.com/anacrolix/envpprof v1.1.1 h1:sHQCyj7HtiSfaZAzL2rJrQdyS7odLqlwO6nhk/tG/j8=
|
||||
github.com/anacrolix/envpprof v1.1.1/go.mod h1:My7T5oSqVfEn4MD4Meczkw/f5lSIndGAKu/0SM/rkf4=
|
||||
github.com/anacrolix/log v0.3.0 h1:Btxh7GkT4JYWvWJ1uKOwgobf+7q/1eFQaDdCUXCtssw=
|
||||
github.com/anacrolix/log v0.3.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU=
|
||||
github.com/anacrolix/missinggo v1.1.2-0.20190815015349-b888af804467/go.mod h1:MBJu3Sk/k3ZfGYcS7z18gwfu72Ey/xopPFJJbTi5yIo=
|
||||
github.com/anacrolix/missinggo v1.2.1 h1:0IE3TqX5y5D0IxeMwTyIgqdDew4QrzcXaaEnJQyjHvw=
|
||||
github.com/anacrolix/missinggo v1.2.1/go.mod h1:J5cMhif8jPmFoC3+Uvob3OXXNIhOUikzMt+uUjeM21Y=
|
||||
github.com/anacrolix/missinggo/perf v1.0.0 h1:7ZOGYziGEBytW49+KmYGTaNfnwUqP1HBsy6BqESAJVw=
|
||||
github.com/anacrolix/missinggo/perf v1.0.0/go.mod h1:ljAFWkBuzkO12MQclXzZrosP5urunoLS0Cbvb4V0uMQ=
|
||||
github.com/anacrolix/sync v0.2.0 h1:oRe22/ZB+v7v/5Mbc4d2zE0AXEZy0trKyKLjqYOt6tY=
|
||||
github.com/anacrolix/sync v0.2.0/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DCOj/I0g=
|
||||
github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw=
|
||||
github.com/anacrolix/utp v0.1.0/go.mod h1:MDwc+vsGEq7RMw6lr2GKOEqjWny5hO5OZXRVNaBJ2Dk=
|
||||
github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
|
||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
|
|
@ -63,6 +83,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
|||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
|
||||
github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
|
||||
github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
|
||||
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 h1:GKTyiRCL6zVf5wWaqKnf+7Qs6GbEPfd4iMOitWzXJx8=
|
||||
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8/go.mod h1:spo1JLcs67NmW1aVLEgtA8Yy1elc+X8y5SRW1sFW4Og=
|
||||
github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8=
|
||||
github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
|
||||
github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
|
||||
|
|
@ -119,7 +143,9 @@ github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70d
|
|||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
|
||||
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q=
|
||||
|
|
@ -144,7 +170,6 @@ github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6 h1:u/UEqS66A5ckRmS4yNp
|
|||
github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ=
|
||||
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
|
||||
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
|
||||
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
|
||||
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
|
||||
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
|
||||
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
|
||||
|
|
@ -152,8 +177,9 @@ github.com/frankban/quicktest v1.0.0/go.mod h1:R98jIehRai+d1/3Hv2//jOVCTJhW1VBav
|
|||
github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o=
|
||||
github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=
|
||||
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc=
|
||||
github.com/getsentry/sentry-go v0.10.0 h1:6gwY+66NHKqyZrdi6O2jGdo7wGdo9b3B69E01NFgT5g=
|
||||
github.com/getsentry/sentry-go v0.10.0/go.mod h1:kELm/9iCblqUYh+ZRML7PNdCvEuw24wBvJPYyi86cws=
|
||||
|
|
@ -161,6 +187,8 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME
|
|||
github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
|
||||
github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM=
|
||||
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
|
||||
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
|
||||
github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
||||
github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
|
||||
github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=
|
||||
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
|
||||
|
|
@ -185,13 +213,15 @@ github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
|
|||
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 h1:uHTyIjqVhYRhLbJ8nIiOJHkEZZ+5YoOsAbD3sk82NiE=
|
||||
github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.4.0 h1:Rd1kQnQu0Hq3qvJppYSG0HtP+f5LPPUiDswTLiEegLg=
|
||||
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc=
|
||||
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
|
|
@ -213,6 +243,7 @@ github.com/gologme/log v0.0.0-20181207131047-4e5d8ccb38e8/go.mod h1:gq31gQ8wEHkR
|
|||
github.com/gologme/log v1.2.0 h1:Ya5Ip/KD6FX7uH0S31QO87nCCSucKtF44TLbTtO7V4c=
|
||||
github.com/gologme/log v1.2.0/go.mod h1:gq31gQ8wEHkR+WekdWsqDuf8pXTUZA9BnnzTuPz1Y9U=
|
||||
github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
|
||||
github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
|
|
@ -239,6 +270,7 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
|
|||
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
|
||||
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
|
|
@ -291,6 +323,9 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p
|
|||
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
|
||||
github.com/hjson/hjson-go v3.0.2-0.20200316202735-d5d0e8b0617d+incompatible/go.mod h1:qsetwF8NlsTsOTwZTApNlTCerV+b2GjYRRcIk4JMFio=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
|
||||
github.com/huandu/xstrings v1.2.0 h1:yPeWdRnmynF7p+lLYz0H2tthW9lqhMJrQV/U7yy4wX0=
|
||||
github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
|
||||
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
|
||||
github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo=
|
||||
github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc=
|
||||
|
|
@ -385,6 +420,7 @@ github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u
|
|||
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
|
|
@ -640,8 +676,8 @@ github.com/libp2p/go-yamux/v2 v2.0.0 h1:vSGhAy5u6iHBq11ZDcyHH4Blcf9xlBhT4WQDoOE9
|
|||
github.com/libp2p/go-yamux/v2 v2.0.0/go.mod h1:NVWira5+sVUIU6tu1JWvaRn1dRnG+cawOJiflsAM+7U=
|
||||
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
|
||||
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
|
||||
github.com/lucas-clemente/quic-go v0.17.3 h1:jMX/MmDNCljfisgMmPGUcBJ+zUh9w3d3ia4YJjYS3TM=
|
||||
github.com/lucas-clemente/quic-go v0.17.3/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE=
|
||||
github.com/lucas-clemente/quic-go v0.19.3 h1:eCDQqvGBB+kCTkA0XrAFtNe81FMa0/fn4QSoeAbmiF4=
|
||||
github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8=
|
||||
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
|
||||
github.com/lxn/walk v0.0.0-20210112085537-c389da54e794/go.mod h1:E23UucZGqpuUANJooIbHWCufXvOcT6E7Stq81gU+CSQ=
|
||||
github.com/lxn/win v0.0.0-20201111105847-2a20daff6a55/go.mod h1:KxxjdtRkfNoYDCUP5ryK7XJJNTnpC8atvtmTheChOtk=
|
||||
|
|
@ -652,9 +688,11 @@ github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP
|
|||
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/marten-seemann/qpack v0.1.0/go.mod h1:LFt1NU/Ptjip0C2CPkhimBz5CGE3WGDAUWqna+CNTrI=
|
||||
github.com/marten-seemann/qtls v0.9.1 h1:O0YKQxNVPaiFgMng0suWEOY2Sb4LT2sRn9Qimq3Z1IQ=
|
||||
github.com/marten-seemann/qtls v0.9.1/go.mod h1:T1MmAdDPyISzxlK6kjRr0pcZFBVd1OZbBb/j3cvzHhk=
|
||||
github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc=
|
||||
github.com/marten-seemann/qtls v0.10.0 h1:ECsuYUKalRL240rRD4Ri33ISb7kAQ3qGDlrrl55b2pc=
|
||||
github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs=
|
||||
github.com/marten-seemann/qtls-go1-15 v0.1.1 h1:LIH6K34bPVttyXnUWixk0bzH6/N07VxbSabxn5A5gZQ=
|
||||
github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I=
|
||||
github.com/matrix-org/dugong v0.0.0-20180820122854-51a565b5666b h1:xpcmnpfUImRC4O2SAS/dmTcJENDXvGmLUzey76V1R3Q=
|
||||
github.com/matrix-org/dugong v0.0.0-20180820122854-51a565b5666b/go.mod h1:NgPCr+UavRGH6n5jmdX8DuqFZ4JiCWIJoZiuhTRLSUg=
|
||||
github.com/matrix-org/go-http-js-libp2p v0.0.0-20200518170932-783164aeeda4 h1:eqE5OnGx9ZMWmrRbD3KF/3KtTunw0iQulI7YxOIdxo4=
|
||||
|
|
@ -664,10 +702,12 @@ github.com/matrix-org/go-sqlite3-js v0.0.0-20200522092705-bc8506ccbcf3/go.mod h1
|
|||
github.com/matrix-org/gomatrix v0.0.0-20190528120928-7df988a63f26/go.mod h1:3fxX6gUjWyI/2Bt7J1OLhpCzOfO/bB3AiX0cJtEKud0=
|
||||
github.com/matrix-org/gomatrix v0.0.0-20200827122206-7dd5e2a05bcd h1:xVrqJK3xHREMNjwjljkAUaadalWc0rRbmVuQatzmgwg=
|
||||
github.com/matrix-org/gomatrix v0.0.0-20200827122206-7dd5e2a05bcd/go.mod h1:/gBX06Kw0exX1HrwmoBibFA98yBk/jxKpGVeyQbff+s=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20210302161955-6142fe3f8c2c h1:vW3jBp1PnZ4uUqyS+JatJRYIl73ZLHkUkRzf1JdGxOI=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20210302161955-6142fe3f8c2c/go.mod h1:JsAzE1Ll3+gDWS9JSUHPJiiyAksvOOnGWF2nXdg4ZzU=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20210525110027-8cb7699aa64a h1:pVhOeJpD0gv5boUnihefPDuYkQ6xSdEVbH5ld5Vvve0=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20210525110027-8cb7699aa64a/go.mod h1:JsAzE1Ll3+gDWS9JSUHPJiiyAksvOOnGWF2nXdg4ZzU=
|
||||
github.com/matrix-org/naffka v0.0.0-20201009174903-d26a3b9cb161 h1:h1XVh05pLoC+nJjP3GIpj5wUsuC8WdHP3He0RTkRJTs=
|
||||
github.com/matrix-org/naffka v0.0.0-20201009174903-d26a3b9cb161/go.mod h1:sjyPyRxKM5uw1nD2cJ6O2OxI6GOqyVBfNXqKjBZTBZE=
|
||||
github.com/matrix-org/pinecone v0.0.0-20210602111459-5cb0e6aa1a6a h1:BE/cfpyHO2ua1BK4Tibr+2oZCV3H1mC9G7g7Yvl1AmM=
|
||||
github.com/matrix-org/pinecone v0.0.0-20210602111459-5cb0e6aa1a6a/go.mod h1:UQzJS6UVyVwfkr+RLrdvBB1vLyECqe3fLYNcbRxv8SA=
|
||||
github.com/matrix-org/util v0.0.0-20190711121626-527ce5ddefc7/go.mod h1:vVQlW/emklohkZnOPwD3LrZUBqdfsbiyO3p1lNV8F6U=
|
||||
github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4 h1:eCEHXWDv9Rm335MSuB49mFUK44bwZPFSDde3ORE3syk=
|
||||
github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4/go.mod h1:vVQlW/emklohkZnOPwD3LrZUBqdfsbiyO3p1lNV8F6U=
|
||||
|
|
@ -684,9 +724,10 @@ github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcME
|
|||
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||
github.com/mattn/go-sqlite3 v1.14.2/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus=
|
||||
github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg=
|
||||
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
|
||||
github.com/mattn/go-sqlite3 v1.14.7-0.20210414154423-1157a4212dcb h1:ax2vG2unlxsjwS7PMRo4FECIfAdQLowd6ejWYwPQhBo=
|
||||
github.com/mattn/go-sqlite3 v1.14.7-0.20210414154423-1157a4212dcb/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
|
||||
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
|
||||
github.com/mattomatic/dijkstra v0.0.0-20130617153013-6f6d134eb237/go.mod h1:UOnLAUmVG5paym8pD3C4B9BQylUDC2vXFJJpT7JrlEA=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
|
||||
|
|
@ -725,6 +766,7 @@ github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjW
|
|||
github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
|
||||
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
|
||||
github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI=
|
||||
github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA=
|
||||
github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4=
|
||||
|
|
@ -789,6 +831,8 @@ github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy
|
|||
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
|
||||
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
|
||||
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
|
||||
github.com/neilalexander/utp v0.1.1-0.20210510143443-c1bac1cd577f h1:xcJVva0Ziw+Ud4AaY/g9OMNc7veEfsYVox3eItY2w8Q=
|
||||
github.com/neilalexander/utp v0.1.1-0.20210510143443-c1bac1cd577f/go.mod h1:ylsx0342RjGHjOoVKhR/wz/7Lhiusonihfj4QLxEMcU=
|
||||
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ=
|
||||
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8=
|
||||
github.com/ngrok/sqlmw v0.0.0-20200129213757-d5c93a81bec6 h1:evlcQnJY+v8XRRchV3hXzpHDl6GcEZeLXAhlH9Csdww=
|
||||
|
|
@ -803,16 +847,16 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
|
|||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
|
||||
github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
|
||||
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
|
||||
github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg=
|
||||
github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
|
||||
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
|
||||
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
|
||||
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
|
||||
|
|
@ -830,6 +874,7 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI
|
|||
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
|
||||
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
|
||||
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
|
|
@ -887,6 +932,7 @@ github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR
|
|||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
|
||||
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
|
||||
github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
|
|
@ -920,6 +966,7 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf
|
|||
github.com/sirupsen/logrus v1.8.0 h1:nfhvjKcUMhBMVqbKHJlk5RPrrfYr/NMo3692g0dwfWU=
|
||||
github.com/sirupsen/logrus v1.8.0/go.mod h1:4GuYW9TZmE769R5STWrRakJc4UqQ3+QQ95fyz7ENv1A=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
|
|
@ -947,6 +994,7 @@ github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5J
|
|||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
|
|
@ -969,6 +1017,7 @@ github.com/tidwall/pretty v1.0.2/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhV
|
|||
github.com/tidwall/sjson v1.0.3/go.mod h1:bURseu1nuBkFpIES5cz6zBtjmYeOQmEESshn7VpF15Y=
|
||||
github.com/tidwall/sjson v1.1.5 h1:wsUceI/XDyZk3J1FUvuuYlK62zJv2HO2Pzb8A5EWdUE=
|
||||
github.com/tidwall/sjson v1.1.5/go.mod h1:VuJzsZnTowhSxWdOgsAnb886i4AjEyTkk7tNtsL7EYE=
|
||||
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U=
|
||||
github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
|
||||
|
|
@ -988,7 +1037,9 @@ github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV
|
|||
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
|
||||
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
|
||||
github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
|
||||
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
|
||||
github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
|
||||
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k=
|
||||
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc=
|
||||
github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM=
|
||||
|
|
@ -1000,6 +1051,7 @@ github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 h1:
|
|||
github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI=
|
||||
github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow=
|
||||
github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg=
|
||||
github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
|
||||
github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE=
|
||||
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
|
||||
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
|
||||
|
|
@ -1074,6 +1126,9 @@ golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWP
|
|||
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g=
|
||||
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
|
|
@ -1081,8 +1136,13 @@ golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTk
|
|||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20210220033013-bdb1ca9a1e08 h1:h+GZ3ubjuWaQjGe8owMGcmMVCqs0xYJtRG5y2bpHaqU=
|
||||
golang.org/x/mobile v0.0.0-20210220033013-bdb1ca9a1e08/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
|
|
@ -1098,7 +1158,6 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r
|
|||
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190228165749-92fc7df08ae7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
|
|
@ -1115,7 +1174,9 @@ golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLL
|
|||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw=
|
||||
|
|
@ -1153,12 +1214,14 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190526052359-791d8a0f4d09/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
|
@ -1168,14 +1231,16 @@ golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201018230417-eeed37f84f13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210105210732-16f7687f5001/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210309040221-94ec62e08169 h1:fpeMGRM6A+XFcw4RPCO8s8hH7ppgrGR22pSIjwM7YUI=
|
||||
golang.org/x/sys v0.0.0-20210309040221-94ec62e08169/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
|
@ -1198,6 +1263,7 @@ golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGm
|
|||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
|
|
@ -1207,9 +1273,11 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw
|
|||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114 h1:DnSr2mCsxyCE6ZgIkmcWUQY2R5cH/6wL7eIxEmQOMSE=
|
||||
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69 h1:yBHHx+XZqXJBm6Exke3N7V9gnlsyXxoCPEb1yVenjfk=
|
||||
golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
|
@ -1217,6 +1285,7 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1N
|
|||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.zx2c4.com/wireguard v0.0.0-20210203165646-9c7bd73be2cc/go.mod h1:r0ExowOoGFfDoLDxx+M9SYbNVsoZ0xviLL+K4f2mt+A=
|
||||
golang.zx2c4.com/wireguard v0.0.0-20210212170059-7a0fb5bbb172/go.mod h1:r0ExowOoGFfDoLDxx+M9SYbNVsoZ0xviLL+K4f2mt+A=
|
||||
golang.zx2c4.com/wireguard v0.0.0-20210510202332-9844c74f67ec/go.mod h1:a057zjmoc00UN7gVkaJt2sXVK523kMJcogDTEvPIasg=
|
||||
golang.zx2c4.com/wireguard/windows v0.3.5/go.mod h1:ATrIFNoq3rsK735WJiQzfWYyNFc9xLBhMMjW9DWIvnU=
|
||||
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
|
|
@ -1263,6 +1332,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
|
|||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||
|
|
@ -1302,6 +1372,7 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
|||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=

38 internal/mutex.go Normal file

@ -0,0 +1,38 @@
package internal

import "sync"

type MutexByRoom struct {
	mu       *sync.Mutex // protects the map
	roomToMu map[string]*sync.Mutex
}

func NewMutexByRoom() *MutexByRoom {
	return &MutexByRoom{
		mu:       &sync.Mutex{},
		roomToMu: make(map[string]*sync.Mutex),
	}
}

func (m *MutexByRoom) Lock(roomID string) {
	m.mu.Lock()
	roomMu := m.roomToMu[roomID]
	if roomMu == nil {
		roomMu = &sync.Mutex{}
	}
	m.roomToMu[roomID] = roomMu
	m.mu.Unlock()
	// don't lock inside m.mu else we can deadlock
	roomMu.Lock()
}

func (m *MutexByRoom) Unlock(roomID string) {
	m.mu.Lock()
	roomMu := m.roomToMu[roomID]
	if roomMu == nil {
		panic("MutexByRoom: Unlock before Lock")
	}
	m.mu.Unlock()

	roomMu.Unlock()
}
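
Editor's note: a minimal usage sketch (not part of this commit) of how a caller might serialise work per room with MutexByRoom; the handleRoomWork helper is hypothetical.

// Hypothetical caller: one MutexByRoom shared by all goroutines, locked per room ID.
var roomLocks = internal.NewMutexByRoom()

func handleRoomWork(roomID string, work func()) {
	roomLocks.Lock(roomID) // blocks only goroutines touching the same room
	defer roomLocks.Unlock(roomID)
	work()
}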

@ -72,10 +72,8 @@ func init() {
// we guarantee we will get around to it. Also, more users on a given server does not increase the number of requests
// (as /keys/query allows multiple users to be specified) so being stuck behind matrix.org won't materially be any worse
// than being stuck behind foo.bar
// In the event that the query fails, the worker spins up a short-lived goroutine whose sole purpose is to inject the server
// name back into the channel after a certain amount of time. If in the interim the device lists have been updated, then
// the database query will return no stale lists. Reinjection into the channel continues until success or the server terminates,
// when it will be reloaded on startup.
// In the event that the query fails, a lock is acquired and the server name along with the time to wait before retrying is
// set in a map. A restarter goroutine periodically probes this map and injects servers which are ready to be retried.
type DeviceListUpdater struct {
	// A map from user_id to a mutex. Used when we are missing prev IDs so we don't make more than 1
	// request to the remote server and race.

@ -297,42 +295,38 @@ func (u *DeviceListUpdater) clearChannel(userID string) {
}

func (u *DeviceListUpdater) worker(ch chan gomatrixserverlib.ServerName) {
	// It's possible to get many of the same server name in the channel, so in order
	// to prevent processing the same server over and over we keep track of when we
	// last made a request to the server. If we get the server name during the cooloff
	// period, we'll ignore the poke.
	lastProcessed := make(map[gomatrixserverlib.ServerName]time.Time)
	// this can't be too long else sytest will give up trying to do a test
	cooloffPeriod := 500 * time.Millisecond
	shouldProcess := func(srv gomatrixserverlib.ServerName) bool {
		// we should process requests when now is after the last process time + cooloff
		return time.Now().After(lastProcessed[srv].Add(cooloffPeriod))
	}

	// on failure, spin up a short-lived goroutine to inject the server name again.
	scheduledRetries := make(map[gomatrixserverlib.ServerName]time.Time)
	inject := func(srv gomatrixserverlib.ServerName, duration time.Duration) {
		time.Sleep(duration)
		ch <- srv
	}

	for serverName := range ch {
		if !shouldProcess(serverName) {
			if time.Now().Before(scheduledRetries[serverName]) {
				// do not inject into the channel as we know there will be a sleeping goroutine
				// which will do it after the cooloff period expires
				continue
			} else {
				scheduledRetries[serverName] = time.Now().Add(cooloffPeriod)
				go inject(serverName, cooloffPeriod)
				continue
	retries := make(map[gomatrixserverlib.ServerName]time.Time)
	retriesMu := &sync.Mutex{}
	// restarter goroutine which will inject failed servers into ch when it is time
	go func() {
		for {
			var serversToRetry []gomatrixserverlib.ServerName
			time.Sleep(time.Second)
			retriesMu.Lock()
			now := time.Now()
			for srv, retryAt := range retries {
				if now.After(retryAt) {
					serversToRetry = append(serversToRetry, srv)
				}
			}
			for _, srv := range serversToRetry {
				delete(retries, srv)
			}
			retriesMu.Unlock()
			for _, srv := range serversToRetry {
				ch <- srv
			}
		}
		lastProcessed[serverName] = time.Now()
	}()
	for serverName := range ch {
		waitTime, shouldRetry := u.processServer(serverName)
		if shouldRetry {
			scheduledRetries[serverName] = time.Now().Add(waitTime)
			go inject(serverName, waitTime)
			retriesMu.Lock()
			_, exists := retries[serverName]
			if !exists {
				retries[serverName] = time.Now().Add(waitTime)
			}
			retriesMu.Unlock()
		}
	}
}
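
Editor's note: the hunk above interleaves the old cooloff-based worker with the new retry-map approach. Pieced together from the added lines, the new bookkeeping boils down to roughly the pattern below. This is a simplified, generic sketch (string items instead of server names; only the one-second probe interval is taken from the diff), not the verbatim new worker.

// startRestarter: failed items are recorded with a retry deadline in a shared
// map, and a single goroutine periodically re-injects the ones that are due.
func startRestarter(ch chan<- string, retries map[string]time.Time, mu *sync.Mutex) {
	go func() {
		for {
			time.Sleep(time.Second)
			mu.Lock()
			var due []string
			for item, at := range retries {
				if time.Now().After(at) {
					due = append(due, item)
					delete(retries, item)
				}
			}
			mu.Unlock()
			for _, item := range due {
				ch <- item // re-inject outside the lock so a full channel can't block it
			}
		}
	}()
}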

@ -380,7 +374,7 @@ func (u *DeviceListUpdater) processServer(serverName gomatrixserverlib.ServerNam
		}
	}
	if failCount > 0 {
		logger.WithField("total", len(userIDs)).WithField("failed", failCount).Error("failed to query device keys for some users")
		logger.WithField("total", len(userIDs)).WithField("failed", failCount).WithField("wait", waitTime).Error("failed to query device keys for some users")
	}
	for _, userID := range userIDs {
		// always clear the channel to unblock Update calls regardless of success/failure

@ -20,6 +20,7 @@ import (
	"encoding/json"
	"time"

	"github.com/lib/pq"
	"github.com/matrix-org/dendrite/internal"
	"github.com/matrix-org/dendrite/internal/sqlutil"
	"github.com/matrix-org/dendrite/keyserver/api"

@ -47,7 +48,7 @@ const upsertKeysSQL = "" +
	" DO UPDATE SET key_json = $6"

const selectKeysSQL = "" +
	"SELECT key_id, algorithm, key_json FROM keyserver_one_time_keys WHERE user_id=$1 AND device_id=$2"
	"SELECT concat(algorithm, ':', key_id) as algorithmwithid, key_json FROM keyserver_one_time_keys WHERE user_id=$1 AND device_id=$2 AND concat(algorithm, ':', key_id) = ANY($3);"

const selectKeysCountSQL = "" +
	"SELECT algorithm, COUNT(key_id) FROM keyserver_one_time_keys WHERE user_id=$1 AND device_id=$2 GROUP BY algorithm"

@ -94,29 +95,22 @@ func NewPostgresOneTimeKeysTable(db *sql.DB) (tables.OneTimeKeys, error) {
}

func (s *oneTimeKeysStatements) SelectOneTimeKeys(ctx context.Context, userID, deviceID string, keyIDsWithAlgorithms []string) (map[string]json.RawMessage, error) {
	rows, err := s.selectKeysStmt.QueryContext(ctx, userID, deviceID)
	rows, err := s.selectKeysStmt.QueryContext(ctx, userID, deviceID, pq.Array(keyIDsWithAlgorithms))
	if err != nil {
		return nil, err
	}
	defer internal.CloseAndLogIfError(ctx, rows, "selectKeysStmt: rows.close() failed")

	wantSet := make(map[string]bool, len(keyIDsWithAlgorithms))
	for _, ka := range keyIDsWithAlgorithms {
		wantSet[ka] = true
	}

	result := make(map[string]json.RawMessage)
	var (
		algorithmWithID string
		keyJSONStr      string
	)
	for rows.Next() {
		var keyID string
		var algorithm string
		var keyJSONStr string
		if err := rows.Scan(&keyID, &algorithm, &keyJSONStr); err != nil {
		if err := rows.Scan(&algorithmWithID, &keyJSONStr); err != nil {
			return nil, err
		}
		keyIDWithAlgo := algorithm + ":" + keyID
		if wantSet[keyIDWithAlgo] {
			result[keyIDWithAlgo] = json.RawMessage(keyJSONStr)
		}
		result[algorithmWithID] = json.RawMessage(keyJSONStr)
	}
	return result, rows.Err()
}
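
Editor's note: with this change the filtering moves into SQL, so callers pass the wanted keys as "algorithm:key_id" strings matching the concat() above. A hedged sketch of such a caller, assuming SelectOneTimeKeys is exposed on the tables.OneTimeKeys interface (user, device and key names below are illustrative):

// Illustrative fragment: claim specific one-time keys by "algorithm:key_id" name.
func claimKeys(ctx context.Context, table tables.OneTimeKeys) error {
	wanted := []string{"signed_curve25519:AAAAHg", "signed_curve25519:AAAAHQ"}
	keys, err := table.SelectOneTimeKeys(ctx, "@alice:example.com", "DEVICEID", wanted)
	if err != nil {
		return err
	}
	for algWithID, keyJSON := range keys {
		fmt.Println(algWithID, string(keyJSON))
	}
	return nil
}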

@ -147,7 +147,8 @@ func (r *uploadRequest) doUpload(
	// r.storeFileAndMetadata(ctx, tmpDir, ...)
	// before you return from doUpload else we will leak a temp file. We could make this nicer with a `WithTransaction` style of
	// nested function to guarantee either storage or cleanup.
	hash, bytesWritten, tmpDir, err := fileutils.WriteTempFile(ctx, reqReader, cfg.AbsBasePath)
	lr := io.LimitReader(reqReader, int64(*cfg.MaxFileSizeBytes)+1)
	hash, bytesWritten, tmpDir, err := fileutils.WriteTempFile(ctx, lr, cfg.AbsBasePath)
	if err != nil {
		r.Logger.WithError(err).WithFields(log.Fields{
			"MaxFileSizeBytes": *cfg.MaxFileSizeBytes,

@ -158,6 +159,12 @@ func (r *uploadRequest) doUpload(
		}
	}

	// Check if temp file size exceeds max file size configuration
	if bytesWritten > types.FileSizeBytes(*cfg.MaxFileSizeBytes) {
		fileutils.RemoveDir(tmpDir, r.Logger) // delete temp file
		return requestEntityTooLargeJSONResponse(*cfg.MaxFileSizeBytes)
	}

	// Look up the media by the file hash. If we already have the file but under a
	// different media ID then we won't upload the file again - instead we'll just
	// add a new metadata entry that refers to the same file.
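
Editor's note: the LimitReader is sized at the maximum plus one byte so an over-limit upload is detectable: if more than the configured maximum was written, the source must have exceeded the limit. A standalone sketch of that idea (not the mediaapi code itself):

// Standalone illustration: copy at most max+1 bytes, then compare the count.
func withinLimit(src io.Reader, max int64) (bool, error) {
	n, err := io.Copy(ioutil.Discard, io.LimitReader(src, max+1))
	if err != nil {
		return false, err
	}
	return n <= max, nil // n == max+1 means the reader held more than max bytes
}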

@ -219,26 +226,17 @@ func (r *uploadRequest) doUpload(
	)
}

func requestEntityTooLargeJSONResponse(maxFileSizeBytes config.FileSizeBytes) *util.JSONResponse {
	return &util.JSONResponse{
		Code: http.StatusRequestEntityTooLarge,
		JSON: jsonerror.Unknown(fmt.Sprintf("HTTP Content-Length is greater than the maximum allowed upload size (%v).", maxFileSizeBytes)),
	}
}

// Validate validates the uploadRequest fields
func (r *uploadRequest) Validate(maxFileSizeBytes config.FileSizeBytes) *util.JSONResponse {
	if r.MediaMetadata.FileSizeBytes < 1 {
		return &util.JSONResponse{
			Code: http.StatusLengthRequired,
			JSON: jsonerror.Unknown("HTTP Content-Length request header must be greater than zero."),
		}
	}
	if maxFileSizeBytes > 0 && r.MediaMetadata.FileSizeBytes > types.FileSizeBytes(maxFileSizeBytes) {
		return &util.JSONResponse{
			Code: http.StatusRequestEntityTooLarge,
			JSON: jsonerror.Unknown(fmt.Sprintf("HTTP Content-Length is greater than the maximum allowed upload size (%v).", maxFileSizeBytes)),
		}
	}
	// TODO: Check if the Content-Type is a valid type?
	if r.MediaMetadata.ContentType == "" {
		return &util.JSONResponse{
			Code: http.StatusBadRequest,
			JSON: jsonerror.Unknown("HTTP Content-Type request header must be set."),
		}
		return requestEntityTooLargeJSONResponse(maxFileSizeBytes)
	}
	if strings.HasPrefix(string(r.MediaMetadata.UploadName), "~") {
		return &util.JSONResponse{

132 mediaapi/routing/upload_test.go Normal file

@ -0,0 +1,132 @@
package routing

import (
	"context"
	"io"
	"os"
	"path/filepath"
	"reflect"
	"strings"
	"testing"

	"github.com/matrix-org/dendrite/mediaapi/fileutils"
	"github.com/matrix-org/dendrite/mediaapi/storage"
	"github.com/matrix-org/dendrite/mediaapi/types"
	"github.com/matrix-org/dendrite/setup/config"
	"github.com/matrix-org/util"
	log "github.com/sirupsen/logrus"
)

func Test_uploadRequest_doUpload(t *testing.T) {
	type fields struct {
		MediaMetadata *types.MediaMetadata
		Logger        *log.Entry
	}
	type args struct {
		ctx                       context.Context
		reqReader                 io.Reader
		cfg                       *config.MediaAPI
		db                        storage.Database
		activeThumbnailGeneration *types.ActiveThumbnailGeneration
	}

	wd, err := os.Getwd()
	if err != nil {
		t.Errorf("failed to get current working directory: %v", err)
	}

	maxSize := config.FileSizeBytes(8)
	logger := log.New().WithField("mediaapi", "test")
	testdataPath := filepath.Join(wd, "./testdata")

	cfg := &config.MediaAPI{
		MaxFileSizeBytes:  &maxSize,
		BasePath:          config.Path(testdataPath),
		AbsBasePath:       config.Path(testdataPath),
		DynamicThumbnails: false,
	}

	// create testdata folder and remove when done
	_ = os.Mkdir(testdataPath, os.ModePerm)
	defer fileutils.RemoveDir(types.Path(testdataPath), nil)

	db, err := storage.Open(&config.DatabaseOptions{
		ConnectionString:       "file::memory:?cache=shared",
		MaxOpenConnections:     100,
		MaxIdleConnections:     2,
		ConnMaxLifetimeSeconds: -1,
	})
	if err != nil {
		t.Errorf("error opening mediaapi database: %v", err)
	}

	tests := []struct {
		name   string
		fields fields
		args   args
		want   *util.JSONResponse
	}{
		{
			name: "upload ok",
			args: args{
				ctx:       context.Background(),
				reqReader: strings.NewReader("test"),
				cfg:       cfg,
				db:        db,
			},
			fields: fields{
				Logger: logger,
				MediaMetadata: &types.MediaMetadata{
					MediaID:    "1337",
					UploadName: "test ok",
				},
			},
			want: nil,
		},
		{
			name: "upload ok (exact size)",
			args: args{
				ctx:       context.Background(),
				reqReader: strings.NewReader("testtest"),
				cfg:       cfg,
				db:        db,
			},
			fields: fields{
				Logger: logger,
				MediaMetadata: &types.MediaMetadata{
					MediaID:    "1338",
					UploadName: "test ok (exact size)",
				},
			},
			want: nil,
		},
		{
			name: "upload not ok",
			args: args{
				ctx:       context.Background(),
				reqReader: strings.NewReader("test test test"),
				cfg:       cfg,
				db:        db,
			},
			fields: fields{
				Logger: logger,
				MediaMetadata: &types.MediaMetadata{
					MediaID:    "1339",
					UploadName: "test fail",
				},
			},
			want: requestEntityTooLargeJSONResponse(maxSize),
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			r := &uploadRequest{
				MediaMetadata: tt.fields.MediaMetadata,
				Logger:        tt.fields.Logger,
			}
			if got := r.doUpload(tt.args.ctx, tt.args.reqReader, tt.args.cfg, tt.args.db, tt.args.activeThumbnailGeneration); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("doUpload() = %+v, want %+v", got, tt.want)
			}
		})
	}
}

@ -24,6 +24,7 @@ import (

func LoadFromGoose() {
	goose.AddMigration(UpAddForgottenColumn, DownAddForgottenColumn)
	goose.AddMigration(UpStateBlocksRefactor, DownStateBlocksRefactor)
}

func LoadAddForgottenColumn(m *sqlutil.Migrations) {
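
Editor's note: the Up/Down functions registered with goose.AddMigration follow the func(*sql.Tx) error shape, as the UpStateBlocksRefactor migration below also does. A minimal hedged sketch of such a pair (the example_table name is made up for illustration):

// Illustrative Up/Down migration pair in the shape goose.AddMigration expects.
func UpExample(tx *sql.Tx) error {
	_, err := tx.Exec(`ALTER TABLE example_table ADD COLUMN IF NOT EXISTS note TEXT;`)
	return err
}

func DownExample(tx *sql.Tx) error {
	_, err := tx.Exec(`ALTER TABLE example_table DROP COLUMN IF EXISTS note;`)
	return err
}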
@ -0,0 +1,223 @@
|
|||
// Copyright 2020 The Matrix.org Foundation C.I.C.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package deltas
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
|
||||
"github.com/lib/pq"
|
||||
"github.com/matrix-org/dendrite/internal/sqlutil"
|
||||
"github.com/matrix-org/dendrite/roomserver/types"
|
||||
"github.com/matrix-org/util"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type stateSnapshotData struct {
|
||||
StateSnapshotNID types.StateSnapshotNID
|
||||
RoomNID types.RoomNID
|
||||
}
|
||||
|
||||
type stateBlockData struct {
|
||||
stateSnapshotData
|
||||
StateBlockNID types.StateBlockNID
|
||||
EventNIDs types.EventNIDs
|
||||
}
|
||||
|
||||
func LoadStateBlocksRefactor(m *sqlutil.Migrations) {
|
||||
m.AddMigration(UpStateBlocksRefactor, DownStateBlocksRefactor)
|
||||
}
|
||||
|
||||
// nolint:gocyclo
|
||||
func UpStateBlocksRefactor(tx *sql.Tx) error {
|
||||
logrus.Warn("Performing state storage upgrade. Please wait, this may take some time!")
|
||||
defer logrus.Warn("State storage upgrade complete")
|
||||
|
||||
var snapshotcount int
|
||||
var maxsnapshotid int
|
||||
var maxblockid int
|
||||
if err := tx.QueryRow(`SELECT COUNT(DISTINCT state_snapshot_nid) FROM roomserver_state_snapshots;`).Scan(&snapshotcount); err != nil {
|
||||
return fmt.Errorf("tx.QueryRow.Scan (count snapshots): %w", err)
|
||||
}
|
||||
if err := tx.QueryRow(`SELECT COALESCE(MAX(state_snapshot_nid),0) FROM roomserver_state_snapshots;`).Scan(&maxsnapshotid); err != nil {
|
||||
return fmt.Errorf("tx.QueryRow.Scan (count snapshots): %w", err)
|
||||
}
|
||||
if err := tx.QueryRow(`SELECT COALESCE(MAX(state_block_nid),0) FROM roomserver_state_block;`).Scan(&maxblockid); err != nil {
|
||||
return fmt.Errorf("tx.QueryRow.Scan (count snapshots): %w", err)
|
||||
}
|
||||
maxsnapshotid++
|
||||
maxblockid++
|
||||
|
||||
if _, err := tx.Exec(`ALTER TABLE roomserver_state_block RENAME TO _roomserver_state_block;`); err != nil {
|
||||
return fmt.Errorf("tx.Exec: %w", err)
|
||||
}
|
||||
if _, err := tx.Exec(`ALTER TABLE roomserver_state_snapshots RENAME TO _roomserver_state_snapshots;`); err != nil {
|
||||
return fmt.Errorf("tx.Exec: %w", err)
|
||||
}
|
||||
// We create new sequences starting with the maximum state snapshot and block NIDs.
|
||||
// This means that all newly created snapshots and blocks by the migration will have
|
||||
// NIDs higher than these values, so that when we come to update the references to
|
||||
// these NIDs using UPDATE statements, we can guarantee we are only ever updating old
|
||||
// values and not accidentally overwriting new ones.
|
||||
if _, err := tx.Exec(fmt.Sprintf(`CREATE SEQUENCE roomserver_state_block_nid_sequence START WITH %d;`, maxblockid)); err != nil {
|
||||
return fmt.Errorf("tx.Exec: %w", err)
|
||||
}
|
||||
if _, err := tx.Exec(fmt.Sprintf(`CREATE SEQUENCE roomserver_state_snapshot_nid_sequence START WITH %d;`, maxsnapshotid)); err != nil {
|
||||
return fmt.Errorf("tx.Exec: %w", err)
|
||||
}
|
||||
_, err := tx.Exec(`
|
||||
CREATE TABLE IF NOT EXISTS roomserver_state_block (
|
||||
state_block_nid bigint PRIMARY KEY DEFAULT nextval('roomserver_state_block_nid_sequence'),
|
||||
state_block_hash BYTEA UNIQUE,
|
||||
event_nids bigint[] NOT NULL
|
||||
);
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("tx.Exec (create blocks table): %w", err)
|
||||
}
|
||||
_, err = tx.Exec(`
|
||||
CREATE TABLE IF NOT EXISTS roomserver_state_snapshots (
|
||||
state_snapshot_nid bigint PRIMARY KEY DEFAULT nextval('roomserver_state_snapshot_nid_sequence'),
|
||||
state_snapshot_hash BYTEA UNIQUE,
|
||||
room_nid bigint NOT NULL,
|
||||
state_block_nids bigint[] NOT NULL
|
||||
);
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("tx.Exec (create snapshots table): %w", err)
|
||||
}
|
||||
logrus.Warn("New tables created...")
|
||||
|
||||
batchsize := 100
|
||||
for batchoffset := 0; batchoffset < snapshotcount; batchoffset += batchsize {
|
||||
var snapshotrows *sql.Rows
|
||||
snapshotrows, err = tx.Query(`
|
||||
SELECT
|
||||
state_snapshot_nid,
|
||||
room_nid,
|
||||
state_block_nid,
|
||||
ARRAY_AGG(event_nid) AS event_nids
|
||||
FROM (
|
||||
SELECT
|
||||
_roomserver_state_snapshots.state_snapshot_nid,
|
||||
_roomserver_state_snapshots.room_nid,
|
||||
_roomserver_state_block.state_block_nid,
|
||||
_roomserver_state_block.event_nid
|
||||
FROM
|
||||
_roomserver_state_snapshots
|
||||
JOIN _roomserver_state_block ON _roomserver_state_block.state_block_nid = ANY (_roomserver_state_snapshots.state_block_nids)
|
||||
WHERE
|
||||
_roomserver_state_snapshots.state_snapshot_nid = ANY ( SELECT DISTINCT
|
||||
_roomserver_state_snapshots.state_snapshot_nid
|
||||
FROM
|
||||
_roomserver_state_snapshots
|
||||
LIMIT $1 OFFSET $2)) AS _roomserver_state_block
|
||||
GROUP BY
|
||||
state_snapshot_nid,
|
||||
room_nid,
|
||||
state_block_nid;
|
||||
`, batchsize, batchoffset)
|
||||
if err != nil {
|
||||
return fmt.Errorf("tx.Query: %w", err)
|
||||
}
|
||||
|
||||
logrus.Warnf("Rewriting snapshots %d-%d of %d...", batchoffset, batchoffset+batchsize, snapshotcount)
|
||||
var snapshots []stateBlockData
|
||||
|
||||
for snapshotrows.Next() {
|
||||
var snapshot stateBlockData
|
||||
var eventsarray pq.Int64Array
|
||||
if err = snapshotrows.Scan(&snapshot.StateSnapshotNID, &snapshot.RoomNID, &snapshot.StateBlockNID, &eventsarray); err != nil {
|
||||
return fmt.Errorf("rows.Scan: %w", err)
|
||||
}
|
||||
for _, e := range eventsarray {
|
||||
snapshot.EventNIDs = append(snapshot.EventNIDs, types.EventNID(e))
|
||||
}
|
||||
snapshot.EventNIDs = snapshot.EventNIDs[:util.SortAndUnique(snapshot.EventNIDs)]
|
||||
snapshots = append(snapshots, snapshot)
|
||||
}
|
||||
|
||||
if err = snapshotrows.Close(); err != nil {
|
||||
return fmt.Errorf("snapshots.Close: %w", err)
|
||||
}
|
||||
|
||||
newsnapshots := map[stateSnapshotData]types.StateBlockNIDs{}
|
||||
|
||||
for _, snapshot := range snapshots {
|
||||
var eventsarray pq.Int64Array
|
||||
for _, e := range snapshot.EventNIDs {
|
||||
eventsarray = append(eventsarray, int64(e))
|
||||
}
|
||||
|
||||
var blocknid types.StateBlockNID
|
||||
err = tx.QueryRow(`
|
||||
INSERT INTO roomserver_state_block (state_block_hash, event_nids)
|
||||
VALUES ($1, $2)
|
||||
ON CONFLICT (state_block_hash) DO UPDATE SET event_nids=$2
|
||||
RETURNING state_block_nid
|
||||
`, snapshot.EventNIDs.Hash(), eventsarray).Scan(&blocknid)
|
||||
if err != nil {
|
||||
return fmt.Errorf("tx.QueryRow.Scan (insert new block with %d events): %w", len(eventsarray), err)
|
||||
}
|
||||
index := stateSnapshotData{snapshot.StateSnapshotNID, snapshot.RoomNID}
|
||||
newsnapshots[index] = append(newsnapshots[index], blocknid)
|
||||
}
|
||||
|
||||
for snapshotdata, newblocks := range newsnapshots {
|
||||
var newblocksarray pq.Int64Array
|
||||
for _, b := range newblocks {
|
||||
newblocksarray = append(newblocksarray, int64(b))
|
||||
}
|
||||
|
||||
var newNID types.StateSnapshotNID
|
||||
err = tx.QueryRow(`
|
||||
INSERT INTO roomserver_state_snapshots (state_snapshot_hash, room_nid, state_block_nids)
|
||||
VALUES ($1, $2, $3)
|
||||
ON CONFLICT (state_snapshot_hash) DO UPDATE SET room_nid=$2
|
||||
RETURNING state_snapshot_nid
|
||||
`, newblocks.Hash(), snapshotdata.RoomNID, newblocksarray).Scan(&newNID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("tx.QueryRow.Scan (insert new snapshot): %w", err)
|
||||
}
|
||||
|
||||
if _, err = tx.Exec(`UPDATE roomserver_events SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newNID, snapshotdata.StateSnapshotNID, maxsnapshotid); err != nil {
|
||||
return fmt.Errorf("tx.Exec (update events): %w", err)
|
||||
}
|
||||
|
||||
if _, err = tx.Exec(`UPDATE roomserver_rooms SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newNID, snapshotdata.StateSnapshotNID, maxsnapshotid); err != nil {
|
||||
return fmt.Errorf("tx.Exec (update rooms): %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if _, err = tx.Exec(`
|
||||
DROP TABLE _roomserver_state_snapshots;
|
||||
DROP SEQUENCE roomserver_state_snapshot_nid_seq;
|
||||
`); err != nil {
|
||||
return fmt.Errorf("tx.Exec (delete old snapshot table): %w", err)
|
||||
}
|
||||
if _, err = tx.Exec(`
|
||||
DROP TABLE _roomserver_state_block;
|
||||
DROP SEQUENCE roomserver_state_block_nid_seq;
|
||||
`); err != nil {
|
||||
return fmt.Errorf("tx.Exec (delete old block table): %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func DownStateBlocksRefactor(tx *sql.Tx) error {
|
||||
panic("Downgrading state storage is not supported")
|
||||
}
|
||||
|
|
@ -59,12 +59,14 @@ type eventJSONStatements struct {
|
|||
bulkSelectEventJSONStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func NewPostgresEventJSONTable(db *sql.DB) (tables.EventJSON, error) {
|
||||
s := &eventJSONStatements{}
|
||||
func createEventJSONTable(db *sql.DB) error {
|
||||
_, err := db.Exec(eventJSONSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func prepareEventJSONTable(db *sql.DB) (tables.EventJSON, error) {
|
||||
s := &eventJSONStatements{}
|
||||
|
||||
return s, shared.StatementList{
|
||||
{&s.insertEventJSONStmt, insertEventJSONSQL},
|
||||
{&s.bulkSelectEventJSONStmt, bulkSelectEventJSONSQL},
|
||||
|
|
|
|||
|
|
@ -77,12 +77,14 @@ type eventStateKeyStatements struct {
|
|||
bulkSelectEventStateKeyStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func NewPostgresEventStateKeysTable(db *sql.DB) (tables.EventStateKeys, error) {
|
||||
s := &eventStateKeyStatements{}
|
||||
func createEventStateKeysTable(db *sql.DB) error {
|
||||
_, err := db.Exec(eventStateKeysSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func prepareEventStateKeysTable(db *sql.DB) (tables.EventStateKeys, error) {
|
||||
s := &eventStateKeyStatements{}
|
||||
|
||||
return s, shared.StatementList{
|
||||
{&s.insertEventStateKeyNIDStmt, insertEventStateKeyNIDSQL},
|
||||
{&s.selectEventStateKeyNIDStmt, selectEventStateKeyNIDSQL},
|
||||
|
|
|
|||
|
|
@ -100,12 +100,13 @@ type eventTypeStatements struct {
|
|||
bulkSelectEventTypeNIDStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func NewPostgresEventTypesTable(db *sql.DB) (tables.EventTypes, error) {
|
||||
s := &eventTypeStatements{}
|
||||
func createEventTypesTable(db *sql.DB) error {
|
||||
_, err := db.Exec(eventTypesSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func prepareEventTypesTable(db *sql.DB) (tables.EventTypes, error) {
|
||||
s := &eventTypeStatements{}
|
||||
|
||||
return s, shared.StatementList{
|
||||
{&s.insertEventTypeNIDStmt, insertEventTypeNIDSQL},
|
||||
|
|
|
|||
|
|
@ -19,6 +19,7 @@ import (
|
|||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/lib/pq"
|
||||
"github.com/matrix-org/dendrite/internal"
|
||||
|
|
@ -88,6 +89,16 @@ const bulkSelectStateEventByIDSQL = "" +
|
|||
" WHERE event_id = ANY($1)" +
|
||||
" ORDER BY event_type_nid, event_state_key_nid ASC"
|
||||
|
||||
// Bulk look up of events by event NID, optionally filtering by the event type
|
||||
// or event state key NIDs if provided. (The CARDINALITY check will return true
|
||||
// if the provided arrays are empty, ergo no filtering).
|
||||
const bulkSelectStateEventByNIDSQL = "" +
|
||||
"SELECT event_type_nid, event_state_key_nid, event_nid FROM roomserver_events" +
|
||||
" WHERE event_nid = ANY($1)" +
|
||||
" AND (CARDINALITY($2::bigint[]) = 0 OR event_type_nid = ANY($2))" +
|
||||
" AND (CARDINALITY($3::bigint[]) = 0 OR event_state_key_nid = ANY($3))" +
|
||||
" ORDER BY event_type_nid, event_state_key_nid ASC"
|
||||
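
Editor's note on the CARDINALITY trick above: passing empty arrays disables the corresponding condition, because CARDINALITY($2::bigint[]) = 0 is then true for every row. An illustrative call (values made up; the real caller builds the arrays from state key tuples):

// Illustrative only: empty NID arrays mean "no filtering" with the SQL above.
rows, err := stmt.QueryContext(ctx,
	pq.Int64Array{101, 102, 103}, // event NIDs to fetch
	pq.Int64Array{},              // no event type filter
	pq.Int64Array{},              // no state key filter
)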
|
||||
const bulkSelectStateAtEventByIDSQL = "" +
|
||||
"SELECT event_type_nid, event_state_key_nid, event_nid, state_snapshot_nid, is_rejected FROM roomserver_events" +
|
||||
" WHERE event_id = ANY($1)"
|
||||
|
|
@ -127,6 +138,7 @@ type eventStatements struct {
|
|||
insertEventStmt *sql.Stmt
|
||||
selectEventStmt *sql.Stmt
|
||||
bulkSelectStateEventByIDStmt *sql.Stmt
|
||||
bulkSelectStateEventByNIDStmt *sql.Stmt
|
||||
bulkSelectStateAtEventByIDStmt *sql.Stmt
|
||||
updateEventStateStmt *sql.Stmt
|
||||
selectEventSentToOutputStmt *sql.Stmt
|
||||
|
|
@ -140,17 +152,19 @@ type eventStatements struct {
|
|||
selectRoomNIDsForEventNIDsStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func NewPostgresEventsTable(db *sql.DB) (tables.Events, error) {
|
||||
s := &eventStatements{}
|
||||
func createEventsTable(db *sql.DB) error {
|
||||
_, err := db.Exec(eventsSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func prepareEventsTable(db *sql.DB) (tables.Events, error) {
|
||||
s := &eventStatements{}
|
||||
|
||||
return s, shared.StatementList{
|
||||
{&s.insertEventStmt, insertEventSQL},
|
||||
{&s.selectEventStmt, selectEventSQL},
|
||||
{&s.bulkSelectStateEventByIDStmt, bulkSelectStateEventByIDSQL},
|
||||
{&s.bulkSelectStateEventByNIDStmt, bulkSelectStateEventByNIDSQL},
|
||||
{&s.bulkSelectStateAtEventByIDStmt, bulkSelectStateAtEventByIDSQL},
|
||||
{&s.updateEventStateStmt, updateEventStateSQL},
|
||||
{&s.updateEventSentToOutputStmt, updateEventSentToOutputSQL},
|
||||
|
|
@ -238,6 +252,42 @@ func (s *eventStatements) BulkSelectStateEventByID(
|
|||
return results, nil
|
||||
}
|
||||
|
||||
// bulkSelectStateEventByNID lookups a list of state events by event NID.
|
||||
// If any of the requested events are missing from the database it returns a types.MissingEventError
|
||||
func (s *eventStatements) BulkSelectStateEventByNID(
|
||||
ctx context.Context, eventNIDs []types.EventNID,
|
||||
stateKeyTuples []types.StateKeyTuple,
|
||||
) ([]types.StateEntry, error) {
|
||||
tuples := stateKeyTupleSorter(stateKeyTuples)
|
||||
sort.Sort(tuples)
|
||||
eventTypeNIDArray, eventStateKeyNIDArray := tuples.typesAndStateKeysAsArrays()
|
||||
rows, err := s.bulkSelectStateEventByNIDStmt.QueryContext(ctx, eventNIDsAsArray(eventNIDs), eventTypeNIDArray, eventStateKeyNIDArray)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer internal.CloseAndLogIfError(ctx, rows, "bulkSelectStateEventByID: rows.close() failed")
|
||||
// We know that we will only get as many results as event IDs
|
||||
// because of the unique constraint on event IDs.
|
||||
// So we can allocate an array of the correct size now.
|
||||
// We might get fewer results than IDs so we adjust the length of the slice before returning it.
|
||||
results := make([]types.StateEntry, len(eventNIDs))
|
||||
i := 0
|
||||
for ; rows.Next(); i++ {
|
||||
result := &results[i]
|
||||
if err = rows.Scan(
|
||||
&result.EventTypeNID,
|
||||
&result.EventStateKeyNID,
|
||||
&result.EventNID,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if err = rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return results[:i], nil
|
||||
}
|
||||
|
||||
// bulkSelectStateAtEventByID lookups the state at a list of events by event ID.
|
||||
// If any of the requested events are missing from the database it returns a types.MissingEventError.
|
||||
// If we do not have the state for any of the requested events it returns a types.MissingEventError.
|
||||
|
|
|
|||
|
|
@ -82,12 +82,13 @@ type inviteStatements struct {
|
|||
updateInviteRetiredStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func NewPostgresInvitesTable(db *sql.DB) (tables.Invites, error) {
|
||||
s := &inviteStatements{}
|
||||
func createInvitesTable(db *sql.DB) error {
|
||||
_, err := db.Exec(inviteSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func prepareInvitesTable(db *sql.DB) (tables.Invites, error) {
|
||||
s := &inviteStatements{}
|
||||
|
||||
return s, shared.StatementList{
|
||||
{&s.insertInviteEventStmt, insertInviteEventSQL},
|
||||
|
|
|
|||
|
|
@ -139,12 +139,13 @@ type membershipStatements struct {
|
|||
updateMembershipForgetRoomStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func NewPostgresMembershipTable(db *sql.DB) (tables.Membership, error) {
|
||||
s := &membershipStatements{}
|
||||
func createMembershipTable(db *sql.DB) error {
|
||||
_, err := db.Exec(membershipSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func prepareMembershipTable(db *sql.DB) (tables.Membership, error) {
|
||||
s := &membershipStatements{}
|
||||
|
||||
return s, shared.StatementList{
|
||||
{&s.insertMembershipStmt, insertMembershipSQL},
|
||||
|
|
@ -162,11 +163,6 @@ func NewPostgresMembershipTable(db *sql.DB) (tables.Membership, error) {
|
|||
}.Prepare(db)
|
||||
}
|
||||
|
||||
func (s *membershipStatements) execSchema(db *sql.DB) error {
|
||||
_, err := db.Exec(membershipSchema)
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *membershipStatements) InsertMembership(
|
||||
ctx context.Context,
|
||||
txn *sql.Tx, roomNID types.RoomNID, targetUserNID types.EventStateKeyNID,
|
||||
|
|
|
|||
|
|
@ -65,12 +65,13 @@ type previousEventStatements struct {
|
|||
selectPreviousEventExistsStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func NewPostgresPreviousEventsTable(db *sql.DB) (tables.PreviousEvents, error) {
|
||||
s := &previousEventStatements{}
|
||||
func createPrevEventsTable(db *sql.DB) error {
|
||||
_, err := db.Exec(previousEventSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func preparePrevEventsTable(db *sql.DB) (tables.PreviousEvents, error) {
|
||||
s := &previousEventStatements{}
|
||||
|
||||
return s, shared.StatementList{
|
||||
{&s.insertPreviousEventStmt, insertPreviousEventSQL},
|
||||
|
|
|
|||
|
|
@ -50,12 +50,14 @@ type publishedStatements struct {
|
|||
selectPublishedStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func NewPostgresPublishedTable(db *sql.DB) (tables.Published, error) {
|
||||
s := &publishedStatements{}
|
||||
func createPublishedTable(db *sql.DB) error {
|
||||
_, err := db.Exec(publishedSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func preparePublishedTable(db *sql.DB) (tables.Published, error) {
|
||||
s := &publishedStatements{}
|
||||
|
||||
return s, shared.StatementList{
|
||||
{&s.upsertPublishedStmt, upsertPublishedSQL},
|
||||
{&s.selectAllPublishedStmt, selectAllPublishedSQL},
|
||||
|
|
|
|||
|
|
@ -60,12 +60,13 @@ type redactionStatements struct {
|
|||
markRedactionValidatedStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func NewPostgresRedactionsTable(db *sql.DB) (tables.Redactions, error) {
|
||||
s := &redactionStatements{}
|
||||
func createRedactionsTable(db *sql.DB) error {
|
||||
_, err := db.Exec(redactionsSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func prepareRedactionsTable(db *sql.DB) (tables.Redactions, error) {
|
||||
s := &redactionStatements{}
|
||||
|
||||
return s, shared.StatementList{
|
||||
{&s.insertRedactionStmt, insertRedactionSQL},
|
||||
|
|
|
|||
|
|
@ -62,12 +62,14 @@ type roomAliasesStatements struct {
|
|||
deleteRoomAliasStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func NewPostgresRoomAliasesTable(db *sql.DB) (tables.RoomAliases, error) {
|
||||
s := &roomAliasesStatements{}
|
||||
func createRoomAliasesTable(db *sql.DB) error {
|
||||
_, err := db.Exec(roomAliasesSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func prepareRoomAliasesTable(db *sql.DB) (tables.RoomAliases, error) {
|
||||
s := &roomAliasesStatements{}
|
||||
|
||||
return s, shared.StatementList{
|
||||
{&s.insertRoomAliasStmt, insertRoomAliasSQL},
|
||||
{&s.selectRoomIDFromAliasStmt, selectRoomIDFromAliasSQL},
|
||||
|
|
|
|||
|
|
@ -96,12 +96,14 @@ type roomStatements struct {
|
|||
bulkSelectRoomNIDsStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func NewPostgresRoomsTable(db *sql.DB) (tables.Rooms, error) {
|
||||
s := &roomStatements{}
|
||||
func createRoomsTable(db *sql.DB) error {
|
||||
_, err := db.Exec(roomsSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func prepareRoomsTable(db *sql.DB) (tables.Rooms, error) {
|
||||
s := &roomStatements{}
|
||||
|
||||
return s, shared.StatementList{
|
||||
{&s.insertRoomNIDStmt, insertRoomNIDSQL},
|
||||
{&s.selectRoomNIDStmt, selectRoomNIDSQL},
|
||||
|
|
|
|||
|
|
@ -41,141 +41,88 @@ const stateDataSchema = `
|
|||
-- which in turn makes it easier to merge state data blocks.
|
||||
CREATE SEQUENCE IF NOT EXISTS roomserver_state_block_nid_seq;
|
||||
CREATE TABLE IF NOT EXISTS roomserver_state_block (
|
||||
-- Local numeric ID for this state data.
|
||||
state_block_nid bigint NOT NULL,
|
||||
event_type_nid bigint NOT NULL,
|
||||
event_state_key_nid bigint NOT NULL,
|
||||
event_nid bigint NOT NULL,
|
||||
UNIQUE (state_block_nid, event_type_nid, event_state_key_nid)
|
||||
-- The state snapshot NID that identifies this snapshot.
|
||||
state_block_nid bigint PRIMARY KEY DEFAULT nextval('roomserver_state_block_nid_seq'),
|
||||
-- The hash of the state block, which is used to enforce uniqueness. The hash is
|
||||
-- generated in Dendrite and passed through to the database, as a btree index over
|
||||
-- this column is cheap and fits within the maximum index size.
|
||||
state_block_hash BYTEA UNIQUE,
|
||||
-- The event NIDs contained within the state block.
|
||||
event_nids bigint[] NOT NULL
|
||||
);
|
||||
`
|
||||
|
||||
// Insert a new state block. If we conflict on the hash column then
|
||||
// we must perform an update so that the RETURNING statement returns the
|
||||
// ID of the row that we conflicted with, so that we can then refer to
|
||||
// the original block.
|
||||
const insertStateDataSQL = "" +
|
||||
"INSERT INTO roomserver_state_block (state_block_nid, event_type_nid, event_state_key_nid, event_nid)" +
|
||||
" VALUES ($1, $2, $3, $4)"
|
||||
"INSERT INTO roomserver_state_block (state_block_hash, event_nids)" +
|
||||
" VALUES ($1, $2)" +
|
||||
" ON CONFLICT (state_block_hash) DO UPDATE SET event_nids=$2" +
|
||||
" RETURNING state_block_nid"
|
||||
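
Editor's note: the comment above describes the usual Postgres upsert-and-return idiom; on conflict the UPDATE makes the row visible to RETURNING, so the caller always gets a usable state_block_nid back. In Go it is consumed with QueryRow/Scan, roughly as in this hedged sketch (variable names illustrative):

// Illustrative: insert-or-update, then read back the block NID either way.
var blockNID int64
if err := insertStateDataStmt.QueryRowContext(ctx, blockHash, pq.Int64Array(eventNIDs)).Scan(&blockNID); err != nil {
	return err
}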
|
||||
const selectNextStateBlockNIDSQL = "" +
|
||||
"SELECT nextval('roomserver_state_block_nid_seq')"
|
||||
|
||||
// Bulk state lookup by numeric state block ID.
|
||||
// Sort by the state_block_nid, event_type_nid, event_state_key_nid
|
||||
// This means that all the entries for a given state_block_nid will appear
|
||||
// together in the list and those entries will sorted by event_type_nid
|
||||
// and event_state_key_nid. This property makes it easier to merge two
|
||||
// state data blocks together.
|
||||
const bulkSelectStateBlockEntriesSQL = "" +
|
||||
"SELECT state_block_nid, event_type_nid, event_state_key_nid, event_nid" +
|
||||
" FROM roomserver_state_block WHERE state_block_nid = ANY($1)" +
|
||||
" ORDER BY state_block_nid, event_type_nid, event_state_key_nid"
|
||||
|
||||
// Bulk state lookup by numeric state block ID.
|
||||
// Filters the rows in each block to the requested types and state keys.
|
||||
// We would like to restrict to particular type state key pairs but we are
|
||||
// restricted by the query language to pull the cross product of a list
|
||||
// of types and a list state_keys. So we have to filter the result in the
|
||||
// application to restrict it to the list of event types and state keys we
|
||||
// actually wanted.
|
||||
const bulkSelectFilteredStateBlockEntriesSQL = "" +
|
||||
"SELECT state_block_nid, event_type_nid, event_state_key_nid, event_nid" +
|
||||
" FROM roomserver_state_block WHERE state_block_nid = ANY($1)" +
|
||||
" AND event_type_nid = ANY($2) AND event_state_key_nid = ANY($3)" +
|
||||
" ORDER BY state_block_nid, event_type_nid, event_state_key_nid"
|
||||
"SELECT state_block_nid, event_nids" +
|
||||
" FROM roomserver_state_block WHERE state_block_nid = ANY($1)"
|
||||
|
||||
type stateBlockStatements struct {
|
||||
insertStateDataStmt *sql.Stmt
|
||||
selectNextStateBlockNIDStmt *sql.Stmt
|
||||
bulkSelectStateBlockEntriesStmt *sql.Stmt
|
||||
bulkSelectFilteredStateBlockEntriesStmt *sql.Stmt
|
||||
insertStateDataStmt *sql.Stmt
|
||||
bulkSelectStateBlockEntriesStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func NewPostgresStateBlockTable(db *sql.DB) (tables.StateBlock, error) {
|
||||
s := &stateBlockStatements{}
|
||||
func createStateBlockTable(db *sql.DB) error {
|
||||
_, err := db.Exec(stateDataSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func prepareStateBlockTable(db *sql.DB) (tables.StateBlock, error) {
|
||||
s := &stateBlockStatements{}
|
||||
|
||||
return s, shared.StatementList{
|
||||
{&s.insertStateDataStmt, insertStateDataSQL},
|
||||
{&s.selectNextStateBlockNIDStmt, selectNextStateBlockNIDSQL},
|
||||
{&s.bulkSelectStateBlockEntriesStmt, bulkSelectStateBlockEntriesSQL},
|
||||
{&s.bulkSelectFilteredStateBlockEntriesStmt, bulkSelectFilteredStateBlockEntriesSQL},
|
||||
}.Prepare(db)
|
||||
}
|
||||
|
||||
func (s *stateBlockStatements) BulkInsertStateData(
|
||||
ctx context.Context,
|
||||
txn *sql.Tx,
|
||||
entries []types.StateEntry,
|
||||
) (types.StateBlockNID, error) {
|
||||
stateBlockNID, err := s.selectNextStateBlockNID(ctx)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
entries types.StateEntries,
|
||||
) (id types.StateBlockNID, err error) {
|
||||
entries = entries[:util.SortAndUnique(entries)]
|
||||
var nids types.EventNIDs
|
||||
for _, e := range entries {
|
||||
nids = append(nids, e.EventNID)
|
||||
}
|
||||
for _, entry := range entries {
|
||||
_, err := s.insertStateDataStmt.ExecContext(
|
||||
ctx,
|
||||
int64(stateBlockNID),
|
||||
int64(entry.EventTypeNID),
|
||||
int64(entry.EventStateKeyNID),
|
||||
int64(entry.EventNID),
|
||||
)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
return stateBlockNID, nil
|
||||
}
|
||||
|
||||
func (s *stateBlockStatements) selectNextStateBlockNID(
|
||||
ctx context.Context,
|
||||
) (types.StateBlockNID, error) {
|
||||
var stateBlockNID int64
|
||||
err := s.selectNextStateBlockNIDStmt.QueryRowContext(ctx).Scan(&stateBlockNID)
|
||||
return types.StateBlockNID(stateBlockNID), err
|
||||
err = s.insertStateDataStmt.QueryRowContext(
|
||||
ctx, nids.Hash(), eventNIDsAsArray(nids),
|
||||
).Scan(&id)
|
||||
return
|
||||
}
|
||||
|
||||
func (s *stateBlockStatements) BulkSelectStateBlockEntries(
|
||||
ctx context.Context, stateBlockNIDs []types.StateBlockNID,
|
||||
) ([]types.StateEntryList, error) {
|
||||
nids := make([]int64, len(stateBlockNIDs))
|
||||
for i := range stateBlockNIDs {
|
||||
nids[i] = int64(stateBlockNIDs[i])
|
||||
}
|
||||
rows, err := s.bulkSelectStateBlockEntriesStmt.QueryContext(ctx, pq.Int64Array(nids))
|
||||
ctx context.Context, stateBlockNIDs types.StateBlockNIDs,
|
||||
) ([][]types.EventNID, error) {
|
||||
rows, err := s.bulkSelectStateBlockEntriesStmt.QueryContext(ctx, stateBlockNIDsAsArray(stateBlockNIDs))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer internal.CloseAndLogIfError(ctx, rows, "bulkSelectStateBlockEntries: rows.close() failed")
|
||||
|
||||
results := make([]types.StateEntryList, len(stateBlockNIDs))
|
||||
// current is a pointer to the StateEntryList to append the state entries to.
|
||||
var current *types.StateEntryList
|
||||
results := make([][]types.EventNID, len(stateBlockNIDs))
|
||||
i := 0
|
||||
for rows.Next() {
|
||||
var (
|
||||
stateBlockNID int64
|
||||
eventTypeNID int64
|
||||
eventStateKeyNID int64
|
||||
eventNID int64
|
||||
entry types.StateEntry
|
||||
)
|
||||
if err = rows.Scan(
|
||||
&stateBlockNID, &eventTypeNID, &eventStateKeyNID, &eventNID,
|
||||
); err != nil {
|
||||
for ; rows.Next(); i++ {
|
||||
var stateBlockNID types.StateBlockNID
|
||||
var result pq.Int64Array
|
||||
if err = rows.Scan(&stateBlockNID, &result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entry.EventTypeNID = types.EventTypeNID(eventTypeNID)
|
||||
entry.EventStateKeyNID = types.EventStateKeyNID(eventStateKeyNID)
|
||||
entry.EventNID = types.EventNID(eventNID)
|
||||
if current == nil || types.StateBlockNID(stateBlockNID) != current.StateBlockNID {
|
||||
// The state entry row is for a different state data block to the current one.
|
||||
// So we start appending to the next entry in the list.
|
||||
current = &results[i]
|
||||
current.StateBlockNID = types.StateBlockNID(stateBlockNID)
|
||||
i++
|
||||
r := []types.EventNID{}
|
||||
for _, e := range result {
|
||||
r = append(r, types.EventNID(e))
|
||||
}
|
||||
current.StateEntries = append(current.StateEntries, entry)
|
||||
results[i] = r
|
||||
}
|
||||
if err = rows.Err(); err != nil {
|
||||
return nil, err
|
||||
|
|
@ -186,71 +133,6 @@ func (s *stateBlockStatements) BulkSelectStateBlockEntries(
|
|||
return results, err
|
||||
}
|
||||
|
||||
func (s *stateBlockStatements) BulkSelectFilteredStateBlockEntries(
|
||||
ctx context.Context,
|
||||
stateBlockNIDs []types.StateBlockNID,
|
||||
stateKeyTuples []types.StateKeyTuple,
|
||||
) ([]types.StateEntryList, error) {
|
||||
tuples := stateKeyTupleSorter(stateKeyTuples)
|
||||
// Sort the tuples so that we can run binary search against them as we filter the rows returned by the db.
|
||||
sort.Sort(tuples)
|
||||
|
||||
eventTypeNIDArray, eventStateKeyNIDArray := tuples.typesAndStateKeysAsArrays()
|
||||
rows, err := s.bulkSelectFilteredStateBlockEntriesStmt.QueryContext(
|
||||
ctx,
|
||||
stateBlockNIDsAsArray(stateBlockNIDs),
|
||||
eventTypeNIDArray,
|
||||
eventStateKeyNIDArray,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer internal.CloseAndLogIfError(ctx, rows, "bulkSelectFilteredStateBlockEntries: rows.close() failed")
|
||||
|
||||
var results []types.StateEntryList
|
||||
var current types.StateEntryList
|
||||
for rows.Next() {
|
||||
var (
|
||||
stateBlockNID int64
|
||||
eventTypeNID int64
|
||||
eventStateKeyNID int64
|
||||
eventNID int64
|
||||
entry types.StateEntry
|
||||
)
|
||||
if err := rows.Scan(
|
||||
&stateBlockNID, &eventTypeNID, &eventStateKeyNID, &eventNID,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entry.EventTypeNID = types.EventTypeNID(eventTypeNID)
|
||||
entry.EventStateKeyNID = types.EventStateKeyNID(eventStateKeyNID)
|
||||
entry.EventNID = types.EventNID(eventNID)
|
||||
|
||||
// We can use binary search here because we sorted the tuples earlier
|
||||
if !tuples.contains(entry.StateKeyTuple) {
|
||||
// The select will return the cross product of types and state keys.
|
||||
// So we need to check if type of the entry is in the list.
|
||||
continue
|
||||
}
|
||||
|
||||
if types.StateBlockNID(stateBlockNID) != current.StateBlockNID {
|
||||
// The state entry row is for a different state data block to the current one.
|
||||
// So we append the current entry to the results and start adding to a new one.
|
||||
// The first time through the loop current will be empty.
|
||||
if current.StateEntries != nil {
|
||||
results = append(results, current)
|
||||
}
|
||||
current = types.StateEntryList{StateBlockNID: types.StateBlockNID(stateBlockNID)}
|
||||
}
|
||||
current.StateEntries = append(current.StateEntries, entry)
|
||||
}
|
||||
// Add the last entry to the list if it is not empty.
|
||||
if current.StateEntries != nil {
|
||||
results = append(results, current)
|
||||
}
|
||||
return results, rows.Err()
|
||||
}
|
||||
|
||||
func stateBlockNIDsAsArray(stateBlockNIDs []types.StateBlockNID) pq.Int64Array {
|
||||
nids := make([]int64, len(stateBlockNIDs))
|
||||
for i := range stateBlockNIDs {
|
||||
|
|
|
|||
|
|
@ -25,6 +25,7 @@ import (
|
|||
"github.com/matrix-org/dendrite/roomserver/storage/shared"
|
||||
"github.com/matrix-org/dendrite/roomserver/storage/tables"
|
||||
"github.com/matrix-org/dendrite/roomserver/types"
|
||||
"github.com/matrix-org/util"
|
||||
)
|
||||
|
||||
const stateSnapshotSchema = `
|
||||
|
|
@ -40,19 +41,29 @@ const stateSnapshotSchema = `
|
|||
-- the full state under single state_block_nid.
|
||||
CREATE SEQUENCE IF NOT EXISTS roomserver_state_snapshot_nid_seq;
|
||||
CREATE TABLE IF NOT EXISTS roomserver_state_snapshots (
|
||||
-- Local numeric ID for the state.
|
||||
state_snapshot_nid bigint PRIMARY KEY DEFAULT nextval('roomserver_state_snapshot_nid_seq'),
|
||||
-- Local numeric ID of the room this state is for.
|
||||
-- Unused in normal operation, but useful for background work or ad-hoc debugging.
|
||||
room_nid bigint NOT NULL,
|
||||
-- List of state_block_nids, stored sorted by state_block_nid.
|
||||
state_block_nids bigint[] NOT NULL
|
||||
-- The state snapshot NID that identifies this snapshot.
|
||||
state_snapshot_nid bigint PRIMARY KEY DEFAULT nextval('roomserver_state_snapshot_nid_seq'),
|
||||
-- The hash of the state snapshot, which is used to enforce uniqueness. The hash is
|
||||
-- generated in Dendrite and passed through to the database, as a btree index over
|
||||
-- this column is cheap and fits within the maximum index size.
|
||||
state_snapshot_hash BYTEA UNIQUE,
|
||||
-- The room NID that the snapshot belongs to.
|
||||
room_nid bigint NOT NULL,
|
||||
-- The state blocks contained within this snapshot.
|
||||
state_block_nids bigint[] NOT NULL
|
||||
);
|
||||
`
|
||||
|
||||
// Insert a new state snapshot. If we conflict on the hash column then
// we must perform an update so that the RETURNING statement returns the
// ID of the row that we conflicted with, so that we can then refer to
// the original snapshot.
const insertStateSQL = "" +
"INSERT INTO roomserver_state_snapshots (room_nid, state_block_nids)" +
" VALUES ($1, $2)" +
"INSERT INTO roomserver_state_snapshots (state_snapshot_hash, room_nid, state_block_nids)" +
" VALUES ($1, $2, $3)" +
" ON CONFLICT (state_snapshot_hash) DO UPDATE SET room_nid=$2" +
// Performing an update, above, ensures that the RETURNING statement
// below will always return a valid state snapshot ID
" RETURNING state_snapshot_nid"

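The same hash trick as the state block table, applied to snapshots: re-inserting an identical, sorted set of block NIDs yields the original snapshot NID rather than a new row. A short sketch of that idempotency, assuming the hash and block NID list were produced the same way both times (imports as in the state block sketch further up); this helper is illustrative only.

// insertSnapshotTwice is a sketch: both calls return the same NID on success.
func insertSnapshotTwice(ctx context.Context, db *sql.DB, hash []byte, roomNID int64, blockNIDs []int64) (first, second int64, err error) {
	const q = `INSERT INTO roomserver_state_snapshots (state_snapshot_hash, room_nid, state_block_nids)
	           VALUES ($1, $2, $3)
	           ON CONFLICT (state_snapshot_hash) DO UPDATE SET room_nid = $2
	           RETURNING state_snapshot_nid`
	if err = db.QueryRowContext(ctx, q, hash, roomNID, pq.Array(blockNIDs)).Scan(&first); err != nil {
		return
	}
	err = db.QueryRowContext(ctx, q, hash, roomNID, pq.Array(blockNIDs)).Scan(&second)
	return // first == second whenever err == nil
}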
// Bulk state data NID lookup.
|
||||
|
|
@ -67,12 +78,13 @@ type stateSnapshotStatements struct {
|
|||
bulkSelectStateBlockNIDsStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func NewPostgresStateSnapshotTable(db *sql.DB) (tables.StateSnapshot, error) {
|
||||
s := &stateSnapshotStatements{}
|
||||
func createStateSnapshotTable(db *sql.DB) error {
|
||||
_, err := db.Exec(stateSnapshotSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func prepareStateSnapshotTable(db *sql.DB) (tables.StateSnapshot, error) {
|
||||
s := &stateSnapshotStatements{}
|
||||
|
||||
return s, shared.StatementList{
|
||||
{&s.insertStateStmt, insertStateSQL},
|
||||
|
|
@ -81,13 +93,15 @@ func NewPostgresStateSnapshotTable(db *sql.DB) (tables.StateSnapshot, error) {
|
|||
}
|
||||
|
||||
func (s *stateSnapshotStatements) InsertState(
|
||||
ctx context.Context, txn *sql.Tx, roomNID types.RoomNID, stateBlockNIDs []types.StateBlockNID,
|
||||
ctx context.Context, txn *sql.Tx, roomNID types.RoomNID, nids types.StateBlockNIDs,
|
||||
) (stateNID types.StateSnapshotNID, err error) {
|
||||
nids := make([]int64, len(stateBlockNIDs))
|
||||
for i := range stateBlockNIDs {
|
||||
nids[i] = int64(stateBlockNIDs[i])
|
||||
nids = nids[:util.SortAndUnique(nids)]
|
||||
var id int64
|
||||
err = sqlutil.TxStmt(txn, s.insertStateStmt).QueryRowContext(ctx, nids.Hash(), int64(roomNID), stateBlockNIDsAsArray(nids)).Scan(&id)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
err = sqlutil.TxStmt(txn, s.insertStateStmt).QueryRowContext(ctx, int64(roomNID), pq.Int64Array(nids)).Scan(&stateNID)
|
||||
stateNID = types.StateSnapshotNID(id)
|
||||
return
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -17,6 +17,7 @@ package postgres
|
|||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
|
||||
// Import the postgres database driver.
|
||||
_ "github.com/lib/pq"
|
||||
|
|
@ -39,20 +40,25 @@ func Open(dbProperties *config.DatabaseOptions, cache caching.RoomServerCaches)
|
|||
var db *sql.DB
|
||||
var err error
|
||||
if db, err = sqlutil.Open(dbProperties); err != nil {
|
||||
return nil, fmt.Errorf("sqlutil.Open: %w", err)
|
||||
}
|
||||
|
||||
// Create the tables.
|
||||
if err := d.create(db); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create tables before executing migrations so we don't fail if the table is missing,
|
||||
// and THEN prepare statements so we don't fail due to referencing new columns
|
||||
ms := membershipStatements{}
|
||||
if err := ms.execSchema(db); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Then execute the migrations. By this point the tables are created with the latest
|
||||
// schemas.
|
||||
m := sqlutil.NewMigrations()
|
||||
deltas.LoadAddForgottenColumn(m)
|
||||
deltas.LoadStateBlocksRefactor(m)
|
||||
if err := m.RunDeltas(db, dbProperties); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Then prepare the statements. Now that the migrations have run, any columns referred
|
||||
// to in the database code should now exist.
|
||||
if err := d.prepare(db, cache); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -60,61 +66,107 @@ func Open(dbProperties *config.DatabaseOptions, cache caching.RoomServerCaches)
|
|||
return &d, nil
|
||||
}
|
||||
|
||||
// nolint: gocyclo
|
||||
func (d *Database) prepare(db *sql.DB, cache caching.RoomServerCaches) (err error) {
|
||||
eventStateKeys, err := NewPostgresEventStateKeysTable(db)
|
||||
func (d *Database) create(db *sql.DB) error {
|
||||
if err := createEventStateKeysTable(db); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := createEventTypesTable(db); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := createEventJSONTable(db); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := createEventsTable(db); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := createRoomsTable(db); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := createTransactionsTable(db); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := createStateBlockTable(db); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := createStateSnapshotTable(db); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := createPrevEventsTable(db); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := createRoomAliasesTable(db); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := createInvitesTable(db); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := createMembershipTable(db); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := createPublishedTable(db); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := createRedactionsTable(db); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Database) prepare(db *sql.DB, cache caching.RoomServerCaches) error {
|
||||
eventStateKeys, err := prepareEventStateKeysTable(db)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
eventTypes, err := NewPostgresEventTypesTable(db)
|
||||
eventTypes, err := prepareEventTypesTable(db)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
eventJSON, err := NewPostgresEventJSONTable(db)
|
||||
eventJSON, err := prepareEventJSONTable(db)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
events, err := NewPostgresEventsTable(db)
|
||||
events, err := prepareEventsTable(db)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rooms, err := NewPostgresRoomsTable(db)
|
||||
rooms, err := prepareRoomsTable(db)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
transactions, err := NewPostgresTransactionsTable(db)
|
||||
transactions, err := prepareTransactionsTable(db)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stateBlock, err := NewPostgresStateBlockTable(db)
|
||||
stateBlock, err := prepareStateBlockTable(db)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stateSnapshot, err := NewPostgresStateSnapshotTable(db)
|
||||
stateSnapshot, err := prepareStateSnapshotTable(db)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
roomAliases, err := NewPostgresRoomAliasesTable(db)
|
||||
prevEvents, err := preparePrevEventsTable(db)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
prevEvents, err := NewPostgresPreviousEventsTable(db)
|
||||
roomAliases, err := prepareRoomAliasesTable(db)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
invites, err := NewPostgresInvitesTable(db)
|
||||
invites, err := prepareInvitesTable(db)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
membership, err := NewPostgresMembershipTable(db)
|
||||
membership, err := prepareMembershipTable(db)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
published, err := NewPostgresPublishedTable(db)
|
||||
published, err := preparePublishedTable(db)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
redactions, err := NewPostgresRedactionsTable(db)
|
||||
redactions, err := prepareRedactionsTable(db)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -54,12 +54,13 @@ type transactionStatements struct {
|
|||
selectTransactionEventIDStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func NewPostgresTransactionsTable(db *sql.DB) (tables.Transactions, error) {
|
||||
s := &transactionStatements{}
|
||||
func createTransactionsTable(db *sql.DB) error {
|
||||
_, err := db.Exec(transactionsSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func prepareTransactionsTable(db *sql.DB) (tables.Transactions, error) {
|
||||
s := &transactionStatements{}
|
||||
|
||||
return s, shared.StatementList{
|
||||
{&s.insertTransactionStmt, insertTransactionSQL},
|
||||
|
|
|
|||
|
|
@ -118,9 +118,24 @@ func (d *Database) StateEntriesForTuples(
|
|||
stateBlockNIDs []types.StateBlockNID,
|
||||
stateKeyTuples []types.StateKeyTuple,
|
||||
) ([]types.StateEntryList, error) {
|
||||
return d.StateBlockTable.BulkSelectFilteredStateBlockEntries(
|
||||
ctx, stateBlockNIDs, stateKeyTuples,
|
||||
entries, err := d.StateBlockTable.BulkSelectStateBlockEntries(
|
||||
ctx, stateBlockNIDs,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("d.StateBlockTable.BulkSelectStateBlockEntries: %w", err)
|
||||
}
|
||||
lists := []types.StateEntryList{}
|
||||
for i, entry := range entries {
|
||||
entries, err := d.EventsTable.BulkSelectStateEventByNID(ctx, entry, stateKeyTuples)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("d.EventsTable.BulkSelectStateEventByNID: %w", err)
|
||||
}
|
||||
lists = append(lists, types.StateEntryList{
|
||||
StateBlockNID: stateBlockNIDs[i],
|
||||
StateEntries: entries,
|
||||
})
|
||||
}
|
||||
return lists, nil
|
||||
}
|
||||
|
||||
func (d *Database) RoomInfo(ctx context.Context, roomID string) (*types.RoomInfo, error) {
|
||||
|
|
@ -141,8 +156,28 @@ func (d *Database) AddState(
|
|||
stateBlockNIDs []types.StateBlockNID,
|
||||
state []types.StateEntry,
|
||||
) (stateNID types.StateSnapshotNID, err error) {
|
||||
if len(stateBlockNIDs) > 0 {
|
||||
// Check to see if the event already appears in any of the existing state
|
||||
// blocks. If it does then we should not add it again, as this will just
|
||||
// result in excess state blocks and snapshots.
|
||||
// TODO: Investigate why this is happening - probably input_events.go!
|
||||
blocks, berr := d.StateBlockTable.BulkSelectStateBlockEntries(ctx, stateBlockNIDs)
|
||||
if berr != nil {
|
||||
return 0, fmt.Errorf("d.StateBlockTable.BulkSelectStateBlockEntries: %w", berr)
|
||||
}
|
||||
for i := len(state) - 1; i >= 0; i-- {
|
||||
for _, events := range blocks {
|
||||
for _, event := range events {
|
||||
if state[i].EventNID == event {
|
||||
state = append(state[:i], state[i+1:]...)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
err = d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
|
||||
if len(state) > 0 {
|
||||
// If there's any state left to add then let's add new blocks.
|
||||
var stateBlockNID types.StateBlockNID
|
||||
stateBlockNID, err = d.StateBlockTable.BulkInsertStateData(ctx, txn, state)
|
||||
if err != nil {
|
||||
|
|
@ -237,7 +272,24 @@ func (d *Database) StateBlockNIDs(
|
|||
func (d *Database) StateEntries(
|
||||
ctx context.Context, stateBlockNIDs []types.StateBlockNID,
|
||||
) ([]types.StateEntryList, error) {
|
||||
return d.StateBlockTable.BulkSelectStateBlockEntries(ctx, stateBlockNIDs)
|
||||
entries, err := d.StateBlockTable.BulkSelectStateBlockEntries(
|
||||
ctx, stateBlockNIDs,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("d.StateBlockTable.BulkSelectStateBlockEntries: %w", err)
|
||||
}
|
||||
lists := make([]types.StateEntryList, 0, len(entries))
|
||||
for i, entry := range entries {
|
||||
eventNIDs, err := d.EventsTable.BulkSelectStateEventByNID(ctx, entry, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("d.EventsTable.BulkSelectStateEventByNID: %w", err)
|
||||
}
|
||||
lists = append(lists, types.StateEntryList{
|
||||
StateBlockNID: stateBlockNIDs[i],
|
||||
StateEntries: eventNIDs,
|
||||
})
|
||||
}
|
||||
return lists, nil
|
||||
}
|
||||
|
||||
func (d *Database) SetRoomAlias(ctx context.Context, alias string, roomID string, creatorUserID string) error {
|
||||
|
|
|
|||
|
|
@ -24,6 +24,7 @@ import (
|
|||
|
||||
func LoadFromGoose() {
|
||||
goose.AddMigration(UpAddForgottenColumn, DownAddForgottenColumn)
|
||||
goose.AddMigration(UpStateBlocksRefactor, DownStateBlocksRefactor)
|
||||
}
|
||||
|
||||
func LoadAddForgottenColumn(m *sqlutil.Migrations) {
|
||||
|
|
|
|||
|
|
@ -0,0 +1,168 @@
|
|||
// Copyright 2020 The Matrix.org Foundation C.I.C.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package deltas
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/matrix-org/dendrite/internal"
|
||||
"github.com/matrix-org/dendrite/internal/sqlutil"
|
||||
"github.com/matrix-org/dendrite/roomserver/types"
|
||||
"github.com/matrix-org/util"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func LoadStateBlocksRefactor(m *sqlutil.Migrations) {
|
||||
m.AddMigration(UpStateBlocksRefactor, DownStateBlocksRefactor)
|
||||
}
|
||||
|
||||
func UpStateBlocksRefactor(tx *sql.Tx) error {
|
||||
logrus.Warn("Performing state storage upgrade. Please wait, this may take some time!")
|
||||
defer logrus.Warn("State storage upgrade complete")
|
||||
|
||||
var maxsnapshotid int
|
||||
var maxblockid int
|
||||
if err := tx.QueryRow(`SELECT IFNULL(MAX(state_snapshot_nid),0) FROM roomserver_state_snapshots;`).Scan(&maxsnapshotid); err != nil {
|
||||
return fmt.Errorf("tx.QueryRow.Scan (count snapshots): %w", err)
|
||||
}
|
||||
if err := tx.QueryRow(`SELECT IFNULL(MAX(state_block_nid),0) FROM roomserver_state_block;`).Scan(&maxblockid); err != nil {
return fmt.Errorf("tx.QueryRow.Scan (count blocks): %w", err)
|
||||
}
|
||||
maxsnapshotid++
|
||||
maxblockid++
|
||||
|
||||
if _, err := tx.Exec(`ALTER TABLE roomserver_state_block RENAME TO _roomserver_state_block;`); err != nil {
|
||||
return fmt.Errorf("tx.Exec: %w", err)
|
||||
}
|
||||
if _, err := tx.Exec(`ALTER TABLE roomserver_state_snapshots RENAME TO _roomserver_state_snapshots;`); err != nil {
|
||||
return fmt.Errorf("tx.Exec: %w", err)
|
||||
}
|
||||
_, err := tx.Exec(`
|
||||
CREATE TABLE IF NOT EXISTS roomserver_state_block (
|
||||
state_block_nid INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
state_block_hash BLOB UNIQUE,
|
||||
event_nids TEXT NOT NULL DEFAULT '[]'
|
||||
);
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("tx.Exec: %w", err)
|
||||
}
|
||||
_, err = tx.Exec(`
|
||||
CREATE TABLE IF NOT EXISTS roomserver_state_snapshots (
|
||||
state_snapshot_nid INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
state_snapshot_hash BLOB UNIQUE,
|
||||
room_nid INTEGER NOT NULL,
|
||||
state_block_nids TEXT NOT NULL DEFAULT '[]'
|
||||
);
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("tx.Exec: %w", err)
|
||||
}
|
||||
snapshotrows, err := tx.Query(`SELECT state_snapshot_nid, room_nid, state_block_nids FROM _roomserver_state_snapshots;`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("tx.Query: %w", err)
|
||||
}
|
||||
defer internal.CloseAndLogIfError(context.TODO(), snapshotrows, "rows.close() failed")
|
||||
for snapshotrows.Next() {
|
||||
var snapshot types.StateSnapshotNID
|
||||
var room types.RoomNID
|
||||
var jsonblocks string
|
||||
var blocks []types.StateBlockNID
|
||||
if err = snapshotrows.Scan(&snapshot, &room, &jsonblocks); err != nil {
|
||||
return fmt.Errorf("rows.Scan: %w", err)
|
||||
}
|
||||
if err = json.Unmarshal([]byte(jsonblocks), &blocks); err != nil {
|
||||
return fmt.Errorf("json.Unmarshal: %w", err)
|
||||
}
|
||||
|
||||
var newblocks types.StateBlockNIDs
|
||||
for _, block := range blocks {
|
||||
if err = func() error {
|
||||
blockrows, berr := tx.Query(`SELECT event_nid FROM _roomserver_state_block WHERE state_block_nid = $1`, block)
|
||||
if berr != nil {
|
||||
return fmt.Errorf("tx.Query (event nids from old block): %w", berr)
|
||||
}
|
||||
defer internal.CloseAndLogIfError(context.TODO(), blockrows, "rows.close() failed")
|
||||
events := types.EventNIDs{}
|
||||
for blockrows.Next() {
|
||||
var event types.EventNID
|
||||
if err = blockrows.Scan(&event); err != nil {
|
||||
return fmt.Errorf("rows.Scan: %w", err)
|
||||
}
|
||||
events = append(events, event)
|
||||
}
|
||||
events = events[:util.SortAndUnique(events)]
|
||||
eventjson, eerr := json.Marshal(events)
|
||||
if eerr != nil {
|
||||
return fmt.Errorf("json.Marshal: %w", eerr)
|
||||
}
|
||||
|
||||
var blocknid types.StateBlockNID
|
||||
err = tx.QueryRow(`
|
||||
INSERT INTO roomserver_state_block (state_block_nid, state_block_hash, event_nids)
|
||||
VALUES ($1, $2, $3)
|
||||
ON CONFLICT (state_block_hash) DO UPDATE SET event_nids=$3
|
||||
RETURNING state_block_nid
|
||||
`, maxblockid, events.Hash(), eventjson).Scan(&blocknid)
|
||||
if err != nil {
|
||||
return fmt.Errorf("tx.QueryRow.Scan (insert new block): %w", err)
|
||||
}
|
||||
maxblockid++
|
||||
newblocks = append(newblocks, blocknid)
|
||||
return nil
|
||||
}(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
newblocksjson, jerr := json.Marshal(newblocks)
|
||||
if jerr != nil {
|
||||
return fmt.Errorf("json.Marshal (new blocks): %w", jerr)
|
||||
}
|
||||
var newsnapshot types.StateSnapshotNID
|
||||
err = tx.QueryRow(`
|
||||
INSERT INTO roomserver_state_snapshots (state_snapshot_nid, state_snapshot_hash, room_nid, state_block_nids)
|
||||
VALUES ($1, $2, $3, $4)
|
||||
ON CONFLICT (state_snapshot_hash) DO UPDATE SET room_nid=$3
|
||||
RETURNING state_snapshot_nid
|
||||
`, maxsnapshotid, newblocks.Hash(), room, newblocksjson).Scan(&newsnapshot)
|
||||
if err != nil {
|
||||
return fmt.Errorf("tx.QueryRow.Scan (insert new snapshot): %w", err)
|
||||
}
|
||||
maxsnapshotid++
|
||||
if _, err = tx.Exec(`UPDATE roomserver_events SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newsnapshot, snapshot, maxsnapshotid); err != nil {
|
||||
return fmt.Errorf("tx.Exec (update events): %w", err)
|
||||
}
|
||||
if _, err = tx.Exec(`UPDATE roomserver_rooms SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newsnapshot, snapshot, maxsnapshotid); err != nil {
|
||||
return fmt.Errorf("tx.Exec (update rooms): %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if _, err = tx.Exec(`DROP TABLE _roomserver_state_snapshots;`); err != nil {
|
||||
return fmt.Errorf("tx.Exec (delete old snapshot table): %w", err)
|
||||
}
|
||||
if _, err = tx.Exec(`DROP TABLE _roomserver_state_block;`); err != nil {
|
||||
return fmt.Errorf("tx.Exec (delete old block table): %w", err)
|
||||
}
|
||||
|
||||
return nil
}

func DownStateBlocksRefactor(tx *sql.Tx) error {
panic("Downgrading state storage is not supported")
}
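For context, this delta only runs because the storage Open() functions elsewhere in this diff register it against a fresh migration set after creating the tables and before preparing any statements; condensed, the wiring looks like this (pieced together from the Open changes above, not a new API):

// Create the tables first, then run the registered deltas, then prepare statements.
m := sqlutil.NewMigrations()
deltas.LoadAddForgottenColumn(m)
deltas.LoadStateBlocksRefactor(m)
if err := m.RunDeltas(db, dbProperties); err != nil {
	return nil, err
}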
|
||||
|
|
@ -53,14 +53,16 @@ type eventJSONStatements struct {
|
|||
bulkSelectEventJSONStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func NewSqliteEventJSONTable(db *sql.DB) (tables.EventJSON, error) {
|
||||
func createEventJSONTable(db *sql.DB) error {
|
||||
_, err := db.Exec(eventJSONSchema)
|
||||
return err
|
||||
}
|
||||
|
||||
func prepareEventJSONTable(db *sql.DB) (tables.EventJSON, error) {
|
||||
s := &eventJSONStatements{
|
||||
db: db,
|
||||
}
|
||||
_, err := db.Exec(eventJSONSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return s, shared.StatementList{
|
||||
{&s.insertEventJSONStmt, insertEventJSONSQL},
|
||||
{&s.bulkSelectEventJSONStmt, bulkSelectEventJSONSQL},
|
||||
|
|
|
|||
|
|
@ -70,14 +70,16 @@ type eventStateKeyStatements struct {
|
|||
bulkSelectEventStateKeyStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func NewSqliteEventStateKeysTable(db *sql.DB) (tables.EventStateKeys, error) {
|
||||
func createEventStateKeysTable(db *sql.DB) error {
|
||||
_, err := db.Exec(eventStateKeysSchema)
|
||||
return err
|
||||
}
|
||||
|
||||
func prepareEventStateKeysTable(db *sql.DB) (tables.EventStateKeys, error) {
|
||||
s := &eventStateKeyStatements{
|
||||
db: db,
|
||||
}
|
||||
_, err := db.Exec(eventStateKeysSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return s, shared.StatementList{
|
||||
{&s.insertEventStateKeyNIDStmt, insertEventStateKeyNIDSQL},
|
||||
{&s.selectEventStateKeyNIDStmt, selectEventStateKeyNIDSQL},
|
||||
|
|
|
|||
|
|
@ -85,14 +85,15 @@ type eventTypeStatements struct {
|
|||
bulkSelectEventTypeNIDStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func NewSqliteEventTypesTable(db *sql.DB) (tables.EventTypes, error) {
|
||||
func createEventTypesTable(db *sql.DB) error {
|
||||
_, err := db.Exec(eventTypesSchema)
|
||||
return err
|
||||
}
|
||||
|
||||
func prepareEventTypesTable(db *sql.DB) (tables.EventTypes, error) {
|
||||
s := &eventTypeStatements{
|
||||
db: db,
|
||||
}
|
||||
_, err := db.Exec(eventTypesSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return s, shared.StatementList{
|
||||
{&s.insertEventTypeNIDStmt, insertEventTypeNIDSQL},
|
||||
|
|
|
|||
|
|
@ -20,6 +20,7 @@ import (
|
|||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/matrix-org/dendrite/internal"
|
||||
|
|
@ -63,6 +64,11 @@ const bulkSelectStateEventByIDSQL = "" +
|
|||
" WHERE event_id IN ($1)" +
|
||||
" ORDER BY event_type_nid, event_state_key_nid ASC"
|
||||
|
||||
const bulkSelectStateEventByNIDSQL = "" +
|
||||
"SELECT event_type_nid, event_state_key_nid, event_nid FROM roomserver_events" +
|
||||
" WHERE event_nid IN ($1)"
|
||||
// Rest of query is built by BulkSelectStateEventByNID
|
||||
|
||||
const bulkSelectStateAtEventByIDSQL = "" +
|
||||
"SELECT event_type_nid, event_state_key_nid, event_nid, state_snapshot_nid, is_rejected FROM roomserver_events" +
|
||||
" WHERE event_id IN ($1)"
|
||||
|
|
@ -115,14 +121,15 @@ type eventStatements struct {
|
|||
//selectRoomNIDsForEventNIDsStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func NewSqliteEventsTable(db *sql.DB) (tables.Events, error) {
|
||||
func createEventsTable(db *sql.DB) error {
|
||||
_, err := db.Exec(eventsSchema)
|
||||
return err
|
||||
}
|
||||
|
||||
func prepareEventsTable(db *sql.DB) (tables.Events, error) {
|
||||
s := &eventStatements{
|
||||
db: db,
|
||||
}
|
||||
_, err := db.Exec(eventsSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return s, shared.StatementList{
|
||||
{&s.insertEventStmt, insertEventSQL},
|
||||
|
|
@ -232,6 +239,61 @@ func (s *eventStatements) BulkSelectStateEventByID(
|
|||
return results, err
|
||||
}
|
||||
|
||||
// BulkSelectStateEventByNID looks up a list of state events by event NID,
// optionally filtered to the given state key tuples.
// If any of the requested events are missing from the database it returns a types.MissingEventError
|
||||
func (s *eventStatements) BulkSelectStateEventByNID(
|
||||
ctx context.Context, eventNIDs []types.EventNID,
|
||||
stateKeyTuples []types.StateKeyTuple,
|
||||
) ([]types.StateEntry, error) {
|
||||
tuples := stateKeyTupleSorter(stateKeyTuples)
|
||||
sort.Sort(tuples)
|
||||
eventTypeNIDArray, eventStateKeyNIDArray := tuples.typesAndStateKeysAsArrays()
|
||||
params := make([]interface{}, 0, len(eventNIDs)+len(eventTypeNIDArray)+len(eventStateKeyNIDArray))
|
||||
selectOrig := strings.Replace(bulkSelectStateEventByNIDSQL, "($1)", sqlutil.QueryVariadic(len(eventNIDs)), 1)
|
||||
for _, v := range eventNIDs {
|
||||
params = append(params, v)
|
||||
}
|
||||
if len(eventTypeNIDArray) > 0 {
|
||||
selectOrig += " AND event_type_nid IN " + sqlutil.QueryVariadicOffset(len(eventTypeNIDArray), len(params))
|
||||
for _, v := range eventTypeNIDArray {
|
||||
params = append(params, v)
|
||||
}
|
||||
}
|
||||
if len(eventStateKeyNIDArray) > 0 {
|
||||
selectOrig += " AND event_state_key_nid IN " + sqlutil.QueryVariadicOffset(len(eventStateKeyNIDArray), len(params))
|
||||
for _, v := range eventStateKeyNIDArray {
|
||||
params = append(params, v)
|
||||
}
|
||||
}
|
||||
selectOrig += " ORDER BY event_type_nid, event_state_key_nid ASC"
|
||||
selectStmt, err := s.db.Prepare(selectOrig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("s.db.Prepare: %w", err)
|
||||
}
|
||||
rows, err := selectStmt.QueryContext(ctx, params...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("selectStmt.QueryContext: %w", err)
|
||||
}
|
||||
defer internal.CloseAndLogIfError(ctx, rows, "bulkSelectStateEventByNID: rows.close() failed")
|
||||
// We know that we will only get as many results as event IDs
|
||||
// because of the unique constraint on event IDs.
|
||||
// So we can allocate an array of the correct size now.
|
||||
// We might get fewer results than IDs so we adjust the length of the slice before returning it.
|
||||
results := make([]types.StateEntry, len(eventNIDs))
|
||||
i := 0
|
||||
for ; rows.Next(); i++ {
|
||||
result := &results[i]
|
||||
if err = rows.Scan(
|
||||
&result.EventTypeNID,
|
||||
&result.EventStateKeyNID,
|
||||
&result.EventNID,
|
||||
); err != nil {
|
||||
return nil, err
}
}
return results[:i], err
}
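A note on the query construction above: the constant only carries the literal "($1)" placeholder, and the real IN lists are spliced in per call. The helper below sketches what sqlutil.QueryVariadic / QueryVariadicOffset are assumed to produce here (a parenthesised "$n" list starting after a given offset); it is an illustration, not the real sqlutil implementation.

package example

import (
	"fmt"
	"strings"
)

// queryVariadic builds "($<offset+1>, $<offset+2>, ...)" with count entries,
// which is what replaces the literal "($1)" in bulkSelectStateEventByNIDSQL.
func queryVariadic(count, offset int) string {
	var b strings.Builder
	b.WriteString("(")
	for i := 0; i < count; i++ {
		if i > 0 {
			b.WriteString(", ")
		}
		fmt.Fprintf(&b, "$%d", offset+i+1)
	}
	b.WriteString(")")
	return b.String()
}

// Example: three event NIDs filtered to two event type NIDs ends up as
//   ... WHERE event_nid IN ($1, $2, $3) AND event_type_nid IN ($4, $5) ...
// with the parameter slice holding the NIDs in the same order.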
|
||||
|
||||
// bulkSelectStateAtEventByID lookups the state at a list of events by event ID.
|
||||
// If any of the requested events are missing from the database it returns a types.MissingEventError.
|
||||
// If we do not have the state for any of the requested events it returns a types.MissingEventError.
|
||||
|
|
|
|||
|
|
@ -70,14 +70,15 @@ type inviteStatements struct {
|
|||
selectInvitesAboutToRetireStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func NewSqliteInvitesTable(db *sql.DB) (tables.Invites, error) {
|
||||
func createInvitesTable(db *sql.DB) error {
|
||||
_, err := db.Exec(inviteSchema)
|
||||
return err
|
||||
}
|
||||
|
||||
func prepareInvitesTable(db *sql.DB) (tables.Invites, error) {
|
||||
s := &inviteStatements{
|
||||
db: db,
|
||||
}
|
||||
_, err := db.Exec(inviteSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return s, shared.StatementList{
|
||||
{&s.insertInviteEventStmt, insertInviteEventSQL},
|
||||
|
|
|
|||
|
|
@ -115,7 +115,12 @@ type membershipStatements struct {
|
|||
updateMembershipForgetRoomStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func NewSqliteMembershipTable(db *sql.DB) (tables.Membership, error) {
|
||||
func createMembershipTable(db *sql.DB) error {
|
||||
_, err := db.Exec(membershipSchema)
|
||||
return err
|
||||
}
|
||||
|
||||
func prepareMembershipTable(db *sql.DB) (tables.Membership, error) {
|
||||
s := &membershipStatements{
|
||||
db: db,
|
||||
}
|
||||
|
|
@ -135,11 +140,6 @@ func NewSqliteMembershipTable(db *sql.DB) (tables.Membership, error) {
|
|||
}.Prepare(db)
|
||||
}
|
||||
|
||||
func (s *membershipStatements) execSchema(db *sql.DB) error {
|
||||
_, err := db.Exec(membershipSchema)
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *membershipStatements) InsertMembership(
|
||||
ctx context.Context, txn *sql.Tx,
|
||||
roomNID types.RoomNID, targetUserNID types.EventStateKeyNID,
|
||||
|
|
|
|||
|
|
@ -71,14 +71,15 @@ type previousEventStatements struct {
|
|||
selectPreviousEventExistsStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func NewSqlitePrevEventsTable(db *sql.DB) (tables.PreviousEvents, error) {
|
||||
func createPrevEventsTable(db *sql.DB) error {
|
||||
_, err := db.Exec(previousEventSchema)
|
||||
return err
|
||||
}
|
||||
|
||||
func preparePrevEventsTable(db *sql.DB) (tables.PreviousEvents, error) {
|
||||
s := &previousEventStatements{
|
||||
db: db,
|
||||
}
|
||||
_, err := db.Exec(previousEventSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return s, shared.StatementList{
|
||||
{&s.insertPreviousEventStmt, insertPreviousEventSQL},
|
||||
|
|
|
|||
|
|
@ -50,14 +50,16 @@ type publishedStatements struct {
|
|||
selectPublishedStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func NewSqlitePublishedTable(db *sql.DB) (tables.Published, error) {
|
||||
func createPublishedTable(db *sql.DB) error {
|
||||
_, err := db.Exec(publishedSchema)
|
||||
return err
|
||||
}
|
||||
|
||||
func preparePublishedTable(db *sql.DB) (tables.Published, error) {
|
||||
s := &publishedStatements{
|
||||
db: db,
|
||||
}
|
||||
_, err := db.Exec(publishedSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return s, shared.StatementList{
|
||||
{&s.upsertPublishedStmt, upsertPublishedSQL},
|
||||
{&s.selectAllPublishedStmt, selectAllPublishedSQL},
|
||||
|
|
|
|||
|
|
@ -59,14 +59,15 @@ type redactionStatements struct {
|
|||
markRedactionValidatedStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func NewSqliteRedactionsTable(db *sql.DB) (tables.Redactions, error) {
|
||||
func createRedactionsTable(db *sql.DB) error {
|
||||
_, err := db.Exec(redactionsSchema)
|
||||
return err
|
||||
}
|
||||
|
||||
func prepareRedactionsTable(db *sql.DB) (tables.Redactions, error) {
|
||||
s := &redactionStatements{
|
||||
db: db,
|
||||
}
|
||||
_, err := db.Exec(redactionsSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return s, shared.StatementList{
|
||||
{&s.insertRedactionStmt, insertRedactionSQL},
|
||||
|
|
|
|||
|
|
@ -64,14 +64,16 @@ type roomAliasesStatements struct {
|
|||
deleteRoomAliasStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func NewSqliteRoomAliasesTable(db *sql.DB) (tables.RoomAliases, error) {
|
||||
func createRoomAliasesTable(db *sql.DB) error {
|
||||
_, err := db.Exec(roomAliasesSchema)
|
||||
return err
|
||||
}
|
||||
|
||||
func prepareRoomAliasesTable(db *sql.DB) (tables.RoomAliases, error) {
|
||||
s := &roomAliasesStatements{
|
||||
db: db,
|
||||
}
|
||||
_, err := db.Exec(roomAliasesSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return s, shared.StatementList{
|
||||
{&s.insertRoomAliasStmt, insertRoomAliasSQL},
|
||||
{&s.selectRoomIDFromAliasStmt, selectRoomIDFromAliasSQL},
|
||||
|
|
|
|||
|
|
@ -86,14 +86,16 @@ type roomStatements struct {
|
|||
selectRoomIDsStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func NewSqliteRoomsTable(db *sql.DB) (tables.Rooms, error) {
|
||||
func createRoomsTable(db *sql.DB) error {
|
||||
_, err := db.Exec(roomsSchema)
|
||||
return err
|
||||
}
|
||||
|
||||
func prepareRoomsTable(db *sql.DB) (tables.Rooms, error) {
|
||||
s := &roomStatements{
|
||||
db: db,
|
||||
}
|
||||
_, err := db.Exec(roomsSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return s, shared.StatementList{
|
||||
{&s.insertRoomNIDStmt, insertRoomNIDSQL},
|
||||
{&s.selectRoomNIDStmt, selectRoomNIDSQL},
|
||||
|
|
|
|||
|
|
@ -18,6 +18,7 @@ package sqlite3
|
|||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
|
@ -32,228 +33,113 @@ import (
|
|||
|
||||
const stateDataSchema = `
|
||||
CREATE TABLE IF NOT EXISTS roomserver_state_block (
|
||||
state_block_nid INTEGER NOT NULL,
|
||||
event_type_nid INTEGER NOT NULL,
|
||||
event_state_key_nid INTEGER NOT NULL,
|
||||
event_nid INTEGER NOT NULL,
|
||||
UNIQUE (state_block_nid, event_type_nid, event_state_key_nid)
|
||||
-- The state block NID that identifies this block.
|
||||
state_block_nid INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
-- The hash of the state block, which is used to enforce uniqueness. The hash is
|
||||
-- generated in Dendrite and passed through to the database, as a btree index over
|
||||
-- this column is cheap and fits within the maximum index size.
|
||||
state_block_hash BLOB UNIQUE,
|
||||
-- The event NIDs contained within the state block, encoded as JSON.
|
||||
event_nids TEXT NOT NULL DEFAULT '[]'
|
||||
);
|
||||
`
|
||||
|
||||
const insertStateDataSQL = "" +
|
||||
"INSERT INTO roomserver_state_block (state_block_nid, event_type_nid, event_state_key_nid, event_nid)" +
|
||||
" VALUES ($1, $2, $3, $4)"
|
||||
|
||||
const selectNextStateBlockNIDSQL = `
|
||||
SELECT IFNULL(MAX(state_block_nid), 0) + 1 FROM roomserver_state_block
|
||||
// Insert a new state block. If we conflict on the hash column then
|
||||
// we must perform an update so that the RETURNING statement returns the
|
||||
// ID of the row that we conflicted with, so that we can then refer to
|
||||
// the original block.
|
||||
const insertStateDataSQL = `
|
||||
INSERT INTO roomserver_state_block (state_block_hash, event_nids)
|
||||
VALUES ($1, $2)
|
||||
ON CONFLICT (state_block_hash) DO UPDATE SET event_nids=$2
|
||||
RETURNING state_block_nid
|
||||
`
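SQLite has no array column type, so unlike the Postgres version the event NIDs live in a TEXT column as a JSON array; that is what the json.Marshal in BulkInsertStateData and the json.RawMessage scan in BulkSelectStateBlockEntries below are for. A tiny standalone sketch of that round trip (not Dendrite's own helpers):

package example

import "encoding/json"

// encodeEventNIDs turns a slice of event NIDs into the "[1,2,3]" form stored
// in the event_nids TEXT column; decodeEventNIDs reverses it on the way out.
func encodeEventNIDs(nids []int64) (string, error) {
	js, err := json.Marshal(nids)
	return string(js), err
}

func decodeEventNIDs(column string) ([]int64, error) {
	var nids []int64
	err := json.Unmarshal([]byte(column), &nids)
	return nids, err
}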
|
||||
|
||||
// Bulk state lookup by numeric state block ID.
|
||||
// Sort by the state_block_nid, event_type_nid, event_state_key_nid
|
||||
// This means that all the entries for a given state_block_nid will appear
|
||||
// together in the list and those entries will sorted by event_type_nid
|
||||
// and event_state_key_nid. This property makes it easier to merge two
|
||||
// state data blocks together.
|
||||
const bulkSelectStateBlockEntriesSQL = "" +
|
||||
"SELECT state_block_nid, event_type_nid, event_state_key_nid, event_nid" +
|
||||
" FROM roomserver_state_block WHERE state_block_nid IN ($1)" +
|
||||
" ORDER BY state_block_nid, event_type_nid, event_state_key_nid"
|
||||
|
||||
// Bulk state lookup by numeric state block ID.
|
||||
// Filters the rows in each block to the requested types and state keys.
|
||||
// We would like to restrict to particular type state key pairs but we are
|
||||
// restricted by the query language to pull the cross product of a list
|
||||
// of types and a list state_keys. So we have to filter the result in the
|
||||
// application to restrict it to the list of event types and state keys we
|
||||
// actually wanted.
|
||||
const bulkSelectFilteredStateBlockEntriesSQL = "" +
|
||||
"SELECT state_block_nid, event_type_nid, event_state_key_nid, event_nid" +
|
||||
" FROM roomserver_state_block WHERE state_block_nid IN ($1)" +
|
||||
" AND event_type_nid IN ($2) AND event_state_key_nid IN ($3)" +
|
||||
" ORDER BY state_block_nid, event_type_nid, event_state_key_nid"
|
||||
"SELECT state_block_nid, event_nids" +
|
||||
" FROM roomserver_state_block WHERE state_block_nid IN ($1)"
|
||||
|
||||
type stateBlockStatements struct {
|
||||
db *sql.DB
|
||||
insertStateDataStmt *sql.Stmt
|
||||
selectNextStateBlockNIDStmt *sql.Stmt
|
||||
bulkSelectStateBlockEntriesStmt *sql.Stmt
|
||||
bulkSelectFilteredStateBlockEntriesStmt *sql.Stmt
|
||||
db *sql.DB
|
||||
insertStateDataStmt *sql.Stmt
|
||||
bulkSelectStateBlockEntriesStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func NewSqliteStateBlockTable(db *sql.DB) (tables.StateBlock, error) {
|
||||
func createStateBlockTable(db *sql.DB) error {
|
||||
_, err := db.Exec(stateDataSchema)
|
||||
return err
|
||||
}
|
||||
|
||||
func prepareStateBlockTable(db *sql.DB) (tables.StateBlock, error) {
|
||||
s := &stateBlockStatements{
|
||||
db: db,
|
||||
}
|
||||
_, err := db.Exec(stateDataSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return s, shared.StatementList{
|
||||
{&s.insertStateDataStmt, insertStateDataSQL},
|
||||
{&s.selectNextStateBlockNIDStmt, selectNextStateBlockNIDSQL},
|
||||
{&s.bulkSelectStateBlockEntriesStmt, bulkSelectStateBlockEntriesSQL},
|
||||
{&s.bulkSelectFilteredStateBlockEntriesStmt, bulkSelectFilteredStateBlockEntriesSQL},
|
||||
}.Prepare(db)
|
||||
}
|
||||
|
||||
func (s *stateBlockStatements) BulkInsertStateData(
|
||||
ctx context.Context, txn *sql.Tx,
|
||||
entries []types.StateEntry,
|
||||
) (types.StateBlockNID, error) {
|
||||
if len(entries) == 0 {
|
||||
return 0, nil
|
||||
ctx context.Context,
|
||||
txn *sql.Tx,
|
||||
entries types.StateEntries,
|
||||
) (id types.StateBlockNID, err error) {
|
||||
entries = entries[:util.SortAndUnique(entries)]
|
||||
var nids types.EventNIDs
|
||||
for _, e := range entries {
|
||||
nids = append(nids, e.EventNID)
|
||||
}
|
||||
var stateBlockNID types.StateBlockNID
|
||||
err := sqlutil.TxStmt(txn, s.selectNextStateBlockNIDStmt).QueryRowContext(ctx).Scan(&stateBlockNID)
|
||||
js, err := json.Marshal(nids)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return 0, fmt.Errorf("json.Marshal: %w", err)
|
||||
}
|
||||
for _, entry := range entries {
|
||||
_, err = sqlutil.TxStmt(txn, s.insertStateDataStmt).ExecContext(
|
||||
ctx,
|
||||
int64(stateBlockNID),
|
||||
int64(entry.EventTypeNID),
|
||||
int64(entry.EventStateKeyNID),
|
||||
int64(entry.EventNID),
|
||||
)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
return stateBlockNID, err
|
||||
err = s.insertStateDataStmt.QueryRowContext(
|
||||
ctx, nids.Hash(), js,
|
||||
).Scan(&id)
|
||||
return
|
||||
}
|
||||
|
||||
func (s *stateBlockStatements) BulkSelectStateBlockEntries(
|
||||
ctx context.Context, stateBlockNIDs []types.StateBlockNID,
|
||||
) ([]types.StateEntryList, error) {
|
||||
nids := make([]interface{}, len(stateBlockNIDs))
|
||||
for k, v := range stateBlockNIDs {
|
||||
nids[k] = v
|
||||
ctx context.Context, stateBlockNIDs types.StateBlockNIDs,
|
||||
) ([][]types.EventNID, error) {
|
||||
intfs := make([]interface{}, len(stateBlockNIDs))
|
||||
for i := range stateBlockNIDs {
|
||||
intfs[i] = int64(stateBlockNIDs[i])
|
||||
}
|
||||
selectOrig := strings.Replace(bulkSelectStateBlockEntriesSQL, "($1)", sqlutil.QueryVariadic(len(nids)), 1)
|
||||
selectOrig := strings.Replace(bulkSelectStateBlockEntriesSQL, "($1)", sqlutil.QueryVariadic(len(intfs)), 1)
|
||||
selectStmt, err := s.db.Prepare(selectOrig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rows, err := selectStmt.QueryContext(ctx, nids...)
|
||||
rows, err := selectStmt.QueryContext(ctx, intfs...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer internal.CloseAndLogIfError(ctx, rows, "bulkSelectStateBlockEntries: rows.close() failed")
|
||||
|
||||
results := make([]types.StateEntryList, len(stateBlockNIDs))
|
||||
// current is a pointer to the StateEntryList to append the state entries to.
|
||||
var current *types.StateEntryList
|
||||
results := make([][]types.EventNID, len(stateBlockNIDs))
|
||||
i := 0
|
||||
for rows.Next() {
|
||||
var (
|
||||
stateBlockNID int64
|
||||
eventTypeNID int64
|
||||
eventStateKeyNID int64
|
||||
eventNID int64
|
||||
entry types.StateEntry
|
||||
)
|
||||
if err := rows.Scan(
|
||||
&stateBlockNID, &eventTypeNID, &eventStateKeyNID, &eventNID,
|
||||
); err != nil {
|
||||
for ; rows.Next(); i++ {
|
||||
var stateBlockNID types.StateBlockNID
|
||||
var result json.RawMessage
|
||||
if err = rows.Scan(&stateBlockNID, &result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entry.EventTypeNID = types.EventTypeNID(eventTypeNID)
|
||||
entry.EventStateKeyNID = types.EventStateKeyNID(eventStateKeyNID)
|
||||
entry.EventNID = types.EventNID(eventNID)
|
||||
if current == nil || types.StateBlockNID(stateBlockNID) != current.StateBlockNID {
|
||||
// The state entry row is for a different state data block to the current one.
|
||||
// So we start appending to the next entry in the list.
|
||||
current = &results[i]
|
||||
current.StateBlockNID = types.StateBlockNID(stateBlockNID)
|
||||
i++
|
||||
r := []types.EventNID{}
|
||||
if err = json.Unmarshal(result, &r); err != nil {
|
||||
return nil, fmt.Errorf("json.Unmarshal: %w", err)
|
||||
}
|
||||
current.StateEntries = append(current.StateEntries, entry)
|
||||
results[i] = r
|
||||
}
|
||||
if i != len(nids) {
|
||||
return nil, fmt.Errorf("storage: state data NIDs missing from the database (%d != %d)", i, len(nids))
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
func (s *stateBlockStatements) BulkSelectFilteredStateBlockEntries(
|
||||
ctx context.Context,
|
||||
stateBlockNIDs []types.StateBlockNID,
|
||||
stateKeyTuples []types.StateKeyTuple,
|
||||
) ([]types.StateEntryList, error) {
|
||||
tuples := stateKeyTupleSorter(stateKeyTuples)
|
||||
// Sort the tuples so that we can run binary search against them as we filter the rows returned by the db.
|
||||
sort.Sort(tuples)
|
||||
|
||||
eventTypeNIDArray, eventStateKeyNIDArray := tuples.typesAndStateKeysAsArrays()
|
||||
sqlStatement := strings.Replace(bulkSelectFilteredStateBlockEntriesSQL, "($1)", sqlutil.QueryVariadic(len(stateBlockNIDs)), 1)
|
||||
sqlStatement = strings.Replace(sqlStatement, "($2)", sqlutil.QueryVariadicOffset(len(eventTypeNIDArray), len(stateBlockNIDs)), 1)
|
||||
sqlStatement = strings.Replace(sqlStatement, "($3)", sqlutil.QueryVariadicOffset(len(eventStateKeyNIDArray), len(stateBlockNIDs)+len(eventTypeNIDArray)), 1)
|
||||
|
||||
var params []interface{}
|
||||
for _, val := range stateBlockNIDs {
|
||||
params = append(params, int64(val))
|
||||
}
|
||||
for _, val := range eventTypeNIDArray {
|
||||
params = append(params, val)
|
||||
}
|
||||
for _, val := range eventStateKeyNIDArray {
|
||||
params = append(params, val)
|
||||
}
|
||||
|
||||
rows, err := s.db.QueryContext(
|
||||
ctx,
|
||||
sqlStatement,
|
||||
params...,
|
||||
)
|
||||
if err != nil {
|
||||
if err = rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer internal.CloseAndLogIfError(ctx, rows, "bulkSelectFilteredStateBlockEntries: rows.close() failed")
|
||||
|
||||
var results []types.StateEntryList
|
||||
var current types.StateEntryList
|
||||
for rows.Next() {
|
||||
var (
|
||||
stateBlockNID int64
|
||||
eventTypeNID int64
|
||||
eventStateKeyNID int64
|
||||
eventNID int64
|
||||
entry types.StateEntry
|
||||
)
|
||||
if err := rows.Scan(
|
||||
&stateBlockNID, &eventTypeNID, &eventStateKeyNID, &eventNID,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entry.EventTypeNID = types.EventTypeNID(eventTypeNID)
|
||||
entry.EventStateKeyNID = types.EventStateKeyNID(eventStateKeyNID)
|
||||
entry.EventNID = types.EventNID(eventNID)
|
||||
|
||||
// We can use binary search here because we sorted the tuples earlier
|
||||
if !tuples.contains(entry.StateKeyTuple) {
|
||||
// The select will return the cross product of types and state keys.
|
||||
// So we need to check if type of the entry is in the list.
|
||||
continue
|
||||
}
|
||||
|
||||
if types.StateBlockNID(stateBlockNID) != current.StateBlockNID {
|
||||
// The state entry row is for a different state data block to the current one.
|
||||
// So we append the current entry to the results and start adding to a new one.
|
||||
// The first time through the loop current will be empty.
|
||||
if current.StateEntries != nil {
|
||||
results = append(results, current)
|
||||
}
|
||||
current = types.StateEntryList{StateBlockNID: types.StateBlockNID(stateBlockNID)}
|
||||
}
|
||||
current.StateEntries = append(current.StateEntries, entry)
|
||||
if i != len(stateBlockNIDs) {
|
||||
return nil, fmt.Errorf("storage: state data NIDs missing from the database (%d != %d)", len(results), len(stateBlockNIDs))
|
||||
}
|
||||
// Add the last entry to the list if it is not empty.
|
||||
if current.StateEntries != nil {
|
||||
results = append(results, current)
|
||||
}
|
||||
return results, nil
|
||||
return results, err
|
||||
}
|
||||
|
||||
type stateKeyTupleSorter []types.StateKeyTuple
|
||||
|
|
|
|||
|
|
@ -27,19 +27,34 @@ import (
|
|||
"github.com/matrix-org/dendrite/roomserver/storage/shared"
|
||||
"github.com/matrix-org/dendrite/roomserver/storage/tables"
|
||||
"github.com/matrix-org/dendrite/roomserver/types"
|
||||
"github.com/matrix-org/util"
|
||||
)
|
||||
|
||||
const stateSnapshotSchema = `
|
||||
CREATE TABLE IF NOT EXISTS roomserver_state_snapshots (
|
||||
-- The state snapshot NID that identifies this snapshot.
|
||||
state_snapshot_nid INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
-- The hash of the state snapshot, which is used to enforce uniqueness. The hash is
|
||||
-- generated in Dendrite and passed through to the database, as a btree index over
|
||||
-- this column is cheap and fits within the maximum index size.
|
||||
state_snapshot_hash BLOB UNIQUE,
|
||||
-- The room NID that the snapshot belongs to.
|
||||
room_nid INTEGER NOT NULL,
|
||||
-- The state blocks contained within this snapshot, encoded as JSON.
|
||||
state_block_nids TEXT NOT NULL DEFAULT '[]'
|
||||
);
|
||||
`
|
||||
|
||||
// Insert a new state snapshot. If we conflict on the hash column then
|
||||
// we must perform an update so that the RETURNING statement returns the
|
||||
// ID of the row that we conflicted with, so that we can then refer to
|
||||
// the original snapshot.
|
||||
const insertStateSQL = `
|
||||
INSERT INTO roomserver_state_snapshots (room_nid, state_block_nids)
|
||||
VALUES ($1, $2);`
|
||||
INSERT INTO roomserver_state_snapshots (state_snapshot_hash, room_nid, state_block_nids)
|
||||
VALUES ($1, $2, $3)
|
||||
ON CONFLICT (state_snapshot_hash) DO UPDATE SET room_nid=$2
|
||||
RETURNING state_snapshot_nid
|
||||
`
|
||||
|
||||
// Bulk state data NID lookup.
|
||||
// Sorting by state_snapshot_nid means we can use binary search over the result
|
||||
|
|
@ -54,14 +69,15 @@ type stateSnapshotStatements struct {
|
|||
bulkSelectStateBlockNIDsStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func NewSqliteStateSnapshotTable(db *sql.DB) (tables.StateSnapshot, error) {
|
||||
func createStateSnapshotTable(db *sql.DB) error {
|
||||
_, err := db.Exec(stateSnapshotSchema)
|
||||
return err
|
||||
}
|
||||
|
||||
func prepareStateSnapshotTable(db *sql.DB) (tables.StateSnapshot, error) {
|
||||
s := &stateSnapshotStatements{
|
||||
db: db,
|
||||
}
|
||||
_, err := db.Exec(stateSnapshotSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return s, shared.StatementList{
|
||||
{&s.insertStateStmt, insertStateSQL},
|
||||
|
|
@ -70,22 +86,20 @@ func NewSqliteStateSnapshotTable(db *sql.DB) (tables.StateSnapshot, error) {
|
|||
}
|
||||
|
||||
func (s *stateSnapshotStatements) InsertState(
|
||||
ctx context.Context, txn *sql.Tx, roomNID types.RoomNID, stateBlockNIDs []types.StateBlockNID,
|
||||
ctx context.Context, txn *sql.Tx, roomNID types.RoomNID, stateBlockNIDs types.StateBlockNIDs,
|
||||
) (stateNID types.StateSnapshotNID, err error) {
|
||||
stateBlockNIDs = stateBlockNIDs[:util.SortAndUnique(stateBlockNIDs)]
|
||||
stateBlockNIDsJSON, err := json.Marshal(stateBlockNIDs)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
insertStmt := sqlutil.TxStmt(txn, s.insertStateStmt)
|
||||
res, err := insertStmt.ExecContext(ctx, int64(roomNID), string(stateBlockNIDsJSON))
|
||||
var id int64
|
||||
err = insertStmt.QueryRowContext(ctx, stateBlockNIDs.Hash(), int64(roomNID), string(stateBlockNIDsJSON)).Scan(&id)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
lastRowID, err := res.LastInsertId()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
stateNID = types.StateSnapshotNID(lastRowID)
|
||||
stateNID = types.StateSnapshotNID(id)
|
||||
return
|
||||
}
|
||||
|

@ -53,17 +53,22 @@ func Open(dbProperties *config.DatabaseOptions, cache caching.RoomServerCaches)
// which it will never obtain.
db.SetMaxOpenConns(20)

// Create tables before executing migrations so we don't fail if the table is missing,
// and THEN prepare statements so we don't fail due to referencing new columns
ms := membershipStatements{}
if err := ms.execSchema(db); err != nil {
// Create the tables.
if err := d.create(db); err != nil {
return nil, err
}

// Then execute the migrations. By this point the tables are created with the latest
// schemas.
m := sqlutil.NewMigrations()
deltas.LoadAddForgottenColumn(m)
deltas.LoadStateBlocksRefactor(m)
if err := m.RunDeltas(db, dbProperties); err != nil {
return nil, err
}

// Then prepare the statements. Now that the migrations have run, any columns referred
// to in the database code should now exist.
if err := d.prepare(db, cache); err != nil {
return nil, err
}

@ -71,62 +76,107 @@ func Open(dbProperties *config.DatabaseOptions, cache caching.RoomServerCaches)
return &d, nil
}

// nolint: gocyclo
func (d *Database) create(db *sql.DB) error {
if err := createEventStateKeysTable(db); err != nil {
return err
}
if err := createEventTypesTable(db); err != nil {
return err
}
if err := createEventJSONTable(db); err != nil {
return err
}
if err := createEventsTable(db); err != nil {
return err
}
if err := createRoomsTable(db); err != nil {
return err
}
if err := createTransactionsTable(db); err != nil {
return err
}
if err := createStateBlockTable(db); err != nil {
return err
}
if err := createStateSnapshotTable(db); err != nil {
return err
}
if err := createPrevEventsTable(db); err != nil {
return err
}
if err := createRoomAliasesTable(db); err != nil {
return err
}
if err := createInvitesTable(db); err != nil {
return err
}
if err := createMembershipTable(db); err != nil {
return err
}
if err := createPublishedTable(db); err != nil {
return err
}
if err := createRedactionsTable(db); err != nil {
return err
}

return nil
}

func (d *Database) prepare(db *sql.DB, cache caching.RoomServerCaches) error {
var err error
eventStateKeys, err := NewSqliteEventStateKeysTable(db)
eventStateKeys, err := prepareEventStateKeysTable(db)
if err != nil {
return err
}
eventTypes, err := NewSqliteEventTypesTable(db)
eventTypes, err := prepareEventTypesTable(db)
if err != nil {
return err
}
eventJSON, err := NewSqliteEventJSONTable(db)
eventJSON, err := prepareEventJSONTable(db)
if err != nil {
return err
}
events, err := NewSqliteEventsTable(db)
events, err := prepareEventsTable(db)
if err != nil {
return err
}
rooms, err := NewSqliteRoomsTable(db)
rooms, err := prepareRoomsTable(db)
if err != nil {
return err
}
transactions, err := NewSqliteTransactionsTable(db)
transactions, err := prepareTransactionsTable(db)
if err != nil {
return err
}
stateBlock, err := NewSqliteStateBlockTable(db)
stateBlock, err := prepareStateBlockTable(db)
if err != nil {
return err
}
stateSnapshot, err := NewSqliteStateSnapshotTable(db)
stateSnapshot, err := prepareStateSnapshotTable(db)
if err != nil {
return err
}
prevEvents, err := NewSqlitePrevEventsTable(db)
prevEvents, err := preparePrevEventsTable(db)
if err != nil {
return err
}
roomAliases, err := NewSqliteRoomAliasesTable(db)
roomAliases, err := prepareRoomAliasesTable(db)
if err != nil {
return err
}
invites, err := NewSqliteInvitesTable(db)
invites, err := prepareInvitesTable(db)
if err != nil {
return err
}
membership, err := NewSqliteMembershipTable(db)
membership, err := prepareMembershipTable(db)
if err != nil {
return err
}
published, err := NewSqlitePublishedTable(db)
published, err := preparePublishedTable(db)
if err != nil {
return err
}
redactions, err := NewSqliteRedactionsTable(db)
redactions, err := prepareRedactionsTable(db)
if err != nil {
return err
}

@ -49,14 +49,15 @@ type transactionStatements struct {
selectTransactionEventIDStmt *sql.Stmt
}

func NewSqliteTransactionsTable(db *sql.DB) (tables.Transactions, error) {
func createTransactionsTable(db *sql.DB) error {
_, err := db.Exec(transactionsSchema)
return err
}

func prepareTransactionsTable(db *sql.DB) (tables.Transactions, error) {
s := &transactionStatements{
db: db,
}
_, err := db.Exec(transactionsSchema)
if err != nil {
return nil, err
}

return s, shared.StatementList{
{&s.insertTransactionStmt, insertTransactionSQL},

@ -43,6 +43,7 @@ type Events interface {
// bulkSelectStateEventByID looks up a list of state events by event ID.
// If any of the requested events are missing from the database it returns a types.MissingEventError
BulkSelectStateEventByID(ctx context.Context, eventIDs []string) ([]types.StateEntry, error)
BulkSelectStateEventByNID(ctx context.Context, eventNIDs []types.EventNID, stateKeyTuples []types.StateKeyTuple) ([]types.StateEntry, error)
// BulkSelectStateAtEventByID looks up the state at a list of events by event ID.
// If any of the requested events are missing from the database it returns a types.MissingEventError.
// If we do not have the state for any of the requested events it returns a types.MissingEventError.

@ -81,14 +82,14 @@ type Transactions interface {
}

type StateSnapshot interface {
InsertState(ctx context.Context, txn *sql.Tx, roomNID types.RoomNID, stateBlockNIDs []types.StateBlockNID) (stateNID types.StateSnapshotNID, err error)
InsertState(ctx context.Context, txn *sql.Tx, roomNID types.RoomNID, stateBlockNIDs types.StateBlockNIDs) (stateNID types.StateSnapshotNID, err error)
BulkSelectStateBlockNIDs(ctx context.Context, stateNIDs []types.StateSnapshotNID) ([]types.StateBlockNIDList, error)
}

type StateBlock interface {
BulkInsertStateData(ctx context.Context, txn *sql.Tx, entries []types.StateEntry) (types.StateBlockNID, error)
BulkSelectStateBlockEntries(ctx context.Context, stateBlockNIDs []types.StateBlockNID) ([]types.StateEntryList, error)
BulkSelectFilteredStateBlockEntries(ctx context.Context, stateBlockNIDs []types.StateBlockNID, stateKeyTuples []types.StateKeyTuple) ([]types.StateEntryList, error)
BulkInsertStateData(ctx context.Context, txn *sql.Tx, entries types.StateEntries) (types.StateBlockNID, error)
BulkSelectStateBlockEntries(ctx context.Context, stateBlockNIDs types.StateBlockNIDs) ([][]types.EventNID, error)
//BulkSelectFilteredStateBlockEntries(ctx context.Context, stateBlockNIDs []types.StateBlockNID, stateKeyTuples []types.StateKeyTuple) ([]types.StateEntryList, error)
}

type RoomAliases interface {

@ -16,9 +16,11 @@
package types

import (
"encoding/json"
"sort"

"github.com/matrix-org/gomatrixserverlib"
"golang.org/x/crypto/blake2b"
)

// EventTypeNID is a numeric ID for an event type.

@ -40,6 +42,38 @@ type StateSnapshotNID int64
// These blocks of state data are combined to form the actual state.
type StateBlockNID int64

// EventNIDs is used to sort and dedupe event NIDs.
type EventNIDs []EventNID

func (a EventNIDs) Len() int { return len(a) }
func (a EventNIDs) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a EventNIDs) Less(i, j int) bool { return a[i] < a[j] }

func (a EventNIDs) Hash() []byte {
j, err := json.Marshal(a)
if err != nil {
return nil
}
h := blake2b.Sum256(j)
return h[:]
}

// StateBlockNIDs is used to sort and dedupe state block NIDs.
type StateBlockNIDs []StateBlockNID

func (a StateBlockNIDs) Len() int { return len(a) }
func (a StateBlockNIDs) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a StateBlockNIDs) Less(i, j int) bool { return a[i] < a[j] }

func (a StateBlockNIDs) Hash() []byte {
j, err := json.Marshal(a)
if err != nil {
return nil
}
h := blake2b.Sum256(j)
return h[:]
}
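
To make the intended usage concrete, a small self-contained sketch of the sort, dedupe and BLAKE2b-hash pattern these types support (hypothetical NID values; sortAndUnique is a stand-in for util.SortAndUnique, assumed to sort in place and return the length of the unique prefix). This is the pattern that produces the snapshot hash fed into the ON CONFLICT (state_snapshot_hash) clause earlier.

package main

import (
	"encoding/hex"
	"encoding/json"
	"fmt"
	"sort"

	"golang.org/x/crypto/blake2b"
)

type StateBlockNID int64

// StateBlockNIDs mirrors the type added above so the sketch stands alone.
type StateBlockNIDs []StateBlockNID

func (a StateBlockNIDs) Len() int           { return len(a) }
func (a StateBlockNIDs) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a StateBlockNIDs) Less(i, j int) bool { return a[i] < a[j] }

// sortAndUnique is a stand-in for util.SortAndUnique: sort in place and
// return the length of the unique prefix.
func sortAndUnique(a StateBlockNIDs) int {
	sort.Sort(a)
	n := 0
	for i := range a {
		if n == 0 || a[i] != a[n-1] {
			a[n] = a[i]
			n++
		}
	}
	return n
}

func main() {
	nids := StateBlockNIDs{42, 7, 42, 19}
	nids = nids[:sortAndUnique(nids)] // [7 19 42], as InsertState does above

	j, err := json.Marshal(nids)
	if err != nil {
		panic(err)
	}
	h := blake2b.Sum256(j) // the same hash for identical sets of state blocks
	fmt.Println(nids, hex.EncodeToString(h[:]))
}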

// A StateKeyTuple is a pair of a numeric event type and a numeric state key.
// It is used to look up state entries.
type StateKeyTuple struct {

@ -65,6 +99,12 @@ type StateEntry struct {
EventNID EventNID
}

type StateEntries []StateEntry

func (a StateEntries) Len() int { return len(a) }
func (a StateEntries) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a StateEntries) Less(i, j int) bool { return a[i].EventNID < a[j].EventNID }

// LessThan returns true if this state entry is less than the other state entry.
// The ordering is arbitrary and is used to implement binary search and to efficiently deduplicate entries.
func (a StateEntry) LessThan(b StateEntry) bool {

@ -280,7 +280,7 @@ func (b *BaseDendrite) KeyServerHTTPClient() keyserverAPI.KeyInternalAPI {
// CreateAccountsDB creates a new instance of the accounts database. Should only
// be called once per component.
func (b *BaseDendrite) CreateAccountsDB() accounts.Database {
db, err := accounts.NewDatabase(&b.Cfg.UserAPI.AccountDatabase, b.Cfg.Global.ServerName, b.Cfg.UserAPI.BCryptCost)
db, err := accounts.NewDatabase(&b.Cfg.UserAPI.AccountDatabase, b.Cfg.Global.ServerName, b.Cfg.UserAPI.BCryptCost, b.Cfg.UserAPI.OpenIDTokenLifetimeMS)
if err != nil {
logrus.WithError(err).Panicf("failed to connect to accounts db")
}

@ -101,7 +101,7 @@ func (a *ApplicationService) IsInterestedInRoomID(
) bool {
if namespaceSlice, ok := a.NamespaceMap["rooms"]; ok {
for _, namespace := range namespaceSlice {
if namespace.RegexpObject.MatchString(roomID) {
if namespace.RegexpObject != nil && namespace.RegexpObject.MatchString(roomID) {
return true
}
}

@ -222,6 +222,10 @@ func setupRegexps(asAPI *AppServiceAPI, derived *Derived) (err error) {
case "aliases":
appendExclusiveNamespaceRegexs(&exclusiveAliasStrings, namespaceSlice)
}

if err = compileNamespaceRegexes(namespaceSlice); err != nil {
return fmt.Errorf("invalid regex in appservice %q, namespace %q: %w", appservice.ID, key, err)
}
}
}

@ -258,18 +262,31 @@ func setupRegexps(asAPI *AppServiceAPI, derived *Derived) (err error) {
func appendExclusiveNamespaceRegexs(
exclusiveStrings *[]string, namespaces []ApplicationServiceNamespace,
) {
for index, namespace := range namespaces {
for _, namespace := range namespaces {
if namespace.Exclusive {
// We append parentheses to later separate each regex when we compile
// i.e. "app1.*", "app2.*" -> "(app1.*)|(app2.*)"
*exclusiveStrings = append(*exclusiveStrings, "("+namespace.Regex+")")
}

// Compile this regex into a Regexp object for later use
namespaces[index].RegexpObject, _ = regexp.Compile(namespace.Regex)
}
}

// compileNamespaceRegexes turns strings into regex objects and complains
// if any of them are bad
func compileNamespaceRegexes(namespaces []ApplicationServiceNamespace) (err error) {
for index, namespace := range namespaces {
// Compile this regex into a Regexp object for later use
r, err := regexp.Compile(namespace.Regex)
if err != nil {
return fmt.Errorf("regex at namespace %d: %w", index, err)
}

namespaces[index].RegexpObject = r
}

return nil
}
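
As an illustration of the combining step described in the comment above, a short self-contained sketch (made-up namespace regexes, not taken from any real appservice registration) of wrapping each exclusive pattern in parentheses and OR-ing the pieces into a single compiled regex.

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Hypothetical exclusive namespace regexes from two appservices.
	exclusive := []string{"@app1_.*", "@app2_.*"}

	// Wrap each pattern in parentheses so the alternation keeps them separate,
	// i.e. "app1.*", "app2.*" -> "(app1.*)|(app2.*)".
	wrapped := make([]string, 0, len(exclusive))
	for _, pattern := range exclusive {
		wrapped = append(wrapped, "("+pattern+")")
	}

	combined, err := regexp.Compile(strings.Join(wrapped, "|"))
	if err != nil {
		panic(err) // a bad pattern would be reported by compileNamespaceRegexes instead
	}

	fmt.Println(combined.MatchString("@app1_alice:example.org")) // true
	fmt.Println(combined.MatchString("@bob:example.org"))        // false
}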

// checkErrors checks for any configuration errors amongst the loaded
// application services according to the application service spec.
func checkErrors(config *AppServiceAPI, derived *Derived) (err error) {

@ -10,6 +10,9 @@ type UserAPI struct {
// The cost when hashing passwords.
BCryptCost int `yaml:"bcrypt_cost"`

// The length of time an OpenID token is considered valid in milliseconds
OpenIDTokenLifetimeMS int64 `yaml:"openid_token_lifetime_ms"`

// The Account database stores the login details and account information
// for local users. It is accessed by the UserAPI.
AccountDatabase DatabaseOptions `yaml:"account_database"`

@ -18,6 +21,8 @@ type UserAPI struct {
DeviceDatabase DatabaseOptions `yaml:"device_database"`
}

const DefaultOpenIDTokenLifetimeMS = 3600000 // 60 minutes

func (c *UserAPI) Defaults() {
c.InternalAPI.Listen = "http://localhost:7781"
c.InternalAPI.Connect = "http://localhost:7781"

@ -26,6 +31,7 @@ func (c *UserAPI) Defaults() {
c.AccountDatabase.ConnectionString = "file:userapi_accounts.db"
c.DeviceDatabase.ConnectionString = "file:userapi_devices.db"
c.BCryptCost = bcrypt.DefaultCost
c.OpenIDTokenLifetimeMS = DefaultOpenIDTokenLifetimeMS
}

func (c *UserAPI) Verify(configErrs *ConfigErrors, isMonolith bool) {

@ -33,4 +39,5 @@ func (c *UserAPI) Verify(configErrs *ConfigErrors, isMonolith bool) {
checkURL(configErrs, "user_api.internal_api.connect", string(c.InternalAPI.Connect))
checkNotEmpty(configErrs, "user_api.account_database.connection_string", string(c.AccountDatabase.ConnectionString))
checkNotEmpty(configErrs, "user_api.device_database.connection_string", string(c.DeviceDatabase.ConnectionString))
checkPositive(configErrs, "user_api.openid_token_lifetime_ms", c.OpenIDTokenLifetimeMS)
}

@ -524,6 +524,9 @@ func (u *testUserAPI) PerformLastSeenUpdate(ctx context.Context, req *userapi.Pe
func (u *testUserAPI) PerformAccountDeactivation(ctx context.Context, req *userapi.PerformAccountDeactivationRequest, res *userapi.PerformAccountDeactivationResponse) error {
return nil
}
func (u *testUserAPI) PerformOpenIDTokenCreation(ctx context.Context, req *userapi.PerformOpenIDTokenCreationRequest, res *userapi.PerformOpenIDTokenCreationResponse) error {
return nil
}
func (u *testUserAPI) QueryProfile(ctx context.Context, req *userapi.QueryProfileRequest, res *userapi.QueryProfileResponse) error {
return nil
}

@ -548,6 +551,9 @@ func (u *testUserAPI) QueryDeviceInfos(ctx context.Context, req *userapi.QueryDe
func (u *testUserAPI) QuerySearchProfiles(ctx context.Context, req *userapi.QuerySearchProfilesRequest, res *userapi.QuerySearchProfilesResponse) error {
return nil
}
func (u *testUserAPI) QueryOpenIDToken(ctx context.Context, req *userapi.QueryOpenIDTokenRequest, res *userapi.QueryOpenIDTokenResponse) error {
return nil
}

type testRoomserverAPI struct {
// use a trace API as it implements method stubs so we don't need to have them here.

@ -39,9 +39,9 @@ import (
)

const (
ConstCreateEventContentKey = "org.matrix.msc1772.type"
ConstSpaceChildEventType = "org.matrix.msc1772.space.child"
ConstSpaceParentEventType = "org.matrix.msc1772.space.parent"
ConstCreateEventContentKey = "type"
ConstSpaceChildEventType = "m.space.child"
ConstSpaceParentEventType = "m.space.parent"
)

// Defaults sets the request defaults

@ -367,6 +367,9 @@ func (u *testUserAPI) PerformLastSeenUpdate(ctx context.Context, req *userapi.Pe
func (u *testUserAPI) PerformAccountDeactivation(ctx context.Context, req *userapi.PerformAccountDeactivationRequest, res *userapi.PerformAccountDeactivationResponse) error {
return nil
}
func (u *testUserAPI) PerformOpenIDTokenCreation(ctx context.Context, req *userapi.PerformOpenIDTokenCreationRequest, res *userapi.PerformOpenIDTokenCreationResponse) error {
return nil
}
func (u *testUserAPI) QueryProfile(ctx context.Context, req *userapi.QueryProfileRequest, res *userapi.QueryProfileResponse) error {
return nil
}

@ -391,6 +394,9 @@ func (u *testUserAPI) QueryDeviceInfos(ctx context.Context, req *userapi.QueryDe
func (u *testUserAPI) QuerySearchProfiles(ctx context.Context, req *userapi.QuerySearchProfilesRequest, res *userapi.QuerySearchProfilesResponse) error {
return nil
}
func (u *testUserAPI) QueryOpenIDToken(ctx context.Context, req *userapi.QueryOpenIDTokenRequest, res *userapi.QueryOpenIDTokenResponse) error {
return nil
}

type testRoomserverAPI struct {
// use a trace API as it implements method stubs so we don't need to have them here.

@ -156,7 +156,7 @@ func (rp *RequestPool) OnIncomingSyncRequest(req *http.Request, device *userapi.

currentPos := rp.Notifier.CurrentPosition()

if !rp.shouldReturnImmediately(syncReq) {
if !rp.shouldReturnImmediately(syncReq, currentPos) {
timer := time.NewTimer(syncReq.Timeout) // case of timeout=0 is handled above
defer timer.Stop()

@ -303,8 +303,8 @@ func (rp *RequestPool) OnIncomingKeyChangeRequest(req *http.Request, device *use
// shouldReturnImmediately returns whether the /sync request is an initial sync,
// or timeout=0, or full_state=true, in any of the cases the request should
// return immediately.
func (rp *RequestPool) shouldReturnImmediately(syncReq *types.SyncRequest) bool {
if syncReq.Since.IsEmpty() || syncReq.Timeout == 0 || syncReq.WantFullState {
func (rp *RequestPool) shouldReturnImmediately(syncReq *types.SyncRequest, currentPos types.StreamingToken) bool {
if currentPos.IsAfter(syncReq.Since) || syncReq.Timeout == 0 || syncReq.WantFullState {
return true
}
return false
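
For context on what happens when the request does not return immediately, a simplified sketch of the long-poll pattern the timer above feeds into (the channel stands in for Dendrite's actual notifier): the handler waits for whichever comes first, a signal that the stream position advanced or the client's requested timeout.

package main

import (
	"fmt"
	"time"
)

// waitForUpdate mirrors the timer logic above: block until new data is
// signalled on updates or the client's requested timeout elapses.
func waitForUpdate(updates <-chan struct{}, timeout time.Duration) bool {
	timer := time.NewTimer(timeout)
	defer timer.Stop()

	select {
	case <-updates:
		return true // the stream advanced: build an incremental response
	case <-timer.C:
		return false // timed out: respond with an empty delta at the current position
	}
}

func main() {
	updates := make(chan struct{}, 1)
	go func() {
		time.Sleep(50 * time.Millisecond)
		updates <- struct{}{} // simulate the notifier waking this request up
	}()
	fmt.Println(waitForUpdate(updates, time.Second)) // true: woken before the timeout
}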

@ -517,3 +517,6 @@ AS can set avatar for ghosted users
AS can set displayname for ghosted users
Ghost user must register before joining room
Inviting an AS-hosted user asks the AS server
Can generate a openid access_token that can be exchanged for information about a user
Invalid openid access tokens are rejected
Requests to userinfo without access tokens are rejected

@ -32,12 +32,14 @@ type UserInternalAPI interface {
PerformLastSeenUpdate(ctx context.Context, req *PerformLastSeenUpdateRequest, res *PerformLastSeenUpdateResponse) error
PerformDeviceUpdate(ctx context.Context, req *PerformDeviceUpdateRequest, res *PerformDeviceUpdateResponse) error
PerformAccountDeactivation(ctx context.Context, req *PerformAccountDeactivationRequest, res *PerformAccountDeactivationResponse) error
PerformOpenIDTokenCreation(ctx context.Context, req *PerformOpenIDTokenCreationRequest, res *PerformOpenIDTokenCreationResponse) error
QueryProfile(ctx context.Context, req *QueryProfileRequest, res *QueryProfileResponse) error
QueryAccessToken(ctx context.Context, req *QueryAccessTokenRequest, res *QueryAccessTokenResponse) error
QueryDevices(ctx context.Context, req *QueryDevicesRequest, res *QueryDevicesResponse) error
QueryAccountData(ctx context.Context, req *QueryAccountDataRequest, res *QueryAccountDataResponse) error
QueryDeviceInfos(ctx context.Context, req *QueryDeviceInfosRequest, res *QueryDeviceInfosResponse) error
QuerySearchProfiles(ctx context.Context, req *QuerySearchProfilesRequest, res *QuerySearchProfilesResponse) error
QueryOpenIDToken(ctx context.Context, req *QueryOpenIDTokenRequest, res *QueryOpenIDTokenResponse) error
}

// InputAccountDataRequest is the request for InputAccountData

@ -226,6 +228,27 @@ type PerformAccountDeactivationResponse struct {
AccountDeactivated bool
}

// PerformOpenIDTokenCreationRequest is the request for PerformOpenIDTokenCreation
type PerformOpenIDTokenCreationRequest struct {
UserID string
}

// PerformOpenIDTokenCreationResponse is the response for PerformOpenIDTokenCreation
type PerformOpenIDTokenCreationResponse struct {
Token OpenIDToken
}

// QueryOpenIDTokenRequest is the request for QueryOpenIDToken
type QueryOpenIDTokenRequest struct {
Token string
}

// QueryOpenIDTokenResponse is the response for QueryOpenIDToken
type QueryOpenIDTokenResponse struct {
Sub string // The Matrix User ID that generated the token
ExpiresAtMS int64
}

// Device represents a client's device (mobile, web, etc)
type Device struct {
ID string

@ -256,6 +279,24 @@ type Account struct {
// TODO: Associations (e.g. with application services)
}

// OpenIDToken represents an OpenID token
type OpenIDToken struct {
Token string
UserID string
ExpiresAtMS int64
}

// OpenIDTokenAttributes represents the attributes associated with an issued OpenID token
type OpenIDTokenAttributes struct {
UserID string
ExpiresAtMS int64
}

// UserInfo is for returning information about the user an OpenID token was issued for
type UserInfo struct {
Sub string // The Matrix user's ID who generated the token
}

// ErrorForbidden is an error indicating that the supplied access token is forbidden
type ErrorForbidden struct {
Message string

@ -414,3 +414,31 @@ func (a *UserInternalAPI) PerformAccountDeactivation(ctx context.Context, req *a
res.AccountDeactivated = err == nil
return err
}

// PerformOpenIDTokenCreation creates a new token that a relying party uses to authenticate a user
func (a *UserInternalAPI) PerformOpenIDTokenCreation(ctx context.Context, req *api.PerformOpenIDTokenCreationRequest, res *api.PerformOpenIDTokenCreationResponse) error {
token := util.RandomString(24)

exp, err := a.AccountDB.CreateOpenIDToken(ctx, token, req.UserID)

res.Token = api.OpenIDToken{
Token: token,
UserID: req.UserID,
ExpiresAtMS: exp,
}

return err
}

// QueryOpenIDToken validates that the OpenID token was issued for the user; the relying party uses this for validation
func (a *UserInternalAPI) QueryOpenIDToken(ctx context.Context, req *api.QueryOpenIDTokenRequest, res *api.QueryOpenIDTokenResponse) error {
openIDTokenAttrs, err := a.AccountDB.GetOpenIDTokenAttributes(ctx, req.Token)
if err != nil {
return err
}

res.Sub = openIDTokenAttrs.UserID
res.ExpiresAtMS = openIDTokenAttrs.ExpiresAtMS

return nil
}
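
To show where these two methods sit in the wider flow, a rough sketch of how a relying party might consume a token minted by PerformOpenIDTokenCreation: it calls the federation userinfo endpoint from the Matrix spec, which is what ends up invoking QueryOpenIDToken on the issuing homeserver. The server name and token are made up, and a real deployment would also resolve server-name delegation rather than dialling the host directly.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// verifyOpenIDToken asks the user's homeserver who an OpenID token belongs to
// via the federation userinfo endpoint; on the Dendrite side this is served by
// QueryOpenIDToken. serverName and token are whatever the client handed over.
func verifyOpenIDToken(serverName, token string) (string, error) {
	u := url.URL{
		Scheme:   "https",
		Host:     serverName, // simplified: ignores .well-known/SRV delegation
		Path:     "/_matrix/federation/v1/openid/userinfo",
		RawQuery: url.Values{"access_token": {token}}.Encode(),
	}

	resp, err := http.Get(u.String())
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("token rejected: %s", resp.Status)
	}

	var body struct {
		Sub string `json:"sub"` // the Matrix user ID the token was issued for
	}
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		return "", err
	}
	return body.Sub, nil
}

func main() {
	sub, err := verifyOpenIDToken("example.org", "some-opaque-token")
	fmt.Println(sub, err)
}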

@ -35,6 +35,7 @@ const (
PerformLastSeenUpdatePath = "/userapi/performLastSeenUpdate"
PerformDeviceUpdatePath = "/userapi/performDeviceUpdate"
PerformAccountDeactivationPath = "/userapi/performAccountDeactivation"
PerformOpenIDTokenCreationPath = "/userapi/performOpenIDTokenCreation"

QueryProfilePath = "/userapi/queryProfile"
QueryAccessTokenPath = "/userapi/queryAccessToken"

@ -42,6 +43,7 @@ const (
QueryAccountDataPath = "/userapi/queryAccountData"
QueryDeviceInfosPath = "/userapi/queryDeviceInfos"
QuerySearchProfilesPath = "/userapi/querySearchProfiles"
QueryOpenIDTokenPath = "/userapi/queryOpenIDToken"
)

// NewUserAPIClient creates a UserInternalAPI implemented by talking to a HTTP POST API.

@ -148,6 +150,14 @@ func (h *httpUserInternalAPI) PerformAccountDeactivation(ctx context.Context, re
return httputil.PostJSON(ctx, span, h.httpClient, apiURL, req, res)
}

func (h *httpUserInternalAPI) PerformOpenIDTokenCreation(ctx context.Context, request *api.PerformOpenIDTokenCreationRequest, response *api.PerformOpenIDTokenCreationResponse) error {
span, ctx := opentracing.StartSpanFromContext(ctx, "PerformOpenIDTokenCreation")
defer span.Finish()

apiURL := h.apiURL + PerformOpenIDTokenCreationPath
return httputil.PostJSON(ctx, span, h.httpClient, apiURL, request, response)
}

func (h *httpUserInternalAPI) QueryProfile(
ctx context.Context,
request *api.QueryProfileRequest,

@ -207,3 +217,11 @@ func (h *httpUserInternalAPI) QuerySearchProfiles(ctx context.Context, req *api.
apiURL := h.apiURL + QuerySearchProfilesPath
return httputil.PostJSON(ctx, span, h.httpClient, apiURL, req, res)
}

func (h *httpUserInternalAPI) QueryOpenIDToken(ctx context.Context, req *api.QueryOpenIDTokenRequest, res *api.QueryOpenIDTokenResponse) error {
span, ctx := opentracing.StartSpanFromContext(ctx, "QueryOpenIDToken")
defer span.Finish()

apiURL := h.apiURL + QueryOpenIDTokenPath
return httputil.PostJSON(ctx, span, h.httpClient, apiURL, req, res)
}