Mirror of https://github.com/matrix-org/dendrite.git (synced 2024-11-22 06:11:55 -06:00)

Commit f14517ae43: Merge branch 'main' of github.com:matrix-org/dendrite into s7evink/resolve-state
@@ -14,6 +14,7 @@ import (
"testing"
"time"

+"github.com/matrix-org/dendrite/federationapi/statistics"
"github.com/stretchr/testify/assert"

"github.com/matrix-org/dendrite/appservice"
@@ -32,6 +33,10 @@ import (
"github.com/matrix-org/dendrite/test/testrig"
)

+var testIsBlacklistedOrBackingOff = func(s spec.ServerName) (*statistics.ServerStatistics, error) {
+return &statistics.ServerStatistics{}, nil
+}
+
func TestAppserviceInternalAPI(t *testing.T) {

// Set expected results
@@ -144,7 +149,7 @@ func TestAppserviceInternalAPI(t *testing.T) {
cm := sqlutil.NewConnectionManager(ctx, cfg.Global.DatabaseOptions)
rsAPI := roomserver.NewInternalAPI(ctx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
-usrAPI := userapi.NewInternalAPI(ctx, cfg, cm, &natsInstance, rsAPI, nil)
+usrAPI := userapi.NewInternalAPI(ctx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
asAPI := appservice.NewInternalAPI(ctx, cfg, &natsInstance, usrAPI, rsAPI)

runCases(t, asAPI)
@@ -239,7 +244,7 @@ func TestAppserviceInternalAPI_UnixSocket_Simple(t *testing.T) {
cm := sqlutil.NewConnectionManager(ctx, cfg.Global.DatabaseOptions)
rsAPI := roomserver.NewInternalAPI(ctx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
-usrAPI := userapi.NewInternalAPI(ctx, cfg, cm, &natsInstance, rsAPI, nil)
+usrAPI := userapi.NewInternalAPI(ctx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
asAPI := appservice.NewInternalAPI(ctx, cfg, &natsInstance, usrAPI, rsAPI)

t.Run("UserIDExists", func(t *testing.T) {
@@ -378,7 +383,7 @@ func TestRoomserverConsumerOneInvite(t *testing.T) {
// Create required internal APIs
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
-usrAPI := userapi.NewInternalAPI(processCtx, cfg, cm, natsInstance, rsAPI, nil)
+usrAPI := userapi.NewInternalAPI(processCtx, cfg, cm, natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
// start the consumer
appservice.NewInternalAPI(processCtx, cfg, natsInstance, usrAPI, rsAPI)
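A note on the test changes above and below: userapi.NewInternalAPI gains two trailing arguments, a metrics toggle and a blacklist-check callback, and the tests inject a stub that reports every server as healthy instead of wiring up a real federation API. A minimal sketch of that dependency-injection idea, assuming only the stub signature shown in the diff (the consumer type below is hypothetical, not Dendrite's):

package example

import (
	"github.com/matrix-org/dendrite/federationapi/statistics"
	"github.com/matrix-org/gomatrixserverlib/spec"
)

// blacklistCheck is the shape of the callback threaded into the user API:
// given a server name, return its federation statistics, or an error if the
// server should not be contacted right now.
type blacklistCheck func(s spec.ServerName) (*statistics.ServerStatistics, error)

// consumer is a hypothetical component that only needs the check itself,
// not the whole federation API.
type consumer struct {
	isBlacklisted blacklistCheck
}

func newConsumer(check blacklistCheck) *consumer {
	return &consumer{isBlacklisted: check}
}

// In tests, a stub that treats every server as healthy is enough:
//
//	c := newConsumer(func(s spec.ServerName) (*statistics.ServerStatistics, error) {
//		return &statistics.ServerStatistics{}, nil
//	})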
@@ -38,6 +38,7 @@ import (
"github.com/matrix-org/dendrite/setup/jetstream"
"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/dendrite/userapi"
+"github.com/matrix-org/gomatrixserverlib/spec"

"github.com/matrix-org/gomatrixserverlib"

@@ -190,13 +191,13 @@ func startup() {
serverKeyAPI := &signing.YggdrasilKeys{}
keyRing := serverKeyAPI.KeyRing()

-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, federation)
+fedSenderAPI := federationapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, federation, rsAPI, caches, keyRing, true)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, federation, caching.EnableMetrics, fedSenderAPI.IsBlacklistedOrBackingOff)

asQuery := appservice.NewInternalAPI(
processCtx, cfg, &natsInstance, userAPI, rsAPI,
)
rsAPI.SetAppserviceAPI(asQuery)
-fedSenderAPI := federationapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, federation, rsAPI, caches, keyRing, true)
rsAPI.SetFederationAPI(fedSenderAPI, keyRing)

monolith := setup.Monolith{
@@ -216,7 +216,7 @@ func (m *DendriteMonolith) Start() {
processCtx, cfg, cm, &natsInstance, federation, rsAPI, caches, keyRing, true,
)

-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, federation)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, federation, caching.EnableMetrics, fsAPI.IsBlacklistedOrBackingOff)

asAPI := appservice.NewInternalAPI(processCtx, cfg, &natsInstance, userAPI, rsAPI)
rsAPI.SetAppserviceAPI(asAPI)
@@ -45,7 +45,7 @@ func TestAdminCreateToken(t *testing.T) {
caches := caching.NewRistrettoCache(128*1024*1024, time.Hour, caching.DisableMetrics)
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
AddPublicRoutes(processCtx, routers, cfg, &natsInstance, nil, rsAPI, nil, nil, nil, userAPI, nil, nil, caching.DisableMetrics)
accessTokens := map[*test.User]userDevice{
aliceAdmin: {},
@@ -196,7 +196,7 @@ func TestAdminListRegistrationTokens(t *testing.T) {
caches := caching.NewRistrettoCache(128*1024*1024, time.Hour, caching.DisableMetrics)
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
AddPublicRoutes(processCtx, routers, cfg, &natsInstance, nil, rsAPI, nil, nil, nil, userAPI, nil, nil, caching.DisableMetrics)
accessTokens := map[*test.User]userDevice{
aliceAdmin: {},
@@ -314,7 +314,7 @@ func TestAdminGetRegistrationToken(t *testing.T) {
caches := caching.NewRistrettoCache(128*1024*1024, time.Hour, caching.DisableMetrics)
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
AddPublicRoutes(processCtx, routers, cfg, &natsInstance, nil, rsAPI, nil, nil, nil, userAPI, nil, nil, caching.DisableMetrics)
accessTokens := map[*test.User]userDevice{
aliceAdmin: {},
@@ -415,7 +415,7 @@ func TestAdminDeleteRegistrationToken(t *testing.T) {
caches := caching.NewRistrettoCache(128*1024*1024, time.Hour, caching.DisableMetrics)
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
AddPublicRoutes(processCtx, routers, cfg, &natsInstance, nil, rsAPI, nil, nil, nil, userAPI, nil, nil, caching.DisableMetrics)
accessTokens := map[*test.User]userDevice{
aliceAdmin: {},
@@ -509,7 +509,7 @@ func TestAdminUpdateRegistrationToken(t *testing.T) {
caches := caching.NewRistrettoCache(128*1024*1024, time.Hour, caching.DisableMetrics)
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
AddPublicRoutes(processCtx, routers, cfg, &natsInstance, nil, rsAPI, nil, nil, nil, userAPI, nil, nil, caching.DisableMetrics)
accessTokens := map[*test.User]userDevice{
aliceAdmin: {},
@@ -693,7 +693,7 @@ func TestAdminResetPassword(t *testing.T) {
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
// Needed for changing the password/login
-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
// We mostly need the userAPI for this test, so nil for other APIs/caches etc.
AddPublicRoutes(processCtx, routers, cfg, &natsInstance, nil, rsAPI, nil, nil, nil, userAPI, nil, nil, caching.DisableMetrics)

@@ -791,7 +791,7 @@ func TestPurgeRoom(t *testing.T) {
fsAPI := federationapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, nil, rsAPI, caches, nil, true)
rsAPI.SetFederationAPI(fsAPI, nil)

-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
syncapi.AddPublicRoutes(processCtx, routers, cfg, cm, &natsInstance, userAPI, rsAPI, caches, caching.DisableMetrics)

// Create the room
@@ -863,7 +863,7 @@ func TestAdminEvacuateRoom(t *testing.T) {
fsAPI := federationapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, nil, rsAPI, caches, nil, true)
rsAPI.SetFederationAPI(fsAPI, nil)

-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)

// Create the room
if err := api.SendEvents(ctx, rsAPI, api.KindNew, room.Events(), "test", "test", api.DoNotSendToOtherServers, nil, false); err != nil {
@@ -964,7 +964,7 @@ func TestAdminEvacuateUser(t *testing.T) {
fsAPI := federationapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, basepkg.CreateFederationClient(cfg, nil), rsAPI, caches, nil, true)
rsAPI.SetFederationAPI(fsAPI, nil)

-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)

// Create the room
if err := api.SendEvents(ctx, rsAPI, api.KindNew, room.Events(), "test", "test", api.DoNotSendToOtherServers, nil, false); err != nil {
@@ -1055,7 +1055,7 @@ func TestAdminMarkAsStale(t *testing.T) {
cm := sqlutil.NewConnectionManager(processCtx, cfg.Global.DatabaseOptions)
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)

// We mostly need the rsAPI for this test, so nil for other APIs/caches etc.
AddPublicRoutes(processCtx, routers, cfg, &natsInstance, nil, rsAPI, nil, nil, nil, userAPI, nil, nil, caching.DisableMetrics)
@@ -17,6 +17,7 @@ import (
"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
"github.com/matrix-org/dendrite/clientapi/routing"
"github.com/matrix-org/dendrite/clientapi/threepid"
+"github.com/matrix-org/dendrite/federationapi/statistics"
"github.com/matrix-org/dendrite/internal/caching"
"github.com/matrix-org/dendrite/internal/httputil"
"github.com/matrix-org/dendrite/internal/pushrules"
@@ -49,6 +50,10 @@ type userDevice struct {
password string
}

+var testIsBlacklistedOrBackingOff = func(s spec.ServerName) (*statistics.ServerStatistics, error) {
+return &statistics.ServerStatistics{}, nil
+}
+
func TestGetPutDevices(t *testing.T) {
alice := test.NewUser(t)
bob := test.NewUser(t)
@@ -121,7 +126,7 @@ func TestGetPutDevices(t *testing.T) {
cm := sqlutil.NewConnectionManager(processCtx, cfg.Global.DatabaseOptions)
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)

// We mostly need the rsAPI for this test, so nil for other APIs/caches etc.
AddPublicRoutes(processCtx, routers, cfg, &natsInstance, nil, rsAPI, nil, nil, nil, userAPI, nil, nil, caching.DisableMetrics)
@@ -170,7 +175,7 @@ func TestDeleteDevice(t *testing.T) {
caches := caching.NewRistrettoCache(128*1024*1024, time.Hour, caching.DisableMetrics)
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)

// We mostly need the rsAPI/ for this test, so nil for other APIs/caches etc.
AddPublicRoutes(processCtx, routers, cfg, &natsInstance, nil, rsAPI, nil, nil, nil, userAPI, nil, nil, caching.DisableMetrics)
@@ -275,7 +280,7 @@ func TestDeleteDevices(t *testing.T) {
caches := caching.NewRistrettoCache(128*1024*1024, time.Hour, caching.DisableMetrics)
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)

// We mostly need the rsAPI/ for this test, so nil for other APIs/caches etc.
AddPublicRoutes(processCtx, routers, cfg, &natsInstance, nil, rsAPI, nil, nil, nil, userAPI, nil, nil, caching.DisableMetrics)
@@ -442,7 +447,7 @@ func TestSetDisplayname(t *testing.T) {

rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
asPI := appservice.NewInternalAPI(processCtx, cfg, natsInstance, userAPI, rsAPI)

AddPublicRoutes(processCtx, routers, cfg, natsInstance, base.CreateFederationClient(cfg, nil), rsAPI, asPI, nil, nil, userAPI, nil, nil, caching.DisableMetrics)
@@ -554,7 +559,7 @@ func TestSetAvatarURL(t *testing.T) {

rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
asPI := appservice.NewInternalAPI(processCtx, cfg, natsInstance, userAPI, rsAPI)

AddPublicRoutes(processCtx, routers, cfg, natsInstance, base.CreateFederationClient(cfg, nil), rsAPI, asPI, nil, nil, userAPI, nil, nil, caching.DisableMetrics)
@@ -632,7 +637,7 @@ func TestTyping(t *testing.T) {
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
// Needed to create accounts
-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
// We mostly need the rsAPI/userAPI for this test, so nil for other APIs etc.
AddPublicRoutes(processCtx, routers, cfg, &natsInstance, nil, rsAPI, nil, nil, nil, userAPI, nil, nil, caching.DisableMetrics)

@@ -716,7 +721,7 @@ func TestMembership(t *testing.T) {
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
// Needed to create accounts
-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
rsAPI.SetUserAPI(userAPI)
// We mostly need the rsAPI/userAPI for this test, so nil for other APIs etc.
AddPublicRoutes(processCtx, routers, cfg, &natsInstance, nil, rsAPI, nil, nil, nil, userAPI, nil, nil, caching.DisableMetrics)
@@ -955,7 +960,7 @@ func TestCapabilities(t *testing.T) {
// Needed to create accounts
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, nil, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
// We mostly need the rsAPI/userAPI for this test, so nil for other APIs etc.
AddPublicRoutes(processCtx, routers, cfg, &natsInstance, nil, rsAPI, nil, nil, nil, userAPI, nil, nil, caching.DisableMetrics)

@@ -1002,7 +1007,7 @@ func TestTurnserver(t *testing.T) {
// Needed to create accounts
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, nil, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
//rsAPI.SetUserAPI(userAPI)
// We mostly need the rsAPI/userAPI for this test, so nil for other APIs etc.
AddPublicRoutes(processCtx, routers, cfg, &natsInstance, nil, rsAPI, nil, nil, nil, userAPI, nil, nil, caching.DisableMetrics)
@@ -1100,7 +1105,7 @@ func Test3PID(t *testing.T) {
// Needed to create accounts
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, nil, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
// We mostly need the rsAPI/userAPI for this test, so nil for other APIs etc.
AddPublicRoutes(processCtx, routers, cfg, &natsInstance, nil, rsAPI, nil, nil, nil, userAPI, nil, nil, caching.DisableMetrics)

@@ -1276,7 +1281,7 @@ func TestPushRules(t *testing.T) {
cm := sqlutil.NewConnectionManager(processCtx, cfg.Global.DatabaseOptions)
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)

// We mostly need the rsAPI for this test, so nil for other APIs/caches etc.
AddPublicRoutes(processCtx, routers, cfg, &natsInstance, nil, rsAPI, nil, nil, nil, userAPI, nil, nil, caching.DisableMetrics)
@@ -1663,7 +1668,7 @@ func TestKeys(t *testing.T) {
cm := sqlutil.NewConnectionManager(processCtx, cfg.Global.DatabaseOptions)
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)

// We mostly need the rsAPI for this test, so nil for other APIs/caches etc.
AddPublicRoutes(processCtx, routers, cfg, &natsInstance, nil, rsAPI, nil, nil, nil, userAPI, nil, nil, caching.DisableMetrics)
@@ -2125,7 +2130,7 @@ func TestKeyBackup(t *testing.T) {
cm := sqlutil.NewConnectionManager(processCtx, cfg.Global.DatabaseOptions)
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)

// We mostly need the rsAPI for this test, so nil for other APIs/caches etc.
AddPublicRoutes(processCtx, routers, cfg, &natsInstance, nil, rsAPI, nil, nil, nil, userAPI, nil, nil, caching.DisableMetrics)
@@ -7,6 +7,7 @@ import (
"testing"
"time"

+"github.com/matrix-org/dendrite/federationapi/statistics"
"github.com/matrix-org/dendrite/internal/caching"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/matrix-org/dendrite/setup/jetstream"
@@ -21,6 +22,10 @@ import (
uapi "github.com/matrix-org/dendrite/userapi/api"
)

+var testIsBlacklistedOrBackingOff = func(s spec.ServerName) (*statistics.ServerStatistics, error) {
+return &statistics.ServerStatistics{}, nil
+}
+
func TestJoinRoomByIDOrAlias(t *testing.T) {
alice := test.NewUser(t)
bob := test.NewUser(t)
@@ -36,7 +41,7 @@ func TestJoinRoomByIDOrAlias(t *testing.T) {
natsInstance := jetstream.NATSInstance{}
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil) // creates the rs.Inputer etc
-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
asAPI := appservice.NewInternalAPI(processCtx, cfg, &natsInstance, userAPI, rsAPI)

// Create the users in the userapi
@@ -49,7 +49,7 @@ func TestLogin(t *testing.T) {
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
// Needed for /login
-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)

// We mostly need the userAPI for this test, so nil for other APIs/caches etc.
Setup(routers, cfg, nil, nil, userAPI, nil, nil, nil, nil, nil, nil, nil, caching.DisableMetrics)
@@ -416,7 +416,7 @@ func Test_register(t *testing.T) {
cm := sqlutil.NewConnectionManager(processCtx, cfg.Global.DatabaseOptions)
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
@@ -596,7 +596,7 @@ func TestRegisterUserWithDisplayName(t *testing.T) {
cm := sqlutil.NewConnectionManager(processCtx, cfg.Global.DatabaseOptions)
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
deviceName, deviceID := "deviceName", "deviceID"
expectedDisplayName := "DisplayName"
response := completeRegistration(
@@ -637,7 +637,7 @@ func TestRegisterAdminUsingSharedSecret(t *testing.T) {
caches := caching.NewRistrettoCache(128*1024*1024, time.Hour, caching.DisableMetrics)
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)

expectedDisplayName := "rabbit"
jsonStr := []byte(`{"admin":true,"mac":"24dca3bba410e43fe64b9b5c28306693bf3baa9f","nonce":"759f047f312b99ff428b21d581256f8592b8976e58bc1b543972dc6147e529a79657605b52d7becd160ff5137f3de11975684319187e06901955f79e5a6c5a79","password":"wonderland","username":"alice","displayname":"rabbit"}`)
@@ -145,7 +145,7 @@ func (p *P2PMonolith) SetupDendrite(
)
rsAPI.SetFederationAPI(fsAPI, keyRing)

-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, federation)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, federation, enableMetrics, fsAPI.IsBlacklistedOrBackingOff)

asAPI := appservice.NewInternalAPI(processCtx, cfg, &natsInstance, userAPI, rsAPI)
@@ -213,14 +213,15 @@ func main() {
natsInstance := jetstream.NATSInstance{}
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.EnableMetrics)

-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, federation)
-
-asAPI := appservice.NewInternalAPI(processCtx, cfg, &natsInstance, userAPI, rsAPI)
-rsAPI.SetAppserviceAPI(asAPI)
fsAPI := federationapi.NewInternalAPI(
processCtx, cfg, cm, &natsInstance, federation, rsAPI, caches, keyRing, true,
)

+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, federation, caching.EnableMetrics, fsAPI.IsBlacklistedOrBackingOff)
+
+asAPI := appservice.NewInternalAPI(processCtx, cfg, &natsInstance, userAPI, rsAPI)
+rsAPI.SetAppserviceAPI(asAPI)
+
rsAPI.SetFederationAPI(fsAPI, keyRing)

monolith := setup.Monolith{
@@ -162,7 +162,7 @@ func main() {
// dependency. Other components also need updating after their dependencies are up.
rsAPI.SetFederationAPI(fsAPI, keyRing)

-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, federationClient)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, federationClient, caching.EnableMetrics, fsAPI.IsBlacklistedOrBackingOff)
asAPI := appservice.NewInternalAPI(processCtx, cfg, &natsInstance, userAPI, rsAPI)

rsAPI.SetAppserviceAPI(asAPI)
@@ -325,6 +325,10 @@ user_api:
auto_join_rooms:
# - "#main:matrix.org"

+# The number of workers to start for the DeviceListUpdater. Defaults to 8.
+# This only needs updating if the "InputDeviceListUpdate" stream keeps growing indefinitely.
+# worker_count: 8
+
# Configuration for Opentracing.
# See https://github.com/matrix-org/dendrite/tree/master/docs/tracing for information on
# how this works and how to set it up.
@@ -24,7 +24,6 @@ import (
"github.com/matrix-org/gomatrixserverlib/fclient"
"github.com/sirupsen/logrus"

-"github.com/matrix-org/dendrite/federationapi/api"
federationAPI "github.com/matrix-org/dendrite/federationapi/api"
"github.com/matrix-org/dendrite/federationapi/consumers"
"github.com/matrix-org/dendrite/federationapi/internal"
@@ -102,7 +101,7 @@ func NewInternalAPI(
caches *caching.Caches,
keyRing *gomatrixserverlib.KeyRing,
resetBlacklist bool,
-) api.FederationInternalAPI {
+) *internal.FederationInternalAPI {
cfg := &dendriteCfg.FederationAPI

federationDB, err := storage.NewDatabase(processContext.Context(), cm, &cfg.Database, caches, dendriteCfg.Global.IsLocalServerName)
@@ -112,7 +112,7 @@ func NewFederationInternalAPI(
}
}

-func (a *FederationInternalAPI) isBlacklistedOrBackingOff(s spec.ServerName) (*statistics.ServerStatistics, error) {
+func (a *FederationInternalAPI) IsBlacklistedOrBackingOff(s spec.ServerName) (*statistics.ServerStatistics, error) {
stats := a.statistics.ForServer(s)
if stats.Blacklisted() {
return stats, &api.FederationClientError{
@@ -151,7 +151,7 @@ func failBlacklistableError(err error, stats *statistics.ServerStatistics) (unti
func (a *FederationInternalAPI) doRequestIfNotBackingOffOrBlacklisted(
s spec.ServerName, request func() (interface{}, error),
) (interface{}, error) {
-stats, err := a.isBlacklistedOrBackingOff(s)
+stats, err := a.IsBlacklistedOrBackingOff(s)
if err != nil {
return nil, err
}
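Exporting isBlacklistedOrBackingOff as IsBlacklistedOrBackingOff is what lets the callers above hand fsAPI.IsBlacklistedOrBackingOff straight to userapi.NewInternalAPI: in Go, a method value can be passed wherever a plain function of the same signature is expected. A small illustrative sketch, with invented names rather than Dendrite's:

package main

import "fmt"

type server struct{ blacklisted map[string]bool }

// IsBlacklisted is an exported method; s.IsBlacklisted (a method value)
// can be passed around as an ordinary func(string) bool.
func (s *server) IsBlacklisted(name string) bool { return s.blacklisted[name] }

// worker depends only on the check, not on *server.
type worker struct{ check func(string) bool }

func main() {
	s := &server{blacklisted: map[string]bool{"evil.example.org": true}}
	w := worker{check: s.IsBlacklisted} // method value injected as a dependency
	fmt.Println(w.check("evil.example.org"))
}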
@@ -26,7 +26,6 @@ import (
"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
"github.com/matrix-org/dendrite/cmd/dendrite-demo-yggdrasil/signing"
fedAPI "github.com/matrix-org/dendrite/federationapi"
-fedInternal "github.com/matrix-org/dendrite/federationapi/internal"
"github.com/matrix-org/dendrite/federationapi/routing"
"github.com/matrix-org/dendrite/internal/caching"
"github.com/matrix-org/dendrite/internal/httputil"
@@ -67,11 +66,8 @@ func TestHandleQueryProfile(t *testing.T) {
keyRing := serverKeyAPI.KeyRing()
fedapi := fedAPI.NewInternalAPI(processCtx, cfg, cm, &natsInstance, &fedClient, nil, nil, keyRing, true)
userapi := fakeUserAPI{}
-r, ok := fedapi.(*fedInternal.FederationInternalAPI)
-if !ok {
-panic("This is a programming error.")
-}
-routing.Setup(routers, cfg, nil, r, keyRing, &fedClient, &userapi, &cfg.MSCs, nil, caching.DisableMetrics)
+
+routing.Setup(routers, cfg, nil, fedapi, keyRing, &fedClient, &userapi, &cfg.MSCs, nil, caching.DisableMetrics)

handler := fedMux.Get(routing.QueryProfileRouteName).GetHandler().ServeHTTP
_, sk, _ := ed25519.GenerateKey(nil)
@@ -25,7 +25,6 @@ import (
"github.com/gorilla/mux"
"github.com/matrix-org/dendrite/cmd/dendrite-demo-yggdrasil/signing"
fedAPI "github.com/matrix-org/dendrite/federationapi"
-fedInternal "github.com/matrix-org/dendrite/federationapi/internal"
"github.com/matrix-org/dendrite/federationapi/routing"
"github.com/matrix-org/dendrite/internal/caching"
"github.com/matrix-org/dendrite/internal/httputil"
@@ -65,11 +64,8 @@ func TestHandleQueryDirectory(t *testing.T) {
keyRing := serverKeyAPI.KeyRing()
fedapi := fedAPI.NewInternalAPI(processCtx, cfg, cm, &natsInstance, &fedClient, nil, nil, keyRing, true)
userapi := fakeUserAPI{}
-r, ok := fedapi.(*fedInternal.FederationInternalAPI)
-if !ok {
-panic("This is a programming error.")
-}
-routing.Setup(routers, cfg, nil, r, keyRing, &fedClient, &userapi, &cfg.MSCs, nil, caching.DisableMetrics)
+
+routing.Setup(routers, cfg, nil, fedapi, keyRing, &fedClient, &userapi, &cfg.MSCs, nil, caching.DisableMetrics)

handler := fedMux.Get(routing.QueryDirectoryRouteName).GetHandler().ServeHTTP
_, sk, _ := ed25519.GenerateKey(nil)
@@ -23,7 +23,6 @@ import (
"github.com/gorilla/mux"
"github.com/matrix-org/dendrite/cmd/dendrite-demo-yggdrasil/signing"
fedAPI "github.com/matrix-org/dendrite/federationapi"
-fedInternal "github.com/matrix-org/dendrite/federationapi/internal"
"github.com/matrix-org/dendrite/federationapi/routing"
"github.com/matrix-org/dendrite/internal/caching"
"github.com/matrix-org/dendrite/internal/httputil"
@@ -62,11 +61,8 @@ func TestHandleSend(t *testing.T) {
fedapi := fedAPI.NewInternalAPI(processCtx, cfg, cm, &natsInstance, nil, nil, nil, nil, true)
serverKeyAPI := &signing.YggdrasilKeys{}
keyRing := serverKeyAPI.KeyRing()
-r, ok := fedapi.(*fedInternal.FederationInternalAPI)
-if !ok {
-panic("This is a programming error.")
-}
-routing.Setup(routers, cfg, nil, r, keyRing, nil, nil, &cfg.MSCs, nil, caching.DisableMetrics)
+
+routing.Setup(routers, cfg, nil, fedapi, keyRing, nil, nil, &cfg.MSCs, nil, caching.DisableMetrics)

handler := fedMux.Get(routing.SendRouteName).GetHandler().ServeHTTP
_, sk, _ := ed25519.GenerateKey(nil)
@@ -151,7 +151,7 @@ func (s *notaryServerKeysMetadataStatements) SelectKeys(ctx context.Context, txn
}
results = append(results, sk)
}
-return results, nil
+return results, rows.Err()
}

func (s *notaryServerKeysMetadataStatements) DeleteOldJSONResponses(ctx context.Context, txn *sql.Tx) error {
@@ -109,5 +109,5 @@ func (s *queueJSONStatements) SelectQueueJSON(
}
blobs[nid] = blob
}
-return blobs, err
+return blobs, rows.Err()
}
@@ -110,7 +110,7 @@ func (s *relayServersStatements) SelectRelayServers(
}
result = append(result, spec.ServerName(relayServer))
}
-return result, nil
+return result, rows.Err()
}

func (s *relayServersStatements) DeleteRelayServers(
@@ -216,5 +216,5 @@ func joinedHostsFromStmt(
})
}

-return result, nil
+return result, rows.Err()
}
@@ -154,7 +154,7 @@ func (s *notaryServerKeysMetadataStatements) SelectKeys(ctx context.Context, txn
}
results = append(results, sk)
}
-return results, nil
+return results, rows.Err()
}

func (s *notaryServerKeysMetadataStatements) DeleteOldJSONResponses(ctx context.Context, txn *sql.Tx) error {
@@ -135,5 +135,5 @@ func (s *queueJSONStatements) SelectQueueJSON(
}
blobs[nid] = blob
}
-return blobs, err
+return blobs, rows.Err()
}
@@ -109,7 +109,7 @@ func (s *relayServersStatements) SelectRelayServers(
}
result = append(result, spec.ServerName(relayServer))
}
-return result, nil
+return result, rows.Err()
}

func (s *relayServersStatements) DeleteRelayServers(
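The storage hunks above (and the roomserver ones further down) all converge on the same database/sql rule: rows.Next() returning false can mean either "no more rows" or "iteration failed", and only rows.Err() tells those apart, so returning nil (or a stale err) after the loop can silently hide a truncated result set. A generic sketch of the corrected shape; the table and query here are illustrative only, not Dendrite's:

package example

import (
	"context"
	"database/sql"
)

func selectNames(ctx context.Context, db *sql.DB) ([]string, error) {
	rows, err := db.QueryContext(ctx, "SELECT name FROM example_table")
	if err != nil {
		return nil, err
	}
	defer rows.Close() // always release the result set

	var names []string
	for rows.Next() {
		var name string
		if err = rows.Scan(&name); err != nil {
			return nil, err
		}
		names = append(names, name)
	}
	// Surface any error that ended the loop early, e.g. a broken connection.
	return names, rows.Err()
}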
go.mod (6 changed lines)
@@ -9,7 +9,7 @@ require (
github.com/blevesearch/bleve/v2 v2.3.8
github.com/codeclysm/extract v2.2.0+incompatible
github.com/dgraph-io/ristretto v0.1.1
-github.com/docker/docker v24.0.5+incompatible
+github.com/docker/docker v24.0.7+incompatible
github.com/docker/go-connections v0.4.0
github.com/getsentry/sentry-go v0.14.0
github.com/gologme/log v1.3.0
@@ -44,7 +44,7 @@ require (
go.uber.org/atomic v1.10.0
golang.org/x/crypto v0.14.0
golang.org/x/exp v0.0.0-20230809150735-7b3493d9a819
-golang.org/x/image v0.5.0
+golang.org/x/image v0.10.0
golang.org/x/mobile v0.0.0-20221020085226-b36e6246172e
golang.org/x/sync v0.3.0
golang.org/x/term v0.13.0
@@ -105,7 +105,7 @@ require (
github.com/morikuni/aec v1.0.0 // indirect
github.com/mschoch/smat v0.2.0 // indirect
github.com/nats-io/jwt/v2 v2.5.0 // indirect
-github.com/nats-io/nkeys v0.4.4 // indirect
+github.com/nats-io/nkeys v0.4.6 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/onsi/ginkgo/v2 v2.11.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
go.sum (19 changed lines)
@@ -89,8 +89,8 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczC
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v24.0.5+incompatible h1:WmgcE4fxyI6EEXxBRxsHnZXrO1pQ3smi0k/jho4HLeY=
-github.com/docker/docker v24.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM=
+github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -248,8 +248,8 @@ github.com/nats-io/nats-server/v2 v2.9.23 h1:6Wj6H6QpP9FMlpCyWUaNu2yeZ/qGj+mdRkZ
github.com/nats-io/nats-server/v2 v2.9.23/go.mod h1:wEjrEy9vnqIGE4Pqz4/c75v9Pmaq7My2IgFmnykc4C0=
github.com/nats-io/nats.go v1.28.0 h1:Th4G6zdsz2d0OqXdfzKLClo6bOfoI/b1kInhRtFIy5c=
github.com/nats-io/nats.go v1.28.0/go.mod h1:XpbWUlOElGwTYbMR7imivs7jJj9GtK7ypv321Wp6pjc=
-github.com/nats-io/nkeys v0.4.4 h1:xvBJ8d69TznjcQl9t6//Q5xXuVhyYiSos6RPtvQNTwA=
-github.com/nats-io/nkeys v0.4.4/go.mod h1:XUkxdLPTufzlihbamfzQ7mw/VGx6ObUs+0bN5sNvt64=
+github.com/nats-io/nkeys v0.4.6 h1:IzVe95ru2CT6ta874rt9saQRkWfe2nFj1NtvYSLqMzY=
+github.com/nats-io/nkeys v0.4.6/go.mod h1:4DxZNzenSVd1cYQoAa8948QY3QDjrHfcfVADymtkpts=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/neilalexander/utp v0.1.1-0.20210727203401-54ae7b1cd5f9 h1:lrVQzBtkeQEGGYUHwSX1XPe1E5GL6U3KYCNe2G4bncQ=
@@ -366,8 +366,8 @@ golang.org/x/exp v0.0.0-20230809150735-7b3493d9a819/go.mod h1:FXUEEKJgO7OQYeo8N0
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/image v0.5.0 h1:5JMiNunQeQw++mMOz48/ISeNu3Iweh/JaZU8ZLqHRrI=
-golang.org/x/image v0.5.0/go.mod h1:FVC7BI/5Ym8R25iw5OLsgshdUBbT1h5jZTpA+mvAdZ4=
+golang.org/x/image v0.10.0 h1:gXjUUtwtx5yOE0VKWq1CH4IJAClq4UGgUA3i+rpON9M=
+golang.org/x/image v0.10.0/go.mod h1:jtrku+n79PfroUbvDdeUWMAI+heR786BofxrbiSF+J0=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mobile v0.0.0-20221020085226-b36e6246172e h1:zSgtO19fpg781xknwqiQPmOHaASr6E7ZVlTseLd9Fx4=
golang.org/x/mobile v0.0.0-20221020085226-b36e6246172e/go.mod h1:aAjjkJNdrh3PMckS4B10TGS2nag27cbKR1y2BpUxsiY=
@@ -376,6 +376,7 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -386,6 +387,7 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -394,6 +396,7 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -418,10 +421,12 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -429,6 +434,7 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -445,6 +451,7 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss=
golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -109,5 +109,5 @@ func (s *relayQueueJSONStatements) SelectQueueJSON(
}
blobs[nid] = blob
}
-return blobs, err
+return blobs, rows.Err()
}
@@ -133,5 +133,5 @@ func (s *relayQueueJSONStatements) SelectQueueJSON(
}
blobs[nid] = blob
}
-return blobs, err
+return blobs, rows.Err()
}
@@ -17,6 +17,7 @@ package query
import (
"context"
"encoding/json"
+"errors"
"fmt"
"sort"

@@ -56,6 +57,12 @@ func (querier *Queryer) QueryNextRoomHierarchyPage(ctx context.Context, walker r
break
}

+// If the context is canceled, we might still have discovered rooms
+// return them to the client and let the client know there _may_ be more rooms.
+if errors.Is(ctx.Err(), context.Canceled) {
+break
+}
+
// pop the stack
queuedRoom := unvisited[len(unvisited)-1]
unvisited = unvisited[:len(unvisited)-1]
@@ -112,6 +119,11 @@ func (querier *Queryer) QueryNextRoomHierarchyPage(ctx context.Context, walker r

pubRoom := publicRoomsChunk(ctx, querier, queuedRoom.RoomID)

+if pubRoom == nil {
+util.GetLogger(ctx).WithField("room_id", queuedRoom.RoomID).Debug("unable to get publicRoomsChunk")
+continue
+}
+
discoveredRooms = append(discoveredRooms, fclient.RoomHierarchyRoom{
PublicRoom: *pubRoom,
RoomType: roomType,
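The room-hierarchy change above checks ctx.Err() inside the walk loop so that a cancelled request still returns whatever rooms were already discovered instead of discarding them. The general shape, with illustrative names rather than Dendrite's:

package example

import (
	"context"
	"errors"
)

// walk visits queued items until the queue is empty or the caller gives up;
// on cancellation it returns the partial result collected so far.
func walk(ctx context.Context, queue []string) (visited []string, more bool) {
	for len(queue) > 0 {
		if errors.Is(ctx.Err(), context.Canceled) {
			// Caller went away: stop early, report that more items may remain.
			return visited, true
		}
		visited = append(visited, queue[0])
		queue = queue[1:]
	}
	return visited, false
}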
@@ -7,6 +7,7 @@ import (
"testing"
"time"

+"github.com/matrix-org/dendrite/federationapi/statistics"
"github.com/matrix-org/dendrite/internal/caching"
"github.com/matrix-org/dendrite/internal/eventutil"
"github.com/matrix-org/dendrite/internal/httputil"
@@ -34,6 +35,10 @@ import (
"github.com/matrix-org/dendrite/test/testrig"
)

+var testIsBlacklistedOrBackingOff = func(s spec.ServerName) (*statistics.ServerStatistics, error) {
+return &statistics.ServerStatistics{}, nil
+}
+
type FakeQuerier struct {
api.QuerySenderIDAPI
}
@@ -58,7 +63,7 @@ func TestUsers(t *testing.T) {
})

t.Run("kick users", func(t *testing.T) {
-usrAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil)
+usrAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
rsAPI.SetUserAPI(usrAPI)
testKickUsers(t, rsAPI, usrAPI)
})
@@ -258,7 +263,7 @@ func TestPurgeRoom(t *testing.T) {
fsAPI := federationapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, nil, rsAPI, caches, nil, true)
rsAPI.SetFederationAPI(fsAPI, nil)

-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, fsAPI.IsBlacklistedOrBackingOff)
syncapi.AddPublicRoutes(processCtx, routers, cfg, cm, &natsInstance, userAPI, rsAPI, caches, caching.DisableMetrics)

// Create the room
@@ -1050,7 +1055,7 @@ func TestUpgrade(t *testing.T) {

rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
-userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil)
+userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
rsAPI.SetUserAPI(userAPI)

for _, tc := range testCases {
@@ -249,6 +249,7 @@ func (s *eventStatements) BulkSelectSnapshotsFromEventIDs(
if err != nil {
return nil, err
}
+defer internal.CloseAndLogIfError(ctx, rows, "BulkSelectSnapshotsFromEventIDs: rows.close() failed")

var eventID string
var stateNID types.StateSnapshotNID
@@ -563,7 +564,7 @@ func (s *eventStatements) SelectRoomNIDsForEventNIDs(
}
result[eventNID] = roomNID
}
-return result, nil
+return result, rows.Err()
}

func eventNIDsAsArray(eventNIDs []types.EventNID) pq.Int64Array {
@@ -363,7 +363,7 @@ func (s *membershipStatements) SelectRoomsWithMembership(
}
roomNIDs = append(roomNIDs, roomNID)
}
-return roomNIDs, nil
+return roomNIDs, rows.Err()
}

func (s *membershipStatements) SelectJoinedUsersSetForRooms(
@@ -137,7 +137,7 @@ func (s *roomStatements) SelectRoomIDsWithEvents(ctx context.Context, txn *sql.T
}
roomIDs = append(roomIDs, roomID)
}
-return roomIDs, nil
+return roomIDs, rows.Err()
}
func (s *roomStatements) InsertRoomNID(
ctx context.Context, txn *sql.Tx,
@@ -255,7 +255,7 @@ func (s *roomStatements) SelectRoomVersionsForRoomNIDs(
}
result[roomNID] = roomVersion
}
-return result, nil
+return result, rows.Err()
}

func (s *roomStatements) BulkSelectRoomIDs(ctx context.Context, txn *sql.Tx, roomNIDs []types.RoomNID) ([]string, error) {
@@ -277,7 +277,7 @@ func (s *roomStatements) BulkSelectRoomIDs(ctx context.Context, txn *sql.Tx, roo
}
roomIDs = append(roomIDs, roomID)
}
-return roomIDs, nil
+return roomIDs, rows.Err()
}

func (s *roomStatements) BulkSelectRoomNIDs(ctx context.Context, txn *sql.Tx, roomIDs []string) ([]types.RoomNID, error) {
@@ -299,7 +299,7 @@ func (s *roomStatements) BulkSelectRoomNIDs(ctx context.Context, txn *sql.Tx, ro
}
roomNIDs = append(roomNIDs, roomNID)
}
-return roomNIDs, nil
+return roomNIDs, rows.Err()
}

func roomNIDsAsArray(roomNIDs []types.RoomNID) pq.Int64Array {
@@ -162,6 +162,7 @@ func (s *userRoomKeysStatements) SelectAllPublicKeysForUser(ctx context.Context,
if errors.Is(err, sql.ErrNoRows) {
return nil, nil
}
+defer internal.CloseAndLogIfError(ctx, rows, "SelectAllPublicKeysForUser: failed to close rows")

resultMap := make(map[types.RoomNID]ed25519.PublicKey)

@@ -173,5 +174,5 @@ func (s *userRoomKeysStatements) SelectAllPublicKeysForUser(ctx context.Context,
}
resultMap[roomNID] = pubkey
}
-return resultMap, err
+return resultMap, rows.Err()
}
@@ -109,5 +109,5 @@ func (s *eventJSONStatements) BulkSelectEventJSON(
}
result.EventNID = types.EventNID(eventNID)
}
-return results[:i], nil
+return results[:i], rows.Err()
}
@@ -136,7 +136,7 @@ func (s *eventStateKeyStatements) BulkSelectEventStateKeyNID(
}
result[stateKey] = types.EventStateKeyNID(stateKeyNID)
}
-return result, nil
+return result, rows.Err()
}

func (s *eventStateKeyStatements) BulkSelectEventStateKey(
@@ -167,5 +167,5 @@ func (s *eventStateKeyStatements) BulkSelectEventStateKey(
}
result[types.EventStateKeyNID(stateKeyNID)] = stateKey
}
-return result, nil
+return result, rows.Err()
}
@@ -147,5 +147,5 @@ func (s *eventTypeStatements) BulkSelectEventTypeNID(
}
result[eventType] = types.EventTypeNID(eventTypeNID)
}
-return result, nil
+return result, rows.Err()
}
@@ -310,6 +310,9 @@ func (s *eventStatements) BulkSelectStateEventByID(
}
results = append(results, result)
}
+if err = rows.Err(); err != nil {
+return nil, err
+}
if !excludeRejected && i != len(eventIDs) {
// If there are fewer rows returned than IDs then we were asked to lookup event IDs we don't have.
// We don't know which ones were missing because we don't return the string IDs in the query.
@@ -377,7 +380,7 @@ func (s *eventStatements) BulkSelectStateEventByNID(
return nil, err
}
}
-return results[:i], err
+return results[:i], rows.Err()
}

// bulkSelectStateAtEventByID lookups the state at a list of events by event ID.
@@ -425,6 +428,9 @@ func (s *eventStatements) BulkSelectStateAtEventByID(
)
}
}
+if err = rows.Err(); err != nil {
+return nil, err
+}
if i != len(eventIDs) {
return nil, types.MissingEventError(
fmt.Sprintf("storage: event IDs missing from the database (%d != %d)", i, len(eventIDs)),
@@ -507,6 +513,9 @@ func (s *eventStatements) BulkSelectStateAtEventAndReference(
result.BeforeStateSnapshotNID = types.StateSnapshotNID(stateSnapshotNID)
result.EventID = eventID
}
+if err = rows.Err(); err != nil {
+return nil, err
+}
if i != len(eventNIDs) {
return nil, fmt.Errorf("storage: event NIDs missing from the database (%d != %d)", i, len(eventNIDs))
}
@@ -544,6 +553,9 @@ func (s *eventStatements) BulkSelectEventID(ctx context.Context, txn *sql.Tx, ev
}
results[types.EventNID(eventNID)] = eventID
}
+if err = rows.Err(); err != nil {
+return nil, err
+}
if i != len(eventNIDs) {
return nil, fmt.Errorf("storage: event NIDs missing from the database (%d != %d)", i, len(eventNIDs))
}
@@ -602,7 +614,7 @@ func (s *eventStatements) bulkSelectEventNID(ctx context.Context, txn *sql.Tx, e
RoomNID: types.RoomNID(roomNID),
}
}
-return results, nil
+return results, rows.Err()
}

func (s *eventStatements) SelectMaxEventDepth(ctx context.Context, txn *sql.Tx, eventNIDs []types.EventNID) (int64, error) {
@@ -652,7 +664,7 @@ func (s *eventStatements) SelectRoomNIDsForEventNIDs(
}
result[eventNID] = roomNID
}
-return result, nil
+return result, rows.Err()
}

func eventNIDsAsArray(eventNIDs []types.EventNID) string {
@@ -126,6 +126,9 @@ func (s *inviteStatements) UpdateInviteRetired(
}
eventIDs = append(eventIDs, inviteEventID)
}
+if err = rows.Err(); err != nil {
+return
+}
// now retire the invites
stmt = sqlutil.TxStmt(txn, s.updateInviteRetiredStmt)
_, err = stmt.ExecContext(ctx, roomNID, targetUserNID)
@@ -157,5 +160,5 @@ func (s *inviteStatements) SelectInviteActiveForUserInRoom(
result = append(result, types.EventStateKeyNID(senderUserNID))
eventIDs = append(eventIDs, eventID)
}
-return result, eventIDs, eventJSON, nil
+return result, eventIDs, eventJSON, rows.Err()
}
@@ -250,6 +250,7 @@ func (s *membershipStatements) SelectMembershipsFromRoom(
}
eventNIDs = append(eventNIDs, eNID)
}
+err = rows.Err()
return
}

@@ -277,6 +278,7 @@ func (s *membershipStatements) SelectMembershipsFromRoomAndMembership(
}
eventNIDs = append(eventNIDs, eNID)
}
+err = rows.Err()
return
}

@@ -313,7 +315,7 @@ func (s *membershipStatements) SelectRoomsWithMembership(
}
roomNIDs = append(roomNIDs, roomNID)
}
-return roomNIDs, nil
+return roomNIDs, rows.Err()
}

func (s *membershipStatements) SelectJoinedUsersSetForRooms(ctx context.Context, txn *sql.Tx, roomNIDs []types.RoomNID, userNIDs []types.EventStateKeyNID, localOnly bool) (map[types.EventStateKeyNID]int, error) {
@@ -121,7 +121,7 @@ func (s *roomAliasesStatements) SelectAliasesFromRoomID(

aliases = append(aliases, alias)
}
-
+err = rows.Err()
return
}
@ -128,7 +128,7 @@ func (s *roomStatements) SelectRoomIDsWithEvents(ctx context.Context, txn *sql.T
|
|||
}
|
||||
roomIDs = append(roomIDs, roomID)
|
||||
}
|
||||
return roomIDs, nil
|
||||
return roomIDs, rows.Err()
|
||||
}
|
||||
|
||||
func (s *roomStatements) SelectRoomInfo(ctx context.Context, txn *sql.Tx, roomID string) (*types.RoomInfo, error) {
|
||||
|
@ -265,7 +265,7 @@ func (s *roomStatements) SelectRoomVersionsForRoomNIDs(
|
|||
}
|
||||
result[roomNID] = roomVersion
|
||||
}
|
||||
return result, nil
|
||||
return result, rows.Err()
|
||||
}
|
||||
|
||||
func (s *roomStatements) BulkSelectRoomIDs(ctx context.Context, txn *sql.Tx, roomNIDs []types.RoomNID) ([]string, error) {
|
||||
|
@ -293,7 +293,7 @@ func (s *roomStatements) BulkSelectRoomIDs(ctx context.Context, txn *sql.Tx, roo
|
|||
}
|
||||
roomIDs = append(roomIDs, roomID)
|
||||
}
|
||||
return roomIDs, nil
|
||||
return roomIDs, rows.Err()
|
||||
}
|
||||
|
||||
func (s *roomStatements) BulkSelectRoomNIDs(ctx context.Context, txn *sql.Tx, roomIDs []string) ([]types.RoomNID, error) {
|
||||
|
@ -321,5 +321,5 @@ func (s *roomStatements) BulkSelectRoomNIDs(ctx context.Context, txn *sql.Tx, ro
|
|||
}
|
||||
roomNIDs = append(roomNIDs, roomNID)
|
||||
}
|
||||
return roomNIDs, nil
|
||||
return roomNIDs, rows.Err()
|
||||
}
|
||||
|
|
|
@ -133,13 +133,16 @@ func (s *stateSnapshotStatements) BulkSelectStateBlockNIDs(
|
|||
var stateBlockNIDsJSON string
|
||||
for ; rows.Next(); i++ {
|
||||
result := &results[i]
|
||||
if err := rows.Scan(&result.StateSnapshotNID, &stateBlockNIDsJSON); err != nil {
|
||||
if err = rows.Scan(&result.StateSnapshotNID, &stateBlockNIDsJSON); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := json.Unmarshal([]byte(stateBlockNIDsJSON), &result.StateBlockNIDs); err != nil {
|
||||
if err = json.Unmarshal([]byte(stateBlockNIDsJSON), &result.StateBlockNIDs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if err = rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if i != len(stateNIDs) {
|
||||
return nil, types.MissingStateError(fmt.Sprintf("storage: state NIDs missing from the database (%d != %d)", i, len(stateNIDs)))
|
||||
}
|
||||
|
|
|
@ -177,6 +177,7 @@ func (s *userRoomKeysStatements) SelectAllPublicKeysForUser(ctx context.Context,
|
|||
if errors.Is(err, sql.ErrNoRows) {
|
||||
return nil, nil
|
||||
}
|
||||
defer internal.CloseAndLogIfError(ctx, rows, "SelectAllPublicKeysForUser: failed to close rows")
|
||||
|
||||
resultMap := make(map[types.RoomNID]ed25519.PublicKey)
|
||||
|
||||
|
@ -188,5 +189,5 @@ func (s *userRoomKeysStatements) SelectAllPublicKeysForUser(ctx context.Context,
|
|||
}
|
||||
resultMap[roomNID] = pubkey
|
||||
}
|
||||
return resultMap, err
|
||||
return resultMap, rows.Err()
|
||||
}
|
||||
|
|
|
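Several hunks in this area also add a deferred internal.CloseAndLogIfError call right after the query succeeds, so the *sql.Rows handle is always closed and a close error is at least logged rather than dropped. A rough sketch of the shape of such a helper; the real implementation lives in Dendrite's internal package and may differ in detail:

package sketch

import (
	"context"
	"io"

	"github.com/sirupsen/logrus"
)

// closeAndLogIfError is a sketch in the spirit of internal.CloseAndLogIfError:
// inside a defer the caller cannot return the close error, so logging it is
// the most useful thing left to do.
func closeAndLogIfError(ctx context.Context, closer io.Closer, msg string) {
	if closer == nil {
		return
	}
	if err := closer.Close(); err != nil {
		logrus.WithContext(ctx).WithError(err).Error(msg)
	}
}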
@@ -21,6 +21,10 @@ type UserAPI struct {
 	// Users who register on this homeserver will automatically
 	// be joined to the rooms listed under this option.
 	AutoJoinRooms []string `yaml:"auto_join_rooms"`
+
+	// The number of workers to start for the DeviceListUpdater. Defaults to 8.
+	// This only needs updating if the "InputDeviceListUpdate" stream keeps growing indefinitely.
+	WorkerCount int `yaml:"worker_count"`
 }
 
 const DefaultOpenIDTokenLifetimeMS = 3600000 // 60 minutes
@@ -28,6 +32,7 @@ const DefaultOpenIDTokenLifetimeMS = 3600000 // 60 minutes
 func (c *UserAPI) Defaults(opts DefaultOpts) {
 	c.BCryptCost = bcrypt.DefaultCost
 	c.OpenIDTokenLifetimeMS = DefaultOpenIDTokenLifetimeMS
+	c.WorkerCount = 8
 	if opts.Generate {
 		if !opts.SingleDatabase {
 			c.AccountDatabase.ConnectionString = "file:userapi_accounts.db"
@@ -301,7 +301,7 @@ func (p *DB) ChildrenForParent(ctx context.Context, eventID, relType string, rec
 		}
 		children = append(children, evInfo)
 	}
-	return children, nil
+	return children, rows.Err()
 }
 
 func (p *DB) ParentForChild(ctx context.Context, eventID, relType string) (*eventInfo, error) {
@@ -44,7 +44,7 @@ func GetEvent(
 	rsAPI api.SyncRoomserverAPI,
 ) util.JSONResponse {
 	ctx := req.Context()
-	db, err := syncDB.NewDatabaseTransaction(ctx)
+	db, err := syncDB.NewDatabaseSnapshot(ctx)
 	logger := util.GetLogger(ctx).WithFields(logrus.Fields{
 		"event_id": eventID,
 		"room_id":  rawRoomID,
@@ -56,6 +56,7 @@ func GetEvent(
 			JSON: spec.InternalServerError{},
 		}
 	}
+	defer db.Rollback() // nolint: errcheck
 
 	roomID, err := spec.NewRoomID(rawRoomID)
 	if err != nil {
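The /event handler now takes a read-only database snapshot instead of a transaction and rolls it back when the request finishes. A hedged sketch of that access pattern, with interface and method names invented for illustration (the real handler is the sync API's GetEvent above, and the real storage type has many more methods):

package sketch

import "context"

// snapshot stands in for the sync API's database snapshot type.
type snapshot interface {
	Rollback() error
	Events(ctx context.Context, eventIDs []string) ([]string, error) // stand-in signature
}

// database stands in for the sync API storage interface.
type database interface {
	NewDatabaseSnapshot(ctx context.Context) (snapshot, error)
}

// getEvent mirrors the handler's shape: open a snapshot, defer the rollback,
// then run read-only queries against it.
func getEvent(ctx context.Context, db database, eventID string) ([]string, error) {
	snap, err := db.NewDatabaseSnapshot(ctx)
	if err != nil {
		return nil, err
	}
	defer snap.Rollback() // nolint: errcheck

	return snap.Events(ctx, []string{eventID})
}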
@@ -392,7 +392,7 @@ func currentRoomStateRowsToStreamEvents(rows *sql.Rows) ([]types.StreamEvent, er
 		})
 	}
 
-	return events, nil
+	return events, rows.Err()
 }
 
 func rowsToEvents(rows *sql.Rows) ([]*rstypes.HeaderedEvent, error) {

@@ -19,6 +19,7 @@ import (
 	"database/sql"
 	"fmt"
 
+	"github.com/matrix-org/dendrite/internal"
 	"github.com/matrix-org/dendrite/internal/sqlutil"
 	rstypes "github.com/matrix-org/dendrite/roomserver/types"
 	"github.com/matrix-org/dendrite/syncapi/storage/tables"
@@ -160,6 +161,7 @@ func (s *membershipsStatements) SelectMemberships(
 	if err != nil {
 		return
 	}
+	defer internal.CloseAndLogIfError(ctx, rows, "SelectMemberships: failed to close rows")
 	var (
 		eventID string
 	)

@@ -164,7 +164,7 @@ func (s *peekStatements) SelectPeekingDevices(
 		devices = append(devices, types.PeekingDevice{UserID: userID, DeviceID: deviceID})
 		result[roomID] = devices
 	}
-	return result, nil
+	return result, rows.Err()
 }
 
 func (s *peekStatements) SelectMaxPeekID(

@@ -144,7 +144,7 @@ func (p *presenceStatements) GetPresenceForUsers(
 		presence.ClientFields.Presence = presence.Presence.String()
 		result = append(result, presence)
 	}
-	return result, err
+	return result, rows.Err()
 }
 
 func (p *presenceStatements) GetMaxPresenceID(ctx context.Context, txn *sql.Tx) (pos types.StreamPosition, err error) {

@@ -177,7 +177,7 @@ func (s *currentRoomStateStatements) SelectJoinedUsers(
 		users = append(users, userID)
 		result[roomID] = users
 	}
-	return result, nil
+	return result, rows.Err()
 }
 
 // SelectJoinedUsersInRoom returns a map of room ID to a list of joined user IDs for a given room.
@@ -236,7 +236,7 @@ func (s *currentRoomStateStatements) SelectRoomIDsWithMembership(
 		}
 		result = append(result, roomID)
 	}
-	return result, nil
+	return result, rows.Err()
 }
 
 // SelectRoomIDsWithAnyMembership returns a map of all memberships for the given user.
@@ -419,7 +419,7 @@ func currentRoomStateRowsToStreamEvents(rows *sql.Rows) ([]types.StreamEvent, er
 		})
 	}
 
-	return events, nil
+	return events, rows.Err()
 }
 
 func rowsToEvents(rows *sql.Rows) ([]*rstypes.HeaderedEvent, error) {

@@ -176,7 +176,7 @@ func (s *inviteEventsStatements) SelectInviteEventsInRange(
 	if lastPos == 0 {
 		lastPos = r.To
 	}
-	return result, retired, lastPos, nil
+	return result, retired, lastPos, rows.Err()
 }
 
 func (s *inviteEventsStatements) SelectMaxInviteID(

@@ -19,6 +19,7 @@ import (
 	"database/sql"
 	"fmt"
 
+	"github.com/matrix-org/dendrite/internal"
 	"github.com/matrix-org/dendrite/internal/sqlutil"
 	rstypes "github.com/matrix-org/dendrite/roomserver/types"
 	"github.com/matrix-org/dendrite/syncapi/storage/tables"
@@ -163,6 +164,7 @@ func (s *membershipsStatements) SelectMemberships(
 	if err != nil {
 		return
 	}
+	defer internal.CloseAndLogIfError(ctx, rows, "SelectMemberships: failed to close rows")
 	var eventID string
 	for rows.Next() {
 		if err = rows.Scan(&eventID); err != nil {

@@ -274,7 +274,7 @@ func (s *outputRoomEventsStatements) SelectStateInRange(
 		}
 	}
 
-	return stateNeeded, eventIDToEvent, nil
+	return stateNeeded, eventIDToEvent, rows.Err()
 }
 
 // MaxID returns the ID of the last inserted event in this table. 'txn' is optional. If it is not supplied,
@@ -520,7 +520,7 @@ func rowsToStreamEvents(rows *sql.Rows) ([]types.StreamEvent, error) {
 			ExcludeFromSync: excludeFromSync,
 		})
 	}
-	return result, nil
+	return result, rows.Err()
 }
 func (s *outputRoomEventsStatements) SelectContextEvent(
 	ctx context.Context, txn *sql.Tx, roomID, eventID string,

@@ -18,6 +18,7 @@ import (
 	"context"
 	"database/sql"
 
+	"github.com/matrix-org/dendrite/internal"
 	"github.com/matrix-org/dendrite/internal/sqlutil"
 	rstypes "github.com/matrix-org/dendrite/roomserver/types"
 	"github.com/matrix-org/dendrite/syncapi/storage/tables"
@@ -137,6 +138,7 @@ func (s *outputRoomEventsTopologyStatements) SelectEventIDsInRange(
 	} else if err != nil {
 		return
 	}
+	defer internal.CloseAndLogIfError(ctx, rows, "SelectEventIDsInRange: failed to close rows")
 
 	// Return the IDs.
 	var eventID string
@ -155,7 +157,7 @@ func (s *outputRoomEventsTopologyStatements) SelectEventIDsInRange(
 		start = tokens[0]
 		end = tokens[len(tokens)-1]
 	}
 
+	err = rows.Err()
 	return
 }

@@ -184,7 +184,7 @@ func (s *peekStatements) SelectPeekingDevices(
 		devices = append(devices, types.PeekingDevice{UserID: userID, DeviceID: deviceID})
 		result[roomID] = devices
 	}
-	return result, nil
+	return result, rows.Err()
 }
 
 func (s *peekStatements) SelectMaxPeekID(

@@ -169,7 +169,7 @@ func (p *presenceStatements) GetPresenceForUsers(
 		presence.ClientFields.Presence = presence.Presence.String()
 		result = append(result, presence)
 	}
-	return result, err
+	return result, rows.Err()
 }
 
 func (p *presenceStatements) GetMaxPresenceID(ctx context.Context, txn *sql.Tx) (pos types.StreamPosition, err error) {
@@ -17,6 +17,7 @@ package consumers
 import (
 	"context"
 	"encoding/json"
+	"time"
 
 	"github.com/matrix-org/dendrite/userapi/internal"
 	"github.com/matrix-org/gomatrixserverlib"
@@ -82,7 +83,10 @@ func (t *DeviceListUpdateConsumer) onMessage(ctx context.Context, msgs []*nats.M
 		return true
 	}
 
-	err := t.updater.Update(ctx, m)
+	timeoutCtx, cancel := context.WithTimeout(ctx, time.Second*30)
+	defer cancel()
+
+	err := t.updater.Update(timeoutCtx, m)
 	if err != nil {
 		logrus.WithFields(logrus.Fields{
 			"user_id": m.UserID,
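The consumer change above wraps each device list update in a 30-second context, so a single slow federation request cannot block the NATS consumer indefinitely. The core pattern on its own, with a placeholder work function standing in for updater.Update:

package sketch

import (
	"context"
	"fmt"
	"time"
)

// processWithTimeout bounds a potentially slow call with a per-message deadline.
// doUpdate is a stand-in for the real update call in the consumer above.
func processWithTimeout(ctx context.Context, doUpdate func(context.Context) error) error {
	timeoutCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel() // always release the timer, even on success

	if err := doUpdate(timeoutCtx); err != nil {
		return fmt.Errorf("update failed: %w", err)
	}
	return nil
}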
@@ -21,9 +21,11 @@ import (
 	"fmt"
 	"hash/fnv"
 	"net"
+	"strconv"
 	"sync"
 	"time"
 
+	"github.com/matrix-org/dendrite/federationapi/statistics"
 	rsapi "github.com/matrix-org/dendrite/roomserver/api"
 	"github.com/matrix-org/gomatrixserverlib/fclient"
 	"github.com/matrix-org/gomatrixserverlib/spec"
@@ -107,6 +109,8 @@ type DeviceListUpdater struct {
 	userIDToChan   map[string]chan bool
 	userIDToChanMu *sync.Mutex
 	rsAPI          rsapi.KeyserverRoomserverAPI
+
+	isBlacklistedOrBackingOffFn func(s spec.ServerName) (*statistics.ServerStatistics, error)
 }
 
 // DeviceListUpdaterDatabase is the subset of functionality from storage.Database required for the updater.
@@ -142,26 +146,52 @@ type KeyChangeProducer interface {
 	ProduceKeyChanges(keys []api.DeviceMessage) error
 }
 
+var deviceListUpdaterBackpressure = prometheus.NewGaugeVec(
+	prometheus.GaugeOpts{
+		Namespace: "dendrite",
+		Subsystem: "keyserver",
+		Name:      "worker_backpressure",
+		Help:      "How many device list updater requests are queued",
+	},
+	[]string{"worker_id"},
+)
+var deviceListUpdaterServersRetrying = prometheus.NewGaugeVec(
+	prometheus.GaugeOpts{
+		Namespace: "dendrite",
+		Subsystem: "keyserver",
+		Name:      "worker_servers_retrying",
+		Help:      "How many servers are queued for retry",
+	},
+	[]string{"worker_id"},
+)
+
 // NewDeviceListUpdater creates a new updater which fetches fresh device lists when they go stale.
 func NewDeviceListUpdater(
 	process *process.ProcessContext, db DeviceListUpdaterDatabase,
 	api DeviceListUpdaterAPI, producer KeyChangeProducer,
 	fedClient fedsenderapi.KeyserverFederationAPI, numWorkers int,
-	rsAPI rsapi.KeyserverRoomserverAPI, thisServer spec.ServerName,
+	rsAPI rsapi.KeyserverRoomserverAPI,
+	thisServer spec.ServerName,
+	enableMetrics bool,
+	isBlacklistedOrBackingOffFn func(s spec.ServerName) (*statistics.ServerStatistics, error),
 ) *DeviceListUpdater {
+	if enableMetrics {
+		prometheus.MustRegister(deviceListUpdaterBackpressure, deviceListUpdaterServersRetrying)
+	}
 	return &DeviceListUpdater{
-		process:        process,
-		userIDToMutex:  make(map[string]*sync.Mutex),
-		mu:             &sync.Mutex{},
-		db:             db,
-		api:            api,
-		producer:       producer,
-		fedClient:      fedClient,
-		thisServer:     thisServer,
-		workerChans:    make([]chan spec.ServerName, numWorkers),
-		userIDToChan:   make(map[string]chan bool),
-		userIDToChanMu: &sync.Mutex{},
-		rsAPI:          rsAPI,
+		process:                     process,
+		userIDToMutex:               make(map[string]*sync.Mutex),
+		mu:                          &sync.Mutex{},
+		db:                          db,
+		api:                         api,
+		producer:                    producer,
+		fedClient:                   fedClient,
+		thisServer:                  thisServer,
+		workerChans:                 make([]chan spec.ServerName, numWorkers),
+		userIDToChan:                make(map[string]chan bool),
+		userIDToChanMu:              &sync.Mutex{},
+		rsAPI:                       rsAPI,
+		isBlacklistedOrBackingOffFn: isBlacklistedOrBackingOffFn,
 	}
 }
 
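The two GaugeVecs are only registered when metrics are enabled, which is why the tests can pass caching.DisableMetrics and construct the updater repeatedly without tripping over duplicate registration. A stripped-down sketch of the same gate, with metric names chosen purely for illustration:

package sketch

import "github.com/prometheus/client_golang/prometheus"

// exampleBackpressure mirrors the shape of the worker_backpressure gauge:
// one time series per worker, labelled by worker_id.
var exampleBackpressure = prometheus.NewGaugeVec(
	prometheus.GaugeOpts{
		Namespace: "example",
		Subsystem: "worker",
		Name:      "backpressure",
		Help:      "How many requests are queued per worker",
	},
	[]string{"worker_id"},
)

// setupMetrics registers the gauge only when metrics are wanted, so callers
// that build the component many times (e.g. unit tests) do not panic on
// duplicate registration.
func setupMetrics(enableMetrics bool) {
	if enableMetrics {
		prometheus.MustRegister(exampleBackpressure)
	}
}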
@@ -173,7 +203,7 @@ func (u *DeviceListUpdater) Start() error {
 		// to stop (in this transaction) until key requests can be made.
 		ch := make(chan spec.ServerName, 10)
 		u.workerChans[i] = ch
-		go u.worker(ch)
+		go u.worker(ch, i)
 	}
 
 	staleLists, err := u.db.StaleDeviceLists(u.process.Context(), []spec.ServerName{})
@@ -338,11 +368,22 @@ func (u *DeviceListUpdater) notifyWorkers(userID string) {
 	if err != nil {
 		return
 	}
+	_, err = u.isBlacklistedOrBackingOffFn(remoteServer)
+	var federationClientError *fedsenderapi.FederationClientError
+	if errors.As(err, &federationClientError) {
+		if federationClientError.Blacklisted {
+			return
+		}
+	}
+
 	hash := fnv.New32a()
 	_, _ = hash.Write([]byte(remoteServer))
 	index := int(int64(hash.Sum32()) % int64(len(u.workerChans)))
 
 	ch := u.assignChannel(userID)
+	// Since workerChans are buffered, we only increment here and let the worker
+	// decrement it once it is done processing.
+	deviceListUpdaterBackpressure.With(prometheus.Labels{"worker_id": strconv.Itoa(index)}).Inc()
 	u.workerChans[index] <- remoteServer
 	select {
 	case <-ch:
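Work for a given remote server always lands on the same worker because the server name is hashed with FNV-1a and reduced modulo the number of worker channels; the backpressure gauge is incremented before the send and decremented by the worker once it is done. A small sketch of just the hashing step:

package sketch

import "hash/fnv"

// workerIndex deterministically maps a server name onto one of n workers,
// mirroring the FNV-1a-modulo selection in notifyWorkers above.
func workerIndex(serverName string, n int) int {
	h := fnv.New32a()
	_, _ = h.Write([]byte(serverName))
	return int(int64(h.Sum32()) % int64(n))
}

Because the mapping is stable, retries and back-offs for one server stay serialised on a single worker instead of being spread across all of them.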
@@ -372,27 +413,44 @@ func (u *DeviceListUpdater) clearChannel(userID string) {
 	}
 }
 
-func (u *DeviceListUpdater) worker(ch chan spec.ServerName) {
+func (u *DeviceListUpdater) worker(ch chan spec.ServerName, workerID int) {
 	retries := make(map[spec.ServerName]time.Time)
 	retriesMu := &sync.Mutex{}
 	// restarter goroutine which will inject failed servers into ch when it is time
 	go func() {
 		var serversToRetry []spec.ServerName
 		for {
-			serversToRetry = serversToRetry[:0] // reuse memory
-			time.Sleep(time.Second)
+			// nuke serversToRetry by re-slicing it to be "empty".
+			// The capacity of the slice is unchanged, which ensures we can reuse the memory.
+			serversToRetry = serversToRetry[:0]
+
+			deviceListUpdaterServersRetrying.With(prometheus.Labels{"worker_id": strconv.Itoa(workerID)}).Set(float64(len(retries)))
+			time.Sleep(time.Second * 2)
+
+			// -2, so we have space for incoming device list updates over federation
+			maxServers := (cap(ch) - len(ch)) - 2
+			if maxServers <= 0 {
+				continue
+			}
+
 			retriesMu.Lock()
 			now := time.Now()
 			for srv, retryAt := range retries {
 				if now.After(retryAt) {
 					serversToRetry = append(serversToRetry, srv)
+					if maxServers == len(serversToRetry) {
+						break
+					}
 				}
 			}
+
 			for _, srv := range serversToRetry {
 				delete(retries, srv)
 			}
 			retriesMu.Unlock()
+
 			for _, srv := range serversToRetry {
+				deviceListUpdaterBackpressure.With(prometheus.Labels{"worker_id": strconv.Itoa(workerID)}).Inc()
 				ch <- srv
 			}
 		}
@@ -401,8 +459,18 @@ func (u *DeviceListUpdater) worker(ch chan spec.ServerName) {
 		retriesMu.Lock()
 		_, exists := retries[serverName]
 		retriesMu.Unlock()
-		if exists {
-			// Don't retry a server that we're already waiting for.
+
+		// If the serverName is coming from retries, maybe it was
+		// blacklisted in the meantime.
+		_, err := u.isBlacklistedOrBackingOffFn(serverName)
+		var federationClientError *fedsenderapi.FederationClientError
+		// unwrap errors and check for FederationClientError, if found, federationClientError will be not nil
+		errors.As(err, &federationClientError)
+		isBlacklisted := federationClientError != nil && federationClientError.Blacklisted
+
+		// Don't retry a server that we're already waiting for or is blacklisted by now.
+		if exists || isBlacklisted {
+			deviceListUpdaterBackpressure.With(prometheus.Labels{"worker_id": strconv.Itoa(workerID)}).Dec()
 			continue
 		}
 		waitTime, shouldRetry := u.processServer(serverName)
@@ -413,6 +481,7 @@ func (u *DeviceListUpdater) worker(ch chan spec.ServerName) {
 			}
 			retriesMu.Unlock()
 		}
+		deviceListUpdaterBackpressure.With(prometheus.Labels{"worker_id": strconv.Itoa(workerID)}).Dec()
 	}
 }
 
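Both the notifier and the worker now consult the injected isBlacklistedOrBackingOffFn and look for a federation client error flagging the server as blacklisted. The unwrap-and-check step in isolation, using a local error type as a stand-in for the federation API's FederationClientError:

package sketch

import "errors"

// blacklistError is a stand-in for the federation API's FederationClientError.
type blacklistError struct {
	Blacklisted bool
}

func (e *blacklistError) Error() string { return "server is blacklisted" }

// isBlacklisted unwraps err (which may wrap other errors) and reports whether
// it carries the Blacklisted flag, mirroring the errors.As check in the worker.
func isBlacklisted(err error) bool {
	var be *blacklistError
	if errors.As(err, &be) {
		return be.Blacklisted
	}
	return false
}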
@@ -27,6 +27,9 @@ import (
 	"testing"
 	"time"
 
+	api2 "github.com/matrix-org/dendrite/federationapi/api"
+	"github.com/matrix-org/dendrite/federationapi/statistics"
+	"github.com/matrix-org/dendrite/internal/caching"
 	"github.com/matrix-org/dendrite/internal/sqlutil"
 	"github.com/matrix-org/gomatrixserverlib"
 	"github.com/matrix-org/gomatrixserverlib/fclient"
@@ -128,6 +131,10 @@ type mockDeviceListUpdaterAPI struct {
 func (d *mockDeviceListUpdaterAPI) PerformUploadDeviceKeys(ctx context.Context, req *api.PerformUploadDeviceKeysRequest, res *api.PerformUploadDeviceKeysResponse) {
 }
 
+var testIsBlacklistedOrBackingOff = func(s spec.ServerName) (*statistics.ServerStatistics, error) {
+	return &statistics.ServerStatistics{}, nil
+}
+
 type roundTripper struct {
 	fn func(*http.Request) (*http.Response, error)
 }
@@ -161,7 +168,7 @@ func TestUpdateHavePrevID(t *testing.T) {
 	}
 	ap := &mockDeviceListUpdaterAPI{}
 	producer := &mockKeyChangeProducer{}
-	updater := NewDeviceListUpdater(process.NewProcessContext(), db, ap, producer, nil, 1, nil, "localhost")
+	updater := NewDeviceListUpdater(process.NewProcessContext(), db, ap, producer, nil, 1, nil, "localhost", caching.DisableMetrics, testIsBlacklistedOrBackingOff)
 	event := gomatrixserverlib.DeviceListUpdateEvent{
 		DeviceDisplayName: "Foo Bar",
 		Deleted:           false,
@@ -233,7 +240,7 @@ func TestUpdateNoPrevID(t *testing.T) {
 			`)),
 		}, nil
 	})
-	updater := NewDeviceListUpdater(process.NewProcessContext(), db, ap, producer, fedClient, 2, nil, "example.test")
+	updater := NewDeviceListUpdater(process.NewProcessContext(), db, ap, producer, fedClient, 2, nil, "example.test", caching.DisableMetrics, testIsBlacklistedOrBackingOff)
 	if err := updater.Start(); err != nil {
 		t.Fatalf("failed to start updater: %s", err)
 	}
@@ -303,7 +310,7 @@ func TestDebounce(t *testing.T) {
 		close(incomingFedReq)
 		return <-fedCh, nil
 	})
-	updater := NewDeviceListUpdater(process.NewProcessContext(), db, ap, producer, fedClient, 1, nil, "localhost")
+	updater := NewDeviceListUpdater(process.NewProcessContext(), db, ap, producer, fedClient, 1, nil, "localhost", caching.DisableMetrics, testIsBlacklistedOrBackingOff)
 	if err := updater.Start(); err != nil {
 		t.Fatalf("failed to start updater: %s", err)
 	}
@@ -406,7 +413,7 @@ func TestDeviceListUpdater_CleanUp(t *testing.T) {
 
 	updater := NewDeviceListUpdater(processCtx, db, nil,
 		nil, nil,
-		0, rsAPI, "test")
+		0, rsAPI, "test", caching.DisableMetrics, testIsBlacklistedOrBackingOff)
 	if err := updater.CleanUp(); err != nil {
 		t.Error(err)
 	}
@@ -474,3 +481,68 @@ func Test_dedupeStateList(t *testing.T) {
 		})
 	}
 }
+
+func TestDeviceListUpdaterIgnoreBlacklisted(t *testing.T) {
+	unreachableServer := spec.ServerName("notlocalhost")
+
+	updater := DeviceListUpdater{
+		workerChans: make([]chan spec.ServerName, 1),
+		isBlacklistedOrBackingOffFn: func(s spec.ServerName) (*statistics.ServerStatistics, error) {
+			switch s {
+			case unreachableServer:
+				return nil, &api2.FederationClientError{Blacklisted: true}
+			}
+			return nil, nil
+		},
+		mu:             &sync.Mutex{},
+		userIDToChanMu: &sync.Mutex{},
+		userIDToChan:   make(map[string]chan bool),
+		userIDToMutex:  make(map[string]*sync.Mutex),
+	}
+	workerCh := make(chan spec.ServerName)
+	defer close(workerCh)
+	updater.workerChans[0] = workerCh
+
+	// happy case
+	alice := "@alice:localhost"
+	aliceCh := updater.assignChannel(alice)
+	defer updater.clearChannel(alice)
+
+	// failing case
+	bob := "@bob:" + unreachableServer
+	bobCh := updater.assignChannel(string(bob))
+	defer updater.clearChannel(string(bob))
+
+	expectedServers := map[spec.ServerName]struct{}{
+		"localhost": {},
+	}
+	unexpectedServers := make(map[spec.ServerName]struct{})
+
+	go func() {
+		for serverName := range workerCh {
+			switch serverName {
+			case "localhost":
+				delete(expectedServers, serverName)
+				aliceCh <- true // unblock notifyWorkers
+			case unreachableServer: // this should not happen as it is "filtered" away by the blacklist
+				unexpectedServers[serverName] = struct{}{}
+				bobCh <- true
+			default:
+				unexpectedServers[serverName] = struct{}{}
+			}
+		}
+	}()
+
+	// alice is not blacklisted
+	updater.notifyWorkers(alice)
+	// bob is blacklisted
+	updater.notifyWorkers(string(bob))
+
+	for server := range expectedServers {
+		t.Errorf("Server still in expectedServers map: %s", server)
+	}
+
+	for server := range unexpectedServers {
+		t.Errorf("unexpected server in result: %s", server)
+	}
+}
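The new regression test only exercises the blacklist filtering in notifyWorkers and needs no database. Assuming the usual repository layout, where the updater lives in the userapi internal package, it can be run on its own with something like:

go test ./userapi/internal -run TestDeviceListUpdaterIgnoreBlacklisted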
@@ -77,7 +77,7 @@ func (s *crossSigningKeysStatements) SelectCrossSigningKeysForUser(
 	for rows.Next() {
 		var keyTypeInt int16
 		var keyData spec.Base64Bytes
-		if err := rows.Scan(&keyTypeInt, &keyData); err != nil {
+		if err = rows.Scan(&keyTypeInt, &keyData); err != nil {
 			return nil, err
 		}
 		keyType, ok := types.KeyTypeIntToPurpose[keyTypeInt]
@@ -86,6 +86,7 @@ func (s *crossSigningKeysStatements) SelectCrossSigningKeysForUser(
 		}
 		r[keyType] = keyData
 	}
+	err = rows.Err()
 	return
 }
 

@@ -98,7 +98,7 @@ func (s *crossSigningSigsStatements) SelectCrossSigningSigsForTarget(
 		var userID string
 		var keyID gomatrixserverlib.KeyID
 		var signature spec.Base64Bytes
-		if err := rows.Scan(&userID, &keyID, &signature); err != nil {
+		if err = rows.Scan(&userID, &keyID, &signature); err != nil {
 			return nil, err
 		}
 		if _, ok := r[userID]; !ok {
@@ -106,6 +106,7 @@ func (s *crossSigningSigsStatements) SelectCrossSigningSigsForTarget(
 		}
 		r[userID][keyID] = signature
 	}
+	err = rows.Err()
 	return
 }
 

@@ -162,5 +162,5 @@ func unpackKeys(ctx context.Context, rows *sql.Rows) (map[string]map[string]api.
 		roomData[key.SessionID] = key.KeyBackupSession
 		result[key.RoomID] = roomData
 	}
-	return result, nil
+	return result, rows.Err()
 }

@@ -115,7 +115,7 @@ func (s *keyChangesStatements) SelectKeyChanges(
 	for rows.Next() {
 		var userID string
 		var offset int64
-		if err := rows.Scan(&userID, &offset); err != nil {
+		if err = rows.Scan(&userID, &offset); err != nil {
 			return nil, 0, err
 		}
 		if offset > latestOffset {
@@ -123,5 +123,6 @@ func (s *keyChangesStatements) SelectKeyChanges(
 		}
 		userIDs = append(userIDs, userID)
 	}
+	err = rows.Err()
 	return
 }

@@ -134,7 +134,7 @@ func (s *oneTimeKeysStatements) CountOneTimeKeys(ctx context.Context, userID, de
 		}
 		counts.KeyCount[algorithm] = count
 	}
-	return counts, nil
+	return counts, rows.Err()
 }
 
 func (s *oneTimeKeysStatements) InsertOneTimeKeys(ctx context.Context, txn *sql.Tx, keys api.OneTimeKeys) (*api.OneTimeKeysCount, error) {

@@ -165,5 +165,5 @@ func (s *profilesStatements) SelectProfilesBySearch(
 			profiles = append(profiles, profile)
 		}
 	}
-	return profiles, nil
+	return profiles, rows.Err()
 }

@@ -18,6 +18,7 @@ import (
 	"context"
 	"database/sql"
 
+	"github.com/matrix-org/dendrite/internal"
 	"github.com/matrix-org/dendrite/internal/sqlutil"
 	"github.com/matrix-org/dendrite/userapi/storage/tables"
 	"github.com/matrix-org/gomatrixserverlib/spec"
@@ -94,6 +95,7 @@ func (s *threepidStatements) SelectThreePIDsForLocalpart(
 	if err != nil {
 		return
 	}
+	defer internal.CloseAndLogIfError(ctx, rows, "SelectThreePIDsForLocalpart: failed to close rows")
 
 	threepids = []authtypes.ThreePID{}
 	for rows.Next() {
@ -107,7 +109,7 @@ func (s *threepidStatements) SelectThreePIDsForLocalpart(
 			Medium:  medium,
 		})
 	}
 
+	err = rows.Err()
 	return
 }
 

@@ -19,6 +19,7 @@ import (
 	"database/sql"
 	"encoding/json"
 
+	"github.com/matrix-org/dendrite/internal"
 	"github.com/matrix-org/dendrite/internal/sqlutil"
 	"github.com/matrix-org/dendrite/userapi/storage/tables"
 	"github.com/matrix-org/gomatrixserverlib/spec"
@@ -95,6 +96,7 @@ func (s *accountDataStatements) SelectAccountData(
 	if err != nil {
 		return nil, nil, err
 	}
+	defer internal.CloseAndLogIfError(ctx, rows, "SelectAccountData: failed to close rows")
 
 	global := map[string]json.RawMessage{}
 	rooms := map[string]map[string]json.RawMessage{}
@@ -118,7 +120,7 @@ func (s *accountDataStatements) SelectAccountData(
 		}
 	}
 
-	return global, rooms, nil
+	return global, rooms, rows.Err()
 }
 
 func (s *accountDataStatements) SelectAccountDataByType(

@@ -76,7 +76,7 @@ func (s *crossSigningKeysStatements) SelectCrossSigningKeysForUser(
 	for rows.Next() {
 		var keyTypeInt int16
 		var keyData spec.Base64Bytes
-		if err := rows.Scan(&keyTypeInt, &keyData); err != nil {
+		if err = rows.Scan(&keyTypeInt, &keyData); err != nil {
 			return nil, err
 		}
 		keyType, ok := types.KeyTypeIntToPurpose[keyTypeInt]
@@ -85,6 +85,7 @@ func (s *crossSigningKeysStatements) SelectCrossSigningKeysForUser(
 		}
 		r[keyType] = keyData
 	}
+	err = rows.Err()
 	return
 }
 

@@ -96,7 +96,7 @@ func (s *crossSigningSigsStatements) SelectCrossSigningSigsForTarget(
 		var userID string
 		var keyID gomatrixserverlib.KeyID
 		var signature spec.Base64Bytes
-		if err := rows.Scan(&userID, &keyID, &signature); err != nil {
+		if err = rows.Scan(&userID, &keyID, &signature); err != nil {
 			return nil, err
 		}
 		if _, ok := r[userID]; !ok {
@@ -104,6 +104,7 @@ func (s *crossSigningSigsStatements) SelectCrossSigningSigsForTarget(
 		}
 		r[userID][keyID] = signature
 	}
+	err = rows.Err()
 	return
 }
 

@@ -296,6 +296,7 @@ func (s *devicesStatements) SelectDevicesByLocalpart(
 	if err != nil {
 		return devices, err
 	}
+	defer internal.CloseAndLogIfError(ctx, rows, "SelectDevicesByLocalpart: failed to close rows")
 
 	var dev api.Device
 	var lastseents sql.NullInt64
@@ -325,7 +326,7 @@ func (s *devicesStatements) SelectDevicesByLocalpart(
 		devices = append(devices, dev)
 	}
 
-	return devices, nil
+	return devices, rows.Err()
 }
 
 func (s *devicesStatements) SelectDevicesByID(ctx context.Context, deviceIDs []string) ([]api.Device, error) {

@@ -162,5 +162,5 @@ func unpackKeys(ctx context.Context, rows *sql.Rows) (map[string]map[string]api.
 		roomData[key.SessionID] = key.KeyBackupSession
 		result[key.RoomID] = roomData
 	}
-	return result, nil
+	return result, rows.Err()
 }

@@ -113,7 +113,7 @@ func (s *keyChangesStatements) SelectKeyChanges(
 	for rows.Next() {
 		var userID string
 		var offset int64
-		if err := rows.Scan(&userID, &offset); err != nil {
+		if err = rows.Scan(&userID, &offset); err != nil {
 			return nil, 0, err
 		}
 		if offset > latestOffset {
@@ -121,5 +121,6 @@ func (s *keyChangesStatements) SelectKeyChanges(
 		}
 		userIDs = append(userIDs, userID)
 	}
+	err = rows.Err()
 	return
 }

@@ -140,7 +140,7 @@ func (s *oneTimeKeysStatements) CountOneTimeKeys(ctx context.Context, userID, de
 		}
 		counts.KeyCount[algorithm] = count
 	}
-	return counts, nil
+	return counts, rows.Err()
 }
 
 func (s *oneTimeKeysStatements) InsertOneTimeKeys(

@@ -173,5 +173,5 @@ func (s *profilesStatements) SelectProfilesBySearch(
 			profiles = append(profiles, profile)
 		}
 	}
-	return profiles, nil
+	return profiles, rows.Err()
 }
@@ -18,10 +18,12 @@ import (
 	"time"
 
 	fedsenderapi "github.com/matrix-org/dendrite/federationapi/api"
+	"github.com/matrix-org/dendrite/federationapi/statistics"
 	"github.com/matrix-org/dendrite/internal/pushgateway"
 	"github.com/matrix-org/dendrite/internal/sqlutil"
 	"github.com/matrix-org/dendrite/setup/config"
 	"github.com/matrix-org/dendrite/setup/process"
+	"github.com/matrix-org/gomatrixserverlib/spec"
 	"github.com/sirupsen/logrus"
 
 	rsapi "github.com/matrix-org/dendrite/roomserver/api"
@@ -46,6 +48,8 @@ func NewInternalAPI(
 	natsInstance *jetstream.NATSInstance,
 	rsAPI rsapi.UserRoomserverAPI,
 	fedClient fedsenderapi.KeyserverFederationAPI,
+	enableMetrics bool,
+	blacklistedOrBackingOffFn func(s spec.ServerName) (*statistics.ServerStatistics, error),
 ) *internal.UserInternalAPI {
 	js, _ := natsInstance.Prepare(processContext, &dendriteCfg.Global.JetStream)
 	appServices := dendriteCfg.Derived.ApplicationServices
@@ -99,7 +103,7 @@ func NewInternalAPI(
 		FedClient: fedClient,
 	}
 
-	updater := internal.NewDeviceListUpdater(processContext, keyDB, userAPI, keyChangeProducer, fedClient, 8, rsAPI, dendriteCfg.Global.ServerName) // 8 workers TODO: configurable
+	updater := internal.NewDeviceListUpdater(processContext, keyDB, userAPI, keyChangeProducer, fedClient, dendriteCfg.UserAPI.WorkerCount, rsAPI, dendriteCfg.Global.ServerName, enableMetrics, blacklistedOrBackingOffFn)
 	userAPI.Updater = updater
 	// Remove users which we don't share a room with anymore
 	if err := updater.CleanUp(); err != nil {