Device list updates
Repository: https://github.com/matrix-org/dendrite.git
Commit ff22d71de6 (parent fb90ca074e)
@@ -49,8 +49,8 @@ func DeviceOTKCounts(ctx context.Context, keyAPI keyapi.KeyInternalAPI, userID,
 // nolint:gocyclo
 func DeviceListCatchup(
 	ctx context.Context, keyAPI keyapi.KeyInternalAPI, rsAPI roomserverAPI.RoomserverInternalAPI,
-	userID string, res *types.Response, from, to types.StreamingToken,
-) (hasNew bool, err error) {
+	userID string, res *types.Response, from, to types.LogPosition,
+) (newPos types.LogPosition, hasNew bool, err error) {
 
 	// Track users who we didn't track before but now do by virtue of sharing a room with them, or not.
 	newlyJoinedRooms := joinedRooms(res, userID)
@@ -58,7 +58,7 @@ func DeviceListCatchup(
 	if len(newlyJoinedRooms) > 0 || len(newlyLeftRooms) > 0 {
 		changed, left, err := TrackChangedUsers(ctx, rsAPI, userID, newlyJoinedRooms, newlyLeftRooms)
 		if err != nil {
-			return false, err
+			return to, false, err
 		}
 		res.DeviceLists.Changed = changed
 		res.DeviceLists.Left = left
@@ -73,13 +73,13 @@ func DeviceListCatchup(
 	offset = sarama.OffsetOldest
 	// Extract partition/offset from sync token
 	// TODO: In a world where keyserver is sharded there will be multiple partitions and hence multiple QueryKeyChanges to make.
-	if !from.DeviceListPosition.IsEmpty() {
-		partition = from.DeviceListPosition.Partition
-		offset = from.DeviceListPosition.Offset
+	if !from.IsEmpty() {
+		partition = from.Partition
+		offset = from.Offset
 	}
 	var toOffset int64
 	toOffset = sarama.OffsetNewest
-	if toLog := to.DeviceListPosition; toLog.Partition == partition && toLog.Offset > 0 {
+	if toLog := to; toLog.Partition == partition && toLog.Offset > 0 {
 		toOffset = toLog.Offset
 	}
 	var queryRes api.QueryKeyChangesResponse
@@ -91,7 +91,7 @@ func DeviceListCatchup(
 	if queryRes.Error != nil {
 		// don't fail the catchup because we may have got useful information by tracking membership
 		util.GetLogger(ctx).WithError(queryRes.Error).Error("QueryKeyChanges failed")
-		return hasNew, nil
+		return to, hasNew, nil
 	}
 	// QueryKeyChanges gets ALL users who have changed keys, we want the ones who share rooms with the user.
 	var sharedUsersMap map[string]int
@@ -128,13 +128,12 @@ func DeviceListCatchup(
 		}
 	}
 	// set the new token
-	to.DeviceListPosition = types.LogPosition{
+	to = types.LogPosition{
 		Partition: queryRes.Partition,
 		Offset:    queryRes.Offset,
 	}
-	res.NextBatch.ApplyUpdates(to)
 
-	return hasNew, nil
+	return to, hasNew, nil
 }
 
 // TrackChangedUsers calculates the values of device_lists.changed|left in the /sync response.
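Note on the hunks above: DeviceListCatchup no longer digs the device-list offset out of a full StreamingToken and no longer applies it to res.NextBatch itself; it takes and returns a bare types.LogPosition (a Kafka partition/offset into the keyserver's key-change log), and the caller carries the returned position forward. A minimal standalone sketch of that contract, using a stand-in logPosition type with the fields the diff touches (Partition, Offset, IsEmpty) rather than the real syncapi/types package:

package main

import "fmt"

// logPosition is a stand-in for syncapi's types.LogPosition (not the real
// type): a position in a partitioned log, identified by Kafka partition
// and offset. The real IsEmpty may be defined differently; this is assumed.
type logPosition struct {
	Partition int32
	Offset    int64
}

func (p logPosition) IsEmpty() bool {
	return p.Partition == 0 && p.Offset == 0
}

// catchup is a toy stand-in for DeviceListCatchup's new shape: it consumes
// the key-change log between from and to and returns the position reached,
// instead of writing it into the response's next_batch token itself.
func catchup(from, to logPosition) (newPos logPosition, hasNew bool) {
	if from.IsEmpty() {
		// No previous device-list position: the real code falls back to
		// sarama.OffsetOldest here and replays the whole log.
		from.Offset = 0
	}
	// ... query key changes in (from.Offset, to.Offset] ...
	return to, true
}

func main() {
	var since logPosition // zero value on an initial sync
	latest := logPosition{Partition: 0, Offset: 42}

	// The caller feeds the returned position back in on the next
	// incremental pass; this is what the stream provider now does.
	newPos, hasNew := catchup(since, latest)
	fmt.Printf("caught up to %+v (new data: %v)\n", newPos, hasNew)
}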
@@ -16,12 +16,10 @@ import (
 
 var (
 	syncingUser = "@alice:localhost"
-	emptyToken  = types.StreamingToken{}
-	newestToken = types.StreamingToken{
-		DeviceListPosition: types.LogPosition{
+	emptyToken  = types.LogPosition{}
+	newestToken = types.LogPosition{
 		Offset:    sarama.OffsetNewest,
 		Partition: 0,
-		},
 	}
 )
 
@@ -180,7 +178,7 @@ func TestKeyChangeCatchupOnJoinShareNewUser(t *testing.T) {
 			"!another:room": {syncingUser},
 		},
 	}
-	hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, newestToken)
+	_, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, newestToken)
 	if err != nil {
 		t.Fatalf("DeviceListCatchup returned an error: %s", err)
 	}
@@ -203,7 +201,7 @@ func TestKeyChangeCatchupOnLeaveShareLeftUser(t *testing.T) {
 			"!another:room": {syncingUser},
 		},
 	}
-	hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, newestToken)
+	_, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, newestToken)
 	if err != nil {
 		t.Fatalf("DeviceListCatchup returned an error: %s", err)
 	}
@@ -226,7 +224,7 @@ func TestKeyChangeCatchupOnJoinShareNoNewUsers(t *testing.T) {
 			"!another:room": {syncingUser, existingUser},
 		},
 	}
-	hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, newestToken)
+	_, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, newestToken)
 	if err != nil {
 		t.Fatalf("Catchup returned an error: %s", err)
 	}
@@ -248,7 +246,7 @@ func TestKeyChangeCatchupOnLeaveShareNoUsers(t *testing.T) {
 			"!another:room": {syncingUser, existingUser},
 		},
 	}
-	hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, newestToken)
+	_, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, newestToken)
 	if err != nil {
 		t.Fatalf("DeviceListCatchup returned an error: %s", err)
 	}
@@ -307,7 +305,7 @@ func TestKeyChangeCatchupNoNewJoinsButMessages(t *testing.T) {
 			roomID: {syncingUser, existingUser},
 		},
 	}
-	hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, newestToken)
+	_, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, newestToken)
 	if err != nil {
 		t.Fatalf("DeviceListCatchup returned an error: %s", err)
 	}
@@ -335,7 +333,7 @@ func TestKeyChangeCatchupChangeAndLeft(t *testing.T) {
 			"!another:room": {syncingUser},
 		},
 	}
-	hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, newestToken)
+	_, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, newestToken)
 	if err != nil {
 		t.Fatalf("Catchup returned an error: %s", err)
 	}
@@ -420,7 +418,7 @@ func TestKeyChangeCatchupChangeAndLeftSameRoom(t *testing.T) {
 			"!another:room": {syncingUser},
 		},
 	}
-	hasNew, err := DeviceListCatchup(
+	_, hasNew, err := DeviceListCatchup(
 		context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, newestToken,
 	)
 	if err != nil {
@@ -3,18 +3,23 @@ package streams
 import (
 	"context"
 
+	keyapi "github.com/matrix-org/dendrite/keyserver/api"
+	"github.com/matrix-org/dendrite/roomserver/api"
+	"github.com/matrix-org/dendrite/syncapi/internal"
 	"github.com/matrix-org/dendrite/syncapi/types"
 )
 
 type DeviceListStreamProvider struct {
 	PartitionedStreamProvider
+	rsAPI  api.RoomserverInternalAPI
+	keyAPI keyapi.KeyInternalAPI
 }
 
 func (p *DeviceListStreamProvider) CompleteSync(
 	ctx context.Context,
 	req *types.SyncRequest,
 ) types.LogPosition {
-	return p.LatestPosition(ctx)
+	return p.IncrementalSync(ctx, req, types.LogPosition{}, p.LatestPosition(ctx))
 }
 
 func (p *DeviceListStreamProvider) IncrementalSync(
@@ -22,6 +27,15 @@ func (p *DeviceListStreamProvider) IncrementalSync(
 	req *types.SyncRequest,
 	from, to types.LogPosition,
 ) types.LogPosition {
+	var err error
+	to, _, err = internal.DeviceListCatchup(context.Background(), p.keyAPI, p.rsAPI, req.Device.UserID, req.Response, from, to)
+	if err != nil {
+		return to // nil, fmt.Errorf("internal.DeviceListCatchup: %w", err)
+	}
+	err = internal.DeviceOTKCounts(req.Context, p.keyAPI, req.Device.UserID, req.Device.ID, req.Response)
+	if err != nil {
+		return to // res, fmt.Errorf("internal.DeviceOTKCounts: %w", err)
+	}
 
 	return to
 }
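With this hunk the device-list provider expresses a complete sync as an incremental sync from the zero LogPosition up to the latest one, so all of the catch-up and one-time-key logic lives in IncrementalSync. A rough sketch of the provider contract this relies on, inferred from the methods used in the hunk rather than copied from Dendrite's actual interface definition:

package streams

import (
	"context"

	"github.com/matrix-org/dendrite/syncapi/types"
)

// partitionedStreamProvider is an assumed shape, not the real Dendrite
// interface: it captures the three operations the DeviceListStreamProvider
// uses, and shows why CompleteSync can be written purely in terms of
// IncrementalSync over the full range of the log.
type partitionedStreamProvider interface {
	// CompleteSync fills req.Response for an initial sync and returns the
	// position the response is valid up to.
	CompleteSync(ctx context.Context, req *types.SyncRequest) types.LogPosition
	// IncrementalSync fills req.Response with changes between from and to
	// and returns the position actually reached.
	IncrementalSync(ctx context.Context, req *types.SyncRequest, from, to types.LogPosition) types.LogPosition
	// LatestPosition reports the newest known position for this stream.
	LatestPosition(ctx context.Context) types.LogPosition
}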
@@ -18,9 +18,6 @@ func (p *TypingStreamProvider) CompleteSync(
 	ctx context.Context,
 	req *types.SyncRequest,
 ) types.StreamPosition {
-	// It isn't beneficial to send previous typing notifications
-	// after a complete sync, so just return the latest position
-	// and otherwise do nothing.
 	return p.IncrementalSync(ctx, req, 0, p.LatestPosition(ctx))
 }
 
@@ -2,6 +2,8 @@ package streams
 
 import (
 	"github.com/matrix-org/dendrite/eduserver/cache"
+	keyapi "github.com/matrix-org/dendrite/keyserver/api"
+	rsapi "github.com/matrix-org/dendrite/roomserver/api"
 	"github.com/matrix-org/dendrite/syncapi/storage"
 	"github.com/matrix-org/dendrite/syncapi/types"
 	userapi "github.com/matrix-org/dendrite/userapi/api"
@@ -19,6 +21,7 @@ type Streams struct {
 
 func NewSyncStreamProviders(
 	d storage.Database, userAPI userapi.UserInternalAPI,
+	rsAPI rsapi.RoomserverInternalAPI, keyAPI keyapi.KeyInternalAPI,
 	eduCache *cache.EDUCache,
 ) *Streams {
 	streams := &Streams{
@@ -44,6 +47,8 @@ func NewSyncStreamProviders(
 		},
 		DeviceListStreamProvider: &DeviceListStreamProvider{
 			PartitionedStreamProvider: PartitionedStreamProvider{DB: d},
+			rsAPI:                     rsAPI,
+			keyAPI:                    keyAPI,
 		},
 	}
 
@@ -18,7 +18,6 @@ package sync
 
 import (
 	"context"
-	"fmt"
 	"net"
 	"net/http"
 	"strings"
@@ -185,10 +184,10 @@ func (rp *RequestPool) OnIncomingSyncRequest(req *http.Request, device *userapi.
 		case <-rp.streams.DeviceListStreamProvider.NotifyAfter(waitctx, device, syncReq.Since.DeviceListPosition):
 		}
 
-		syncReq.Log.Println("Responding to sync after wakeup")
+		syncReq.Log.Debugln("Responding to sync after wakeup")
 		waitcancel()
 	} else {
-		syncReq.Log.Println("Responding to sync immediately")
+		syncReq.Log.Debugln("Responding to sync immediately")
 	}
 
 	if syncReq.Since.IsEmpty() {
@@ -279,20 +278,18 @@ func (rp *RequestPool) OnIncomingKeyChangeRequest(req *http.Request, device *use
 			JSON: jsonerror.InvalidArgumentValue("bad 'to' value"),
 		}
 	}
-	// work out room joins/leaves
-	/*
-		res, err := rp.db.IncrementalSync(
-			req.Context(), types.NewResponse(), *device, fromToken, toToken, 10, false,
-		)
+	syncReq, err := newSyncRequest(req, *device, rp.db)
 	if err != nil {
-		util.GetLogger(req.Context()).WithError(err).Error("Failed to IncrementalSync")
+		util.GetLogger(req.Context()).WithError(err).Error("newSyncRequest failed")
 		return jsonerror.InternalServerError()
 	}
-	*/
-	res := types.NewResponse()
-	res, err = rp.appendDeviceLists(res, device.UserID, fromToken, toToken)
+	rp.streams.PDUStreamProvider.IncrementalSync(req.Context(), syncReq, fromToken.PDUPosition, toToken.PDUPosition)
+	_, _, err = internal.DeviceListCatchup(
+		req.Context(), rp.keyAPI, rp.rsAPI, syncReq.Device.UserID,
+		syncReq.Response, fromToken.DeviceListPosition, toToken.DeviceListPosition,
+	)
 	if err != nil {
-		util.GetLogger(req.Context()).WithError(err).Error("Failed to appendDeviceLists info")
+		util.GetLogger(req.Context()).WithError(err).Error("Failed to DeviceListCatchup info")
 		return jsonerror.InternalServerError()
 	}
 	return util.JSONResponse{
@@ -301,171 +298,12 @@ func (rp *RequestPool) OnIncomingKeyChangeRequest(req *http.Request, device *use
 			Changed []string `json:"changed"`
 			Left    []string `json:"left"`
 		}{
-			Changed: res.DeviceLists.Changed,
-			Left:    res.DeviceLists.Left,
+			Changed: syncReq.Response.DeviceLists.Changed,
+			Left:    syncReq.Response.DeviceLists.Left,
 		},
 	}
 }
 
-// nolint:gocyclo
-/*
-func (rp *RequestPool) currentSyncForUser(req syncRequest, latestPos types.StreamingToken) (*types.Response, error) {
-	res := types.NewResponse()
-
-	// TODO: handle ignored users
-	if req.since.IsEmpty() {
-		res, err = rp.db.CompleteSync(req.ctx, res, req.device, req.limit)
-		if err != nil {
-			return res, fmt.Errorf("rp.db.CompleteSync: %w", err)
-		}
-	} else {
-		res, err = rp.db.IncrementalSync(req.ctx, res, req.device, req.since, latestPos, req.limit, req.wantFullState)
-		if err != nil {
-			return res, fmt.Errorf("rp.db.IncrementalSync: %w", err)
-		}
-	}
-
-	accountDataFilter := gomatrixserverlib.DefaultEventFilter() // TODO: use filter provided in req instead
-	res, err = rp.appendAccountData(res, req.device.UserID, req, latestPos.PDUPosition, &accountDataFilter)
-	if err != nil {
-		return res, fmt.Errorf("rp.appendAccountData: %w", err)
-	}
-	res, err = rp.appendDeviceLists(res, req.device.UserID, req.since, latestPos)
-	if err != nil {
-		return res, fmt.Errorf("rp.appendDeviceLists: %w", err)
-	}
-	err = internal.DeviceOTKCounts(req.ctx, rp.keyAPI, req.device.UserID, req.device.ID, res)
-	if err != nil {
-		return res, fmt.Errorf("internal.DeviceOTKCounts: %w", err)
-	}
-
-	return res, err
-}
-*/
-
-func (rp *RequestPool) appendDeviceLists(
-	data *types.Response, userID string, since, to types.StreamingToken,
-) (*types.Response, error) {
-	_, err := internal.DeviceListCatchup(context.Background(), rp.keyAPI, rp.rsAPI, userID, data, since, to)
-	if err != nil {
-		return nil, fmt.Errorf("internal.DeviceListCatchup: %w", err)
-	}
-
-	return data, nil
-}
-
-/*
-func (rp *RequestPool) appendAccountData(
-	data *types.Response, userID string, req syncRequest, currentPos types.StreamPosition,
-	accountDataFilter *gomatrixserverlib.EventFilter,
-) (*types.Response, error) {
-	// TODO: Account data doesn't have a sync position of its own, meaning that
-	// account data might be sent multiple time to the client if multiple account
-	// data keys were set between two message. This isn't a huge issue since the
-	// duplicate data doesn't represent a huge quantity of data, but an optimisation
-	// here would be making sure each data is sent only once to the client.
-	if req.since.IsEmpty() {
-		// If this is the initial sync, we don't need to check if a data has
-		// already been sent. Instead, we send the whole batch.
-		dataReq := &userapi.QueryAccountDataRequest{
-			UserID: userID,
-		}
-		dataRes := &userapi.QueryAccountDataResponse{}
-		if err := rp.userAPI.QueryAccountData(req.ctx, dataReq, dataRes); err != nil {
-			return nil, err
-		}
-		for datatype, databody := range dataRes.GlobalAccountData {
-			data.AccountData.Events = append(
-				data.AccountData.Events,
-				gomatrixserverlib.ClientEvent{
-					Type:    datatype,
-					Content: gomatrixserverlib.RawJSON(databody),
-				},
-			)
-		}
-		for r, j := range data.Rooms.Join {
-			for datatype, databody := range dataRes.RoomAccountData[r] {
-				j.AccountData.Events = append(
-					j.AccountData.Events,
-					gomatrixserverlib.ClientEvent{
-						Type:    datatype,
-						Content: gomatrixserverlib.RawJSON(databody),
-					},
-				)
-				data.Rooms.Join[r] = j
-			}
-		}
-		return data, nil
-	}
-
-	r := types.Range{
-		From: req.since.PDUPosition,
-		To:   currentPos,
-	}
-	// If both positions are the same, it means that the data was saved after the
-	// latest room event. In that case, we need to decrement the old position as
-	// results are exclusive of Low.
-	if r.Low() == r.High() {
-		r.From--
-	}
-
-	// Sync is not initial, get all account data since the latest sync
-	dataTypes, err := rp.db.GetAccountDataInRange(
-		req.ctx, userID, r, accountDataFilter,
-	)
-	if err != nil {
-		return nil, fmt.Errorf("rp.db.GetAccountDataInRange: %w", err)
-	}
-
-	if len(dataTypes) == 0 {
-		// TODO: this fixes the sytest but is it the right thing to do?
-		dataTypes[""] = []string{"m.push_rules"}
-	}
-
-	// Iterate over the rooms
-	for roomID, dataTypes := range dataTypes {
-		// Request the missing data from the database
-		for _, dataType := range dataTypes {
-			dataReq := userapi.QueryAccountDataRequest{
-				UserID:   userID,
-				RoomID:   roomID,
-				DataType: dataType,
-			}
-			dataRes := userapi.QueryAccountDataResponse{}
-			err = rp.userAPI.QueryAccountData(req.ctx, &dataReq, &dataRes)
-			if err != nil {
-				continue
-			}
-			if roomID == "" {
-				if globalData, ok := dataRes.GlobalAccountData[dataType]; ok {
-					data.AccountData.Events = append(
-						data.AccountData.Events,
-						gomatrixserverlib.ClientEvent{
-							Type:    dataType,
-							Content: gomatrixserverlib.RawJSON(globalData),
-						},
-					)
-				}
-			} else {
-				if roomData, ok := dataRes.RoomAccountData[roomID][dataType]; ok {
-					joinData := data.Rooms.Join[roomID]
-					joinData.AccountData.Events = append(
-						joinData.AccountData.Events,
-						gomatrixserverlib.ClientEvent{
-							Type:    dataType,
-							Content: gomatrixserverlib.RawJSON(roomData),
-						},
-					)
-					data.Rooms.Join[roomID] = joinData
-				}
-			}
-		}
-	}
-
-	return data, nil
-}
-*/
 
 // shouldReturnImmediately returns whether the /sync request is an initial sync,
 // or timeout=0, or full_state=true, in any of the cases the request should
 // return immediately.
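In the hunks above, OnIncomingKeyChangeRequest now builds a regular sync request, lets the PDU stream provider work out room joins and leaves for the token range, and runs DeviceListCatchup straight into syncReq.Response; the old appendDeviceLists helper and the commented-out currentSyncForUser/appendAccountData code are deleted. A small standalone sketch of the response body the endpoint returns, with field names and JSON tags taken from the hunk (the handler itself is omitted):

package main

import (
	"encoding/json"
	"fmt"
)

// keyChangesBody mirrors the anonymous struct marshalled by
// OnIncomingKeyChangeRequest: /keys/changes reports which users' device
// lists changed, and which users are no longer tracked, between two tokens.
type keyChangesBody struct {
	Changed []string `json:"changed"`
	Left    []string `json:"left"`
}

func main() {
	body := keyChangesBody{
		Changed: []string{"@bob:localhost"}, // device lists that changed
		Left:    []string{"@eve:localhost"}, // users we no longer share a room with
	}
	out, _ := json.Marshal(body)
	fmt.Println(string(out)) // {"changed":["@bob:localhost"],"left":["@eve:localhost"]}
}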
@@ -51,7 +51,7 @@ func AddPublicRoutes(
 	}
 
 	eduCache := cache.New()
-	streams := streams.NewSyncStreamProviders(syncDB, userAPI, eduCache)
+	streams := streams.NewSyncStreamProviders(syncDB, userAPI, rsAPI, keyAPI, eduCache)
 
 	requestPool := sync.NewRequestPool(syncDB, cfg, userAPI, keyAPI, rsAPI, streams)
 