Implement /keys/changes (#1232)
* Implement /keys/changes, and refactor QueryKeyChanges to accept a `to` offset.
* Unbreak tests
* Sort keys when serialising log tokens
parent 9355fb5ac8
commit a2174d3294
@@ -138,6 +138,9 @@ type QueryKeyChangesRequest struct {
 	Partition int32
 	// The offset of the last received key event, or sarama.OffsetOldest if this is from the beginning
 	Offset int64
+	// The inclusive offset where to track key changes up to. Messages with this offset are included in the response.
+	// Use sarama.OffsetNewest if the offset is unknown (then check the response Offset to avoid racing).
+	ToOffset int64
 }
 
 type QueryKeyChangesResponse struct {
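For illustration only (not part of this commit): a minimal sketch of how a caller might use the new field, assuming it already holds a keyserver api.KeyInternalAPI value. Passing sarama.OffsetNewest leaves the upper bound open, and the response Offset then reports how far the results are authoritative, which is the race-avoidance the comment above describes. The helper name is hypothetical.

import (
	"context"
	"fmt"

	"github.com/Shopify/sarama"
	keyapi "github.com/matrix-org/dendrite/keyserver/api"
)

// keyChangesSince queries all key changes after fromOffset on one partition,
// with no upper bound, and returns the offset the response is authoritative up to.
func keyChangesSince(ctx context.Context, keyAPI keyapi.KeyInternalAPI, partition int32, fromOffset int64) (int64, error) {
	var res keyapi.QueryKeyChangesResponse
	keyAPI.QueryKeyChanges(ctx, &keyapi.QueryKeyChangesRequest{
		Partition: partition,
		Offset:    fromOffset,
		// The upper bound is unknown here, so ask for "newest" and read
		// res.Offset afterwards to learn how far the answer is authoritative.
		ToOffset: sarama.OffsetNewest,
	}, &res)
	if res.Error != nil {
		return 0, fmt.Errorf("QueryKeyChanges: %s", res.Error.Err)
	}
	return res.Offset, nil
}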
@@ -44,7 +44,7 @@ func (a *KeyInternalAPI) QueryKeyChanges(ctx context.Context, req *api.QueryKeyC
 	if req.Partition < 0 {
 		req.Partition = a.Producer.DefaultPartition()
 	}
-	userIDs, latest, err := a.DB.KeyChanges(ctx, req.Partition, req.Offset)
+	userIDs, latest, err := a.DB.KeyChanges(ctx, req.Partition, req.Offset, req.ToOffset)
 	if err != nil {
 		res.Error = &api.KeyError{
 			Err: err.Error(),
@@ -48,7 +48,8 @@ type Database interface {
 	// their keys in some way.
 	StoreKeyChange(ctx context.Context, partition int32, offset int64, userID string) error
 
-	// KeyChanges returns a list of user IDs who have modified their keys from the offset given.
+	// KeyChanges returns a list of user IDs who have modified their keys from the offset given (exclusive) to the offset given (inclusive).
+	// A to offset of sarama.OffsetNewest means no upper limit.
 	// Returns the offset of the latest key change.
-	KeyChanges(ctx context.Context, partition int32, fromOffset int64) (userIDs []string, latestOffset int64, err error)
+	KeyChanges(ctx context.Context, partition int32, fromOffset, toOffset int64) (userIDs []string, latestOffset int64, err error)
 }
@@ -17,7 +17,9 @@ package postgres
 import (
 	"context"
 	"database/sql"
+	"math"
 
+	"github.com/Shopify/sarama"
 	"github.com/matrix-org/dendrite/internal"
 	"github.com/matrix-org/dendrite/keyserver/storage/tables"
 )
@@ -44,7 +46,7 @@ const upsertKeyChangeSQL = "" +
 // select the highest offset for each user in the range. The grouping by user gives distinct entries and then we just
 // take the max offset value as the latest offset.
 const selectKeyChangesSQL = "" +
-	"SELECT user_id, MAX(log_offset) FROM keyserver_key_changes WHERE partition = $1 AND log_offset > $2 GROUP BY user_id"
+	"SELECT user_id, MAX(log_offset) FROM keyserver_key_changes WHERE partition = $1 AND log_offset > $2 AND log_offset <= $3 GROUP BY user_id"
 
 type keyChangesStatements struct {
 	db *sql.DB
@@ -75,9 +77,12 @@ func (s *keyChangesStatements) InsertKeyChange(ctx context.Context, partition in
 }
 
 func (s *keyChangesStatements) SelectKeyChanges(
-	ctx context.Context, partition int32, fromOffset int64,
+	ctx context.Context, partition int32, fromOffset, toOffset int64,
 ) (userIDs []string, latestOffset int64, err error) {
-	rows, err := s.selectKeyChangesStmt.QueryContext(ctx, partition, fromOffset)
+	if toOffset == sarama.OffsetNewest {
+		toOffset = math.MaxInt64
+	}
+	rows, err := s.selectKeyChangesStmt.QueryContext(ctx, partition, fromOffset, toOffset)
 	if err != nil {
 		return nil, 0, err
 	}
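Aside (not part of the diff): the OffsetNewest check above is needed because sarama's sentinel offsets are negative constants, so passing one straight into the "log_offset <= $3" predicate would match nothing; mapping it to math.MaxInt64 turns the upper bound into a no-op. A standalone sketch of the same mapping, with a hypothetical helper name:

import (
	"math"

	"github.com/Shopify/sarama"
)

// upperBound converts the "no upper limit" sentinel into a usable SQL bound.
// sarama.OffsetNewest is -1, so it cannot be used directly as an upper limit.
func upperBound(toOffset int64) int64 {
	if toOffset == sarama.OffsetNewest {
		return math.MaxInt64
	}
	return toOffset
}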
@@ -78,6 +78,6 @@ func (d *Database) StoreKeyChange(ctx context.Context, partition int32, offset i
 	return d.KeyChangesTable.InsertKeyChange(ctx, partition, offset, userID)
 }
 
-func (d *Database) KeyChanges(ctx context.Context, partition int32, fromOffset int64) (userIDs []string, latestOffset int64, err error) {
-	return d.KeyChangesTable.SelectKeyChanges(ctx, partition, fromOffset)
+func (d *Database) KeyChanges(ctx context.Context, partition int32, fromOffset, toOffset int64) (userIDs []string, latestOffset int64, err error) {
+	return d.KeyChangesTable.SelectKeyChanges(ctx, partition, fromOffset, toOffset)
 }
@@ -17,7 +17,9 @@ package sqlite3
 import (
 	"context"
 	"database/sql"
+	"math"
 
+	"github.com/Shopify/sarama"
 	"github.com/matrix-org/dendrite/internal"
 	"github.com/matrix-org/dendrite/keyserver/storage/tables"
 )
@@ -45,7 +47,7 @@ const upsertKeyChangeSQL = "" +
 // select the highest offset for each user in the range. The grouping by user gives distinct entries and then we just
 // take the max offset value as the latest offset.
 const selectKeyChangesSQL = "" +
-	"SELECT user_id, MAX(offset) FROM keyserver_key_changes WHERE partition = $1 AND offset > $2 GROUP BY user_id"
+	"SELECT user_id, MAX(offset) FROM keyserver_key_changes WHERE partition = $1 AND offset > $2 AND offset <= $3 GROUP BY user_id"
 
 type keyChangesStatements struct {
 	db *sql.DB
@@ -76,9 +78,12 @@ func (s *keyChangesStatements) InsertKeyChange(ctx context.Context, partition in
 }
 
 func (s *keyChangesStatements) SelectKeyChanges(
-	ctx context.Context, partition int32, fromOffset int64,
+	ctx context.Context, partition int32, fromOffset, toOffset int64,
 ) (userIDs []string, latestOffset int64, err error) {
-	rows, err := s.selectKeyChangesStmt.QueryContext(ctx, partition, fromOffset)
+	if toOffset == sarama.OffsetNewest {
+		toOffset = math.MaxInt64
+	}
+	rows, err := s.selectKeyChangesStmt.QueryContext(ctx, partition, fromOffset, toOffset)
 	if err != nil {
 		return nil, 0, err
 	}
@@ -4,6 +4,8 @@ import (
 	"context"
 	"reflect"
 	"testing"
+
+	"github.com/Shopify/sarama"
 )
 
 var ctx = context.Background()
@@ -24,7 +26,7 @@ func TestKeyChanges(t *testing.T) {
 	MustNotError(t, db.StoreKeyChange(ctx, 0, 0, "@alice:localhost"))
 	MustNotError(t, db.StoreKeyChange(ctx, 0, 1, "@bob:localhost"))
 	MustNotError(t, db.StoreKeyChange(ctx, 0, 2, "@charlie:localhost"))
-	userIDs, latest, err := db.KeyChanges(ctx, 0, 1)
+	userIDs, latest, err := db.KeyChanges(ctx, 0, 1, sarama.OffsetNewest)
 	if err != nil {
 		t.Fatalf("Failed to KeyChanges: %s", err)
 	}
@@ -44,7 +46,7 @@ func TestKeyChangesNoDupes(t *testing.T) {
 	MustNotError(t, db.StoreKeyChange(ctx, 0, 0, "@alice:localhost"))
 	MustNotError(t, db.StoreKeyChange(ctx, 0, 1, "@alice:localhost"))
 	MustNotError(t, db.StoreKeyChange(ctx, 0, 2, "@alice:localhost"))
-	userIDs, latest, err := db.KeyChanges(ctx, 0, 0)
+	userIDs, latest, err := db.KeyChanges(ctx, 0, 0, sarama.OffsetNewest)
 	if err != nil {
 		t.Fatalf("Failed to KeyChanges: %s", err)
 	}
@@ -55,3 +57,23 @@ func TestKeyChangesNoDupes(t *testing.T) {
 		t.Fatalf("KeyChanges: wrong user_ids: %v", userIDs)
 	}
 }
+
+func TestKeyChangesUpperLimit(t *testing.T) {
+	db, err := NewDatabase("file::memory:", nil)
+	if err != nil {
+		t.Fatalf("Failed to NewDatabase: %s", err)
+	}
+	MustNotError(t, db.StoreKeyChange(ctx, 0, 0, "@alice:localhost"))
+	MustNotError(t, db.StoreKeyChange(ctx, 0, 1, "@bob:localhost"))
+	MustNotError(t, db.StoreKeyChange(ctx, 0, 2, "@charlie:localhost"))
+	userIDs, latest, err := db.KeyChanges(ctx, 0, 0, 1)
+	if err != nil {
+		t.Fatalf("Failed to KeyChanges: %s", err)
+	}
+	if latest != 1 {
+		t.Fatalf("KeyChanges: got latest=%d want 1", latest)
+	}
+	if !reflect.DeepEqual(userIDs, []string{"@bob:localhost"}) {
+		t.Fatalf("KeyChanges: wrong user_ids: %v", userIDs)
+	}
+}
@@ -38,5 +38,7 @@ type DeviceKeys interface {
 
 type KeyChanges interface {
 	InsertKeyChange(ctx context.Context, partition int32, offset int64, userID string) error
-	SelectKeyChanges(ctx context.Context, partition int32, fromOffset int64) (userIDs []string, latestOffset int64, err error)
+	// SelectKeyChanges returns the set (de-duplicated) of users who have changed their keys between the two offsets.
+	// Results are exclusive of fromOffset and inclusive of toOffset. A toOffset of sarama.OffsetNewest means no upper offset.
+	SelectKeyChanges(ctx context.Context, partition int32, fromOffset, toOffset int64) (userIDs []string, latestOffset int64, err error)
 }
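A worked illustration of those range semantics (not part of the diff), reusing the data from the storage tests above (changes at offsets 0, 1 and 2 for alice, bob and charlie) via the Database.KeyChanges wrapper shown earlier; the function name is hypothetical and the storage import path is assumed to be the keyserver storage package.

import (
	"context"
	"fmt"

	"github.com/Shopify/sarama"
	"github.com/matrix-org/dendrite/keyserver/storage"
)

// exampleRanges shows that fromOffset is exclusive and toOffset is inclusive.
func exampleRanges(ctx context.Context, db storage.Database) {
	userIDs, latest, _ := db.KeyChanges(ctx, 0, 0, 1)
	fmt.Println(userIDs, latest) // [@bob:localhost] 1 (offset 0 excluded, offset 1 included)

	// sarama.OffsetNewest removes the upper bound, so offsets 1 and 2 both match.
	userIDs, latest, _ = db.KeyChanges(ctx, 0, 0, sarama.OffsetNewest)
	fmt.Println(userIDs, latest) // [@bob:localhost @charlie:localhost] 2 (order not guaranteed)
}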
@@ -33,15 +33,15 @@ const DeviceListLogName = "dl"
 // be already filled in with join/leave information.
 func DeviceListCatchup(
 	ctx context.Context, keyAPI keyapi.KeyInternalAPI, stateAPI currentstateAPI.CurrentStateInternalAPI,
-	userID string, res *types.Response, tok types.StreamingToken,
-) (newTok *types.StreamingToken, hasNew bool, err error) {
+	userID string, res *types.Response, from, to types.StreamingToken,
+) (hasNew bool, err error) {
 	// Track users who we didn't track before but now do by virtue of sharing a room with them, or not.
 	newlyJoinedRooms := joinedRooms(res, userID)
 	newlyLeftRooms := leftRooms(res)
 	if len(newlyJoinedRooms) > 0 || len(newlyLeftRooms) > 0 {
 		changed, left, err := TrackChangedUsers(ctx, stateAPI, userID, newlyJoinedRooms, newlyLeftRooms)
 		if err != nil {
-			return nil, false, err
+			return false, err
 		}
 		res.DeviceLists.Changed = changed
 		res.DeviceLists.Left = left
@@ -54,7 +54,7 @@ func DeviceListCatchup(
 	var offset int64
 	// Extract partition/offset from sync token
 	// TODO: In a world where keyserver is sharded there will be multiple partitions and hence multiple QueryKeyChanges to make.
-	logOffset := tok.Log(DeviceListLogName)
+	logOffset := from.Log(DeviceListLogName)
 	if logOffset != nil {
 		partition = logOffset.Partition
 		offset = logOffset.Offset
@@ -62,15 +62,23 @@ func DeviceListCatchup(
 		partition = -1
 		offset = sarama.OffsetOldest
 	}
+	var toOffset int64
+	toLog := to.Log(DeviceListLogName)
+	if toLog != nil {
+		toOffset = toLog.Offset
+	} else {
+		toOffset = sarama.OffsetNewest
+	}
 	var queryRes api.QueryKeyChangesResponse
 	keyAPI.QueryKeyChanges(ctx, &api.QueryKeyChangesRequest{
 		Partition: partition,
 		Offset:    offset,
+		ToOffset:  toOffset,
 	}, &queryRes)
 	if queryRes.Error != nil {
 		// don't fail the catchup because we may have got useful information by tracking membership
 		util.GetLogger(ctx).WithError(queryRes.Error).Error("QueryKeyChanges failed")
-		return
+		return hasNew, nil
 	}
 	userSet := make(map[string]bool)
 	for _, userID := range res.DeviceLists.Changed {
@@ -82,13 +90,7 @@ func DeviceListCatchup(
 			hasNew = true
 		}
 	}
-	// Make a new streaming token using the new offset
-	tok.SetLog(DeviceListLogName, &types.LogPosition{
-		Offset:    queryRes.Offset,
-		Partition: queryRes.Partition,
-	})
-	newTok = &tok
-	return
+	return hasNew, nil
 }
 
 // TrackChangedUsers calculates the values of device_lists.changed|left in the /sync response.
@@ -6,6 +6,7 @@ import (
 	"sort"
 	"testing"
 
+	"github.com/Shopify/sarama"
 	"github.com/matrix-org/dendrite/currentstateserver/api"
 	keyapi "github.com/matrix-org/dendrite/keyserver/api"
 	"github.com/matrix-org/dendrite/syncapi/types"
@@ -15,6 +16,12 @@ import (
 var (
 	syncingUser = "@alice:localhost"
 	emptyToken  = types.NewStreamToken(0, 0, nil)
+	newestToken = types.NewStreamToken(0, 0, map[string]*types.LogPosition{
+		DeviceListLogName: &types.LogPosition{
+			Offset:    sarama.OffsetNewest,
+			Partition: 0,
+		},
+	})
 )
 
 type mockKeyAPI struct{}
@@ -162,12 +169,12 @@ func TestKeyChangeCatchupOnJoinShareNewUser(t *testing.T) {
 	syncResponse := types.NewResponse()
 	syncResponse = joinResponseWithRooms(syncResponse, syncingUser, []string{newlyJoinedRoom})
 
-	_, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, &mockCurrentStateAPI{
+	hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, &mockCurrentStateAPI{
 		roomIDToJoinedMembers: map[string][]string{
 			newlyJoinedRoom: {syncingUser, newShareUser},
 			"!another:room": {syncingUser},
 		},
-	}, syncingUser, syncResponse, emptyToken)
+	}, syncingUser, syncResponse, emptyToken, newestToken)
 	if err != nil {
 		t.Fatalf("DeviceListCatchup returned an error: %s", err)
 	}
@@ -184,12 +191,12 @@ func TestKeyChangeCatchupOnLeaveShareLeftUser(t *testing.T) {
 	syncResponse := types.NewResponse()
 	syncResponse = leaveResponseWithRooms(syncResponse, syncingUser, []string{newlyLeftRoom})
 
-	_, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, &mockCurrentStateAPI{
+	hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, &mockCurrentStateAPI{
 		roomIDToJoinedMembers: map[string][]string{
 			newlyLeftRoom:   {removeUser},
 			"!another:room": {syncingUser},
 		},
-	}, syncingUser, syncResponse, emptyToken)
+	}, syncingUser, syncResponse, emptyToken, newestToken)
 	if err != nil {
 		t.Fatalf("DeviceListCatchup returned an error: %s", err)
 	}
@@ -206,12 +213,12 @@ func TestKeyChangeCatchupOnJoinShareNoNewUsers(t *testing.T) {
 	syncResponse := types.NewResponse()
 	syncResponse = joinResponseWithRooms(syncResponse, syncingUser, []string{newlyJoinedRoom})
 
-	_, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, &mockCurrentStateAPI{
+	hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, &mockCurrentStateAPI{
 		roomIDToJoinedMembers: map[string][]string{
 			newlyJoinedRoom: {syncingUser, existingUser},
 			"!another:room": {syncingUser, existingUser},
 		},
-	}, syncingUser, syncResponse, emptyToken)
+	}, syncingUser, syncResponse, emptyToken, newestToken)
 	if err != nil {
 		t.Fatalf("Catchup returned an error: %s", err)
 	}
@@ -227,12 +234,12 @@ func TestKeyChangeCatchupOnLeaveShareNoUsers(t *testing.T) {
 	syncResponse := types.NewResponse()
 	syncResponse = leaveResponseWithRooms(syncResponse, syncingUser, []string{newlyLeftRoom})
 
-	_, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, &mockCurrentStateAPI{
+	hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, &mockCurrentStateAPI{
 		roomIDToJoinedMembers: map[string][]string{
 			newlyLeftRoom:   {existingUser},
 			"!another:room": {syncingUser, existingUser},
 		},
-	}, syncingUser, syncResponse, emptyToken)
+	}, syncingUser, syncResponse, emptyToken, newestToken)
 	if err != nil {
 		t.Fatalf("DeviceListCatchup returned an error: %s", err)
 	}
@@ -286,11 +293,11 @@ func TestKeyChangeCatchupNoNewJoinsButMessages(t *testing.T) {
 	jr.Timeline.Events = roomTimelineEvents
 	syncResponse.Rooms.Join[roomID] = jr
 
-	_, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, &mockCurrentStateAPI{
+	hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, &mockCurrentStateAPI{
 		roomIDToJoinedMembers: map[string][]string{
 			roomID: {syncingUser, existingUser},
 		},
-	}, syncingUser, syncResponse, emptyToken)
+	}, syncingUser, syncResponse, emptyToken, newestToken)
 	if err != nil {
 		t.Fatalf("DeviceListCatchup returned an error: %s", err)
 	}
@@ -311,13 +318,13 @@ func TestKeyChangeCatchupChangeAndLeft(t *testing.T) {
 	syncResponse = joinResponseWithRooms(syncResponse, syncingUser, []string{newlyJoinedRoom})
 	syncResponse = leaveResponseWithRooms(syncResponse, syncingUser, []string{newlyLeftRoom})
 
-	_, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, &mockCurrentStateAPI{
+	hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, &mockCurrentStateAPI{
 		roomIDToJoinedMembers: map[string][]string{
 			newlyJoinedRoom: {syncingUser, newShareUser, newShareUser2},
 			newlyLeftRoom:   {newlyLeftUser, newlyLeftUser2},
 			"!another:room": {syncingUser},
 		},
-	}, syncingUser, syncResponse, emptyToken)
+	}, syncingUser, syncResponse, emptyToken, newestToken)
 	if err != nil {
 		t.Fatalf("Catchup returned an error: %s", err)
 	}
|
@ -396,12 +403,12 @@ func TestKeyChangeCatchupChangeAndLeftSameRoom(t *testing.T) {
|
||||||
lr.Timeline.Events = roomEvents
|
lr.Timeline.Events = roomEvents
|
||||||
syncResponse.Rooms.Leave[roomID] = lr
|
syncResponse.Rooms.Leave[roomID] = lr
|
||||||
|
|
||||||
_, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, &mockCurrentStateAPI{
|
hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, &mockCurrentStateAPI{
|
||||||
roomIDToJoinedMembers: map[string][]string{
|
roomIDToJoinedMembers: map[string][]string{
|
||||||
roomID: {newShareUser, newShareUser2},
|
roomID: {newShareUser, newShareUser2},
|
||||||
"!another:room": {syncingUser},
|
"!another:room": {syncingUser},
|
||||||
},
|
},
|
||||||
}, syncingUser, syncResponse, emptyToken)
|
}, syncingUser, syncResponse, emptyToken, newestToken)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("DeviceListCatchup returned an error: %s", err)
|
t.Fatalf("DeviceListCatchup returned an error: %s", err)
|
||||||
}
|
}
|
||||||
|
|
|
@@ -75,4 +75,8 @@ func Setup(
 			return GetFilter(req, device, syncDB, vars["userId"], vars["filterId"])
 		}),
 	).Methods(http.MethodGet, http.MethodOptions)
+
+	r0mux.Handle("/keys/changes", httputil.MakeAuthAPI("keys_changes", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
+		return srp.OnIncomingKeyChangeRequest(req, device)
+	})).Methods(http.MethodGet, http.MethodOptions)
 }
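For illustration only (not part of the diff): what a call to the new route might look like from a client, assuming the usual /_matrix/client/r0 prefix for r0mux and a hypothetical homeserver URL, access token and helper name. `from` and `to` are sync tokens, and the handler added in the next hunk returns the changed/left user lists.

import (
	"encoding/json"
	"net/http"
	"net/url"
)

// fetchKeyChanges is a hypothetical client helper for the new endpoint.
func fetchKeyChanges(hsURL, accessToken, from, to string) (changed, left []string, err error) {
	u := hsURL + "/_matrix/client/r0/keys/changes?from=" + url.QueryEscape(from) +
		"&to=" + url.QueryEscape(to)
	req, err := http.NewRequest(http.MethodGet, u, nil)
	if err != nil {
		return nil, nil, err
	}
	req.Header.Set("Authorization", "Bearer "+accessToken)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, nil, err
	}
	defer resp.Body.Close()

	var body struct {
		Changed []string `json:"changed"`
		Left    []string `json:"left"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		return nil, nil, err
	}
	return body.Changed, body.Left, nil
}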
@@ -143,6 +143,55 @@ func (rp *RequestPool) OnIncomingSyncRequest(req *http.Request, device *userapi.
 	}
 }
 
+func (rp *RequestPool) OnIncomingKeyChangeRequest(req *http.Request, device *userapi.Device) util.JSONResponse {
+	from := req.URL.Query().Get("from")
+	to := req.URL.Query().Get("to")
+	if from == "" || to == "" {
+		return util.JSONResponse{
+			Code: 400,
+			JSON: jsonerror.InvalidArgumentValue("missing ?from= or ?to="),
+		}
+	}
+	fromToken, err := types.NewStreamTokenFromString(from)
+	if err != nil {
+		return util.JSONResponse{
+			Code: 400,
+			JSON: jsonerror.InvalidArgumentValue("bad 'from' value"),
+		}
+	}
+	toToken, err := types.NewStreamTokenFromString(to)
+	if err != nil {
+		return util.JSONResponse{
+			Code: 400,
+			JSON: jsonerror.InvalidArgumentValue("bad 'to' value"),
+		}
+	}
+	// work out room joins/leaves
+	res, err := rp.db.IncrementalSync(
+		req.Context(), types.NewResponse(), *device, fromToken, toToken, 0, false,
+	)
+	if err != nil {
+		util.GetLogger(req.Context()).WithError(err).Error("Failed to IncrementalSync")
+		return jsonerror.InternalServerError()
+	}
+
+	res, err = rp.appendDeviceLists(res, device.UserID, fromToken, toToken)
+	if err != nil {
+		util.GetLogger(req.Context()).WithError(err).Error("Failed to appendDeviceLists info")
+		return jsonerror.InternalServerError()
+	}
+	return util.JSONResponse{
+		Code: 200,
+		JSON: struct {
+			Changed []string `json:"changed"`
+			Left    []string `json:"left"`
+		}{
+			Changed: res.DeviceLists.Changed,
+			Left:    res.DeviceLists.Left,
+		},
+	}
+}
+
 func (rp *RequestPool) currentSyncForUser(req syncRequest, latestPos types.StreamingToken) (res *types.Response, err error) {
 	res = types.NewResponse()
 
@@ -172,7 +221,7 @@ func (rp *RequestPool) currentSyncForUser(req syncRequest, latestPos types.Strea
 	if err != nil {
 		return
 	}
-	res, err = rp.appendDeviceLists(res, req.device.UserID, since)
+	res, err = rp.appendDeviceLists(res, req.device.UserID, since, latestPos)
 	if err != nil {
 		return
 	}
@@ -205,14 +254,9 @@ func (rp *RequestPool) currentSyncForUser(req syncRequest, latestPos types.Strea
 }
 
 func (rp *RequestPool) appendDeviceLists(
-	data *types.Response, userID string, since types.StreamingToken,
+	data *types.Response, userID string, since, to types.StreamingToken,
 ) (*types.Response, error) {
-	// TODO: Currently this code will race which may result in duplicates but not missing data.
-	// This happens because, whilst we are told the range to fetch here (since / latest) the
-	// QueryKeyChanges API only exposes a "from" value (on purpose to avoid racing, which then
-	// returns the latest position with which the response has authority on). We'd need to tweak
-	// the API to expose a "to" value to fix this.
-	_, _, err := internal.DeviceListCatchup(context.Background(), rp.keyAPI, rp.stateAPI, userID, data, since)
+	_, err := internal.DeviceListCatchup(context.Background(), rp.keyAPI, rp.stateAPI, userID, data, since, to)
 	if err != nil {
 		return nil, err
 	}
@@ -18,6 +18,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"sort"
 	"strconv"
 	"strings"
 
@@ -129,15 +130,14 @@ func (t *StreamingToken) EDUPosition() StreamPosition {
 	return t.Positions[1]
 }
 func (t *StreamingToken) String() string {
-	logStrings := []string{
-		t.syncToken.String(),
-	}
+	var logStrings []string
 	for name, lp := range t.logs {
 		logStr := fmt.Sprintf("%s-%d-%d", name, lp.Partition, lp.Offset)
 		logStrings = append(logStrings, logStr)
 	}
+	sort.Strings(logStrings)
 	// E.g s11_22_33.dl0-134.ab1-441
-	return strings.Join(logStrings, ".")
+	return strings.Join(append([]string{t.syncToken.String()}, logStrings...), ".")
 }
 
 // IsAfter returns true if ANY position in this token is greater than `other`.
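The sort added above is what the commit message's "Sort keys when serialising log tokens" refers to: Go randomises map iteration order, so without it the same token could serialise differently on each call, and the reserialisation check in the test hunk below would be flaky. A standalone sketch of the pattern, with hypothetical names:

import (
	"fmt"
	"sort"
	"strings"
)

// serialiseToken joins a base sync position with per-log positions in a stable order.
func serialiseToken(base string, logs map[string][2]int64) string {
	var parts []string
	for name, pos := range logs { // map iteration order is randomised
		parts = append(parts, fmt.Sprintf("%s-%d-%d", name, pos[0], pos[1]))
	}
	sort.Strings(parts) // stable output, e.g. "s4_0.ab-1-14419482332.dl-0-123"
	return strings.Join(append([]string{base}, parts...), ".")
}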
@@ -20,7 +20,7 @@ func TestNewSyncTokenWithLogs(t *testing.T) {
 				},
 			},
 		},
-		"s4_0.dl-0-123.ab-1-14419482332": &StreamingToken{
+		"s4_0.ab-1-14419482332.dl-0-123": &StreamingToken{
 			syncToken: syncToken{Type: "s", Positions: []StreamPosition{4, 0}},
 			logs: map[string]*LogPosition{
 				"ab": &LogPosition{
@@ -46,8 +46,9 @@ func TestNewSyncTokenWithLogs(t *testing.T) {
 		if !reflect.DeepEqual(got, *want) {
 			t.Errorf("%s mismatch: got %v want %v", tok, got, want)
 		}
-		if got.String() != tok {
-			t.Errorf("%s reserialisation mismatch: got %s want %s", tok, got.String(), tok)
+		gotStr := got.String()
+		if gotStr != tok {
+			t.Errorf("%s reserialisation mismatch: got %s want %s", tok, gotStr, tok)
 		}
 	}
 }
@@ -128,6 +128,8 @@ query for user with no keys returns empty key dict
 Can claim one time key using POST
 Can claim remote one time key using POST
 Local device key changes appear in v2 /sync
+Local device key changes appear in /keys/changes
+Get left notifs for other users in sync and /keys/changes when user leaves
 Can add account data
 Can add account data to room
 Can get account data without syncing